report.c
// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE reporting.
 *
 * Copyright (C) 2020, Google LLC.
 */

#include <linux/stdarg.h>

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>
#include <linux/seq_file.h>
#include <linux/sprintf.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/sched/clock.h>
#include <trace/events/error_report.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef ARCH_FUNC_PREFIX
#define ARCH_FUNC_PREFIX ""
#endif

/* Helper function to either print to a seq_file or to console. */
__printf(2, 3)
static void seq_con_printf(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq)
		seq_vprintf(seq, fmt, args);
	else
		vprintk(fmt, args);
	va_end(args);
}

/*
 * Get the number of stack entries to skip to get out of MM internals. @type is
 * optional, and if set to NULL, assumes an allocation or free stack.
 */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries,
			    const enum kfence_error_type *type)
{
	char buf[64];
	int skipnr, fallback = 0;

	if (type) {
		/* Depending on error type, find different stack entries. */
		switch (*type) {
		case KFENCE_ERROR_UAF:
		case KFENCE_ERROR_OOB:
		case KFENCE_ERROR_INVALID:
			/*
			 * kfence_handle_page_fault() may be called with pt_regs
			 * set to NULL; in that case we'll simply show the full
			 * stack trace.
			 */
			return 0;
		case KFENCE_ERROR_CORRUPTION:
		case KFENCE_ERROR_INVALID_FREE:
			break;
		}
	}
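
	/*
	 * KFENCE_ERROR_CORRUPTION and KFENCE_ERROR_INVALID_FREE are reported
	 * from within the allocator's free path, so the saved trace begins in
	 * allocator internals: scan below for the entry point to skip to.
	 */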
	for (skipnr = 0; skipnr < num_entries; skipnr++) {
		int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);

		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfence_") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kfence_") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
		    !strncmp(buf, ARCH_FUNC_PREFIX "__slab_free", len)) {
			/*
			 * In case of tail calls from any of the below to any of
			 * the above, optimized by the compiler such that the
			 * stack trace would omit the initial entry point below.
			 */
			fallback = skipnr + 1;
		}

		/*
		 * The below list should only include the initial entry points
		 * into the slab allocators. Includes the *_bulk() variants by
		 * checking prefixes.
		 */
		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
			goto found;
	}
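
	/*
	 * No entry point was found: fall back to the frame following the last
	 * allocator-internal frame seen (0 if none, i.e. show the full stack).
	 */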
	if (fallback < num_entries)
		return fallback;
found:
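	/* Also skip the allocator entry point itself to land on its caller. */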
	skipnr++;
	return skipnr < num_entries ? skipnr : 0;
}

static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
			       bool show_alloc)
{
	const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
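	/*
	 * do_div() divides its 64-bit dividend in place and returns the
	 * remainder: split the timestamp and its age into seconds plus a
	 * nanosecond remainder (printed as microseconds via the / 1000).
	 */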
	u64 ts_sec = track->ts_nsec;
	unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);
	u64 interval_nsec = local_clock() - track->ts_nsec;
	unsigned long rem_interval_nsec = do_div(interval_nsec, NSEC_PER_SEC);

	/* Timestamp matches printk timestamp format. */
	seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus (%lu.%06lus ago):\n",
		       show_alloc ? "allocated" : meta->state == KFENCE_OBJECT_RCU_FREEING ?
		       "rcu freeing" : "freed", track->pid,
		       track->cpu, (unsigned long)ts_sec, rem_nsec / 1000,
		       (unsigned long)interval_nsec, rem_interval_nsec / 1000);

	if (track->num_stack_entries) {
		/* Skip allocation/free internals stack. */
		int i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);

		/* stack_trace_seq_print() does not exist; open code our own. */
		for (; i < track->num_stack_entries; i++)
			seq_con_printf(seq, " %pS\n", (void *)track->stack_entries[i]);
	} else {
		seq_con_printf(seq, " no %s stack\n", show_alloc ? "allocation" : "deallocation");
	}
}

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta)
{
	const int size = abs(meta->size);
	const unsigned long start = meta->addr;
	const struct kmem_cache *const cache = meta->cache;

	lockdep_assert_held(&meta->lock);

	if (meta->state == KFENCE_OBJECT_UNUSED) {
		seq_con_printf(seq, "kfence-#%td unused\n", meta - kfence_metadata);
		return;
	}

	seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n",
		       meta - kfence_metadata, (void *)start, (void *)(start + size - 1),
		       size, (cache && cache->name) ? cache->name : "<destroyed>");

	kfence_print_stack(seq, meta, true);

	if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING) {
		seq_con_printf(seq, "\n");
		kfence_print_stack(seq, meta, false);
	}
}

/*
 * Show bytes at @address that are different from the expected canary values,
 * up to @bytes_to_show.
 */
static void print_diff_canary(unsigned long address, size_t bytes_to_show,
			      const struct kfence_metadata *meta)
{
	const unsigned long show_until_addr = address + bytes_to_show;
	const u8 *cur, *end;

	/* Do not show contents of object nor read into following guard page. */
	end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)
						: min(show_until_addr, PAGE_ALIGN(address)));
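
	/*
	 * The expected canary value depends on each byte's address (see
	 * KFENCE_CANARY_PATTERN_U8), so the bytes are compared one at a time.
	 */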
	pr_cont("[");
	for (cur = (const u8 *)address; cur < end; cur++) {
		if (*cur == KFENCE_CANARY_PATTERN_U8(cur))
			pr_cont(" .");
		else if (no_hash_pointers)
			pr_cont(" 0x%02x", *cur);
		else /* Do not leak kernel memory in non-debug builds. */
			pr_cont(" !");
	}
	pr_cont(" ]");
}

static const char *get_access_type(bool is_write)
{
	return is_write ? "write" : "read";
}

void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
	const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1;
	int num_stack_entries;
	int skipnr = 0;
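
	/*
	 * With @regs, the trace starts at the faulting context; otherwise,
	 * capture the current stack (the skipnr of 1 omits this function's
	 * own frame) and let get_stack_skipnr() trim allocator internals.
	 */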
	if (regs) {
		num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
	} else {
		num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
		skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
	}

	/* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */
	if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
		return;

	if (meta)
		lockdep_assert_held(&meta->lock);
	/*
	 * Because we may generate reports in printk-unfriendly parts of the
	 * kernel, such as scheduler code, the use of printk() could deadlock.
	 * Until such time that all printing code here is safe in all parts of
	 * the kernel, accept the risk, and just get our message out (given the
	 * system might already behave unpredictably due to the memory error).
	 *
	 * As such, also disable lockdep to hide warnings, and avoid disabling
	 * lockdep for the rest of the kernel.
	 */
	lockdep_off();

	pr_err("==================================================================\n");
	/* Print report header. */
	switch (type) {
	case KFENCE_ERROR_OOB: {
		const bool left_of_object = address < meta->addr;

		pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Out-of-bounds %s at 0x%p (%luB %s of kfence-#%td):\n",
		       get_access_type(is_write), (void *)address,
		       left_of_object ? meta->addr - address : address - meta->addr,
		       left_of_object ? "left" : "right", object_index);
		break;
	}
	case KFENCE_ERROR_UAF:
		pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Use-after-free %s at 0x%p (in kfence-#%td):\n",
		       get_access_type(is_write), (void *)address, object_index);
		break;
	case KFENCE_ERROR_CORRUPTION:
		pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Corrupted memory at 0x%p ", (void *)address);
		print_diff_canary(address, 16, meta);
		pr_cont(" (in kfence-#%td):\n", object_index);
		break;
	case KFENCE_ERROR_INVALID:
		pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Invalid %s at 0x%p:\n", get_access_type(is_write),
		       (void *)address);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Invalid free of 0x%p (in kfence-#%td):\n", (void *)address,
		       object_index);
		break;
	}

	/* Print stack trace and object info. */
	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);

	if (meta) {
		pr_err("\n");
		kfence_print_object(NULL, meta);
	}

	/* Print report footer. */
	pr_err("\n");
	if (no_hash_pointers && regs)
		show_regs(regs);
	else
		dump_stack_print_info(KERN_ERR);
	trace_error_report_end(ERROR_DETECTOR_KFENCE, address);
	pr_err("==================================================================\n");

	lockdep_on();
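
	/* Honor panic_on_warn: check_panic_on_warn() panics if it is set. */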
	check_panic_on_warn("KFENCE");

	/* We encountered a memory safety error, taint the kernel! */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
}

#ifdef CONFIG_PRINTK
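/*
 * Copy a track's stack trace into the kmem_obj_info stack array, skipping
 * allocator-internal entries; NULL-terminate if the array is not filled.
 */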
static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
{
	int i, j;

	i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
	for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
		kp_stack[j] = (void *)track->stack_entries[i];
	if (j < KS_ADDRS_COUNT)
		kp_stack[j] = NULL;
}

bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
	unsigned long flags;

	if (!meta)
		return false;

	/*
	 * If state is UNUSED at least show the pointer requested; the rest
	 * would be garbage data.
	 */
	kpp->kp_ptr = object;

	/* Requesting info on a never-used object is almost certainly a bug. */
	if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
		return true;
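
	/* Hold meta->lock so the state and stack tracks are read consistently. */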
	raw_spin_lock_irqsave(&meta->lock, flags);
	kpp->kp_slab = slab;
	kpp->kp_slab_cache = meta->cache;
	kpp->kp_objp = (void *)meta->addr;
	kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
	if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING)
		kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
	/* get_stack_skipnr() ensures the first entry is outside allocator. */
	kpp->kp_ret = kpp->kp_stack[0];
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return true;
}

#endif