  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * This file contains common KASAN error reporting code.
  4. *
  5. * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  6. * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  7. *
  8. * Some code borrowed from https://github.com/xairy/kasan-prototype by
  9. * Andrey Konovalov <andreyknvl@gmail.com>
  10. */
  11. #include <kunit/test.h>
  12. #include <linux/bitops.h>
  13. #include <linux/ftrace.h>
  14. #include <linux/init.h>
  15. #include <linux/kernel.h>
  16. #include <linux/lockdep.h>
  17. #include <linux/mm.h>
  18. #include <linux/printk.h>
  19. #include <linux/sched.h>
  20. #include <linux/slab.h>
  21. #include <linux/stackdepot.h>
  22. #include <linux/stacktrace.h>
  23. #include <linux/string.h>
  24. #include <linux/types.h>
  25. #include <linux/vmalloc.h>
  26. #include <linux/kasan.h>
  27. #include <linux/module.h>
  28. #include <linux/sched/task_stack.h>
  29. #include <linux/uaccess.h>
  30. #include <trace/events/error_report.h>
  31. #include <asm/sections.h>
  32. #include "kasan.h"
  33. #include "../slab.h"
/* Flag word holding the reporting-state bits defined below. */
static unsigned long kasan_flags;

/* Set once the first report has been printed (one-shot mode). */
#define KASAN_BIT_REPORTED	0
/* When set, every report is printed, not just the first one. */
#define KASAN_BIT_MULTI_SHOT	1

/* Action taken when a report is printed; selected via kasan.fault= below. */
enum kasan_arg_fault {
	KASAN_ARG_FAULT_DEFAULT,
	KASAN_ARG_FAULT_REPORT,
	KASAN_ARG_FAULT_PANIC,
	KASAN_ARG_FAULT_PANIC_ON_WRITE,
};

static enum kasan_arg_fault kasan_arg_fault __ro_after_init = KASAN_ARG_FAULT_DEFAULT;
  44. /* kasan.fault=report/panic */
  45. static int __init early_kasan_fault(char *arg)
  46. {
  47. if (!arg)
  48. return -EINVAL;
  49. if (!strcmp(arg, "report"))
  50. kasan_arg_fault = KASAN_ARG_FAULT_REPORT;
  51. else if (!strcmp(arg, "panic"))
  52. kasan_arg_fault = KASAN_ARG_FAULT_PANIC;
  53. else if (!strcmp(arg, "panic_on_write"))
  54. kasan_arg_fault = KASAN_ARG_FAULT_PANIC_ON_WRITE;
  55. else
  56. return -EINVAL;
  57. return 0;
  58. }
  59. early_param("kasan.fault", early_kasan_fault);
/* kasan_multi_shot: print every report instead of only the first one. */
static int __init kasan_set_multi_shot(char *str)
{
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);
/*
 * This function is used to check whether KASAN reports are suppressed for
 * software KASAN modes via kasan_disable/enable_current() critical sections.
 *
 * This is done to avoid:
 * 1. False-positive reports when accessing slab metadata,
 * 2. Deadlocking when poisoned memory is accessed by the reporting code.
 *
 * Hardware Tag-Based KASAN instead relies on:
 * For #1: Resetting tags via kasan_reset_tag().
 * For #2: Suppression of tag checks via CPU, see report_suppress_start/end().
 */
static bool report_suppressed_sw(void)
{
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	/* Non-zero depth means we are inside a kasan_disable_current() section. */
	if (current->kasan_depth)
		return true;
#endif
	return false;
}
/*
 * Suppress further KASAN checks while a report is being printed.
 * Must be paired with report_suppress_stop().
 */
static void report_suppress_start(void)
{
#ifdef CONFIG_KASAN_HW_TAGS
	/*
	 * Disable preemption for the duration of printing a KASAN report, as
	 * hw_suppress_tag_checks_start() disables checks on the current CPU.
	 */
	preempt_disable();
	hw_suppress_tag_checks_start();
#else
	kasan_disable_current();
#endif
}
/*
 * Re-enable KASAN checks after the report has been printed; undoes
 * report_suppress_start() in reverse order.
 */
static void report_suppress_stop(void)
{
#ifdef CONFIG_KASAN_HW_TAGS
	hw_suppress_tag_checks_stop();
	preempt_enable();
#else
	kasan_enable_current();
#endif
}
  108. /*
  109. * Used to avoid reporting more than one KASAN bug unless kasan_multi_shot
  110. * is enabled. Note that KASAN tests effectively enable kasan_multi_shot
  111. * for their duration.
  112. */
  113. static bool report_enabled(void)
  114. {
  115. if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
  116. return true;
  117. return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
  118. }
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) || IS_ENABLED(CONFIG_KASAN_MODULE_TEST)

/* Enable multi-shot reporting; returns the previous multi-shot state. */
bool kasan_save_enable_multi_shot(void)
{
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);

/* Restore the state previously returned by kasan_save_enable_multi_shot(). */
void kasan_restore_multi_shot(bool enabled)
{
	if (!enabled)
		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);

#endif
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

/*
 * Whether the KASAN KUnit test suite is currently being executed.
 * Updated in kasan_test.c.
 */
static bool kasan_kunit_executing;

/* Mark the start of the KASAN KUnit test suite. */
void kasan_kunit_test_suite_start(void)
{
	WRITE_ONCE(kasan_kunit_executing, true);
}
EXPORT_SYMBOL_GPL(kasan_kunit_test_suite_start);

/* Mark the end of the KASAN KUnit test suite. */
void kasan_kunit_test_suite_end(void)
{
	WRITE_ONCE(kasan_kunit_executing, false);
}
EXPORT_SYMBOL_GPL(kasan_kunit_test_suite_end);

static bool kasan_kunit_test_suite_executing(void)
{
	return READ_ONCE(kasan_kunit_executing);
}

#else /* CONFIG_KASAN_KUNIT_TEST */

static inline bool kasan_kunit_test_suite_executing(void) { return false; }

#endif /* CONFIG_KASAN_KUNIT_TEST */
#if IS_ENABLED(CONFIG_KUNIT)

/*
 * Mark the currently running KUnit test (if any) as failed, unless the
 * KASAN test suite itself is executing — KASAN's own tests intentionally
 * trigger reports and must not be failed for doing so.
 */
static void fail_non_kasan_kunit_test(void)
{
	struct kunit *test;

	if (kasan_kunit_test_suite_executing())
		return;

	test = current->kunit_test;
	if (test)
		kunit_set_failure(test);
}

#else /* CONFIG_KUNIT */

static inline void fail_non_kasan_kunit_test(void) { }

#endif /* CONFIG_KUNIT */
/* Serializes report printing so concurrent reports do not interleave. */
static DEFINE_RAW_SPINLOCK(report_lock);

/*
 * Begin a report: take the report lock (irqs off), suppress recursive
 * KASAN checks, and print the leading delimiter line. Must be paired
 * with end_report().
 * NOTE(review): the sync parameter is unused in this function — confirm
 * whether callers rely on it elsewhere.
 */
static void start_report(unsigned long *flags, bool sync)
{
	fail_non_kasan_kunit_test();
	/* Respect the /proc/sys/kernel/traceoff_on_warning interface. */
	disable_trace_on_warning();
	/* Do not allow LOCKDEP mangling KASAN reports. */
	lockdep_off();
	/* Make sure we don't end up in loop. */
	report_suppress_start();
	raw_spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}
/*
 * Finish a report: print the trailing delimiter, drop the report lock,
 * honor the panic_on_warn and kasan.fault= policies, taint the kernel,
 * and re-enable lockdep and KASAN checks. Pairs with start_report().
 */
static void end_report(unsigned long *flags, const void *addr, bool is_write)
{
	if (addr)
		trace_error_report_end(ERROR_DETECTOR_KASAN,
				       (unsigned long)addr);
	pr_err("==================================================================\n");
	raw_spin_unlock_irqrestore(&report_lock, *flags);
	/* Only honor panic_on_warn for the one report printed in one-shot mode. */
	if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		check_panic_on_warn("KASAN");
	switch (kasan_arg_fault) {
	case KASAN_ARG_FAULT_DEFAULT:
	case KASAN_ARG_FAULT_REPORT:
		break;
	case KASAN_ARG_FAULT_PANIC:
		panic("kasan.fault=panic set ...\n");
		break;
	case KASAN_ARG_FAULT_PANIC_ON_WRITE:
		if (is_write)
			panic("kasan.fault=panic_on_write set ...\n");
		break;
	}
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	lockdep_on();
	report_suppress_stop();
}
  206. static void print_error_description(struct kasan_report_info *info)
  207. {
  208. pr_err("BUG: KASAN: %s in %pS\n", info->bug_type, (void *)info->ip);
  209. if (info->type != KASAN_REPORT_ACCESS) {
  210. pr_err("Free of addr %px by task %s/%d\n",
  211. info->access_addr, current->comm, task_pid_nr(current));
  212. return;
  213. }
  214. if (info->access_size)
  215. pr_err("%s of size %zu at addr %px by task %s/%d\n",
  216. info->is_write ? "Write" : "Read", info->access_size,
  217. info->access_addr, current->comm, task_pid_nr(current));
  218. else
  219. pr_err("%s at addr %px by task %s/%d\n",
  220. info->is_write ? "Write" : "Read",
  221. info->access_addr, current->comm, task_pid_nr(current));
  222. }
/*
 * Print one allocation/free track: the recording task (and, with
 * CONFIG_KASAN_EXTRA_INFO, the CPU and timestamp) plus its stack trace.
 */
static void print_track(struct kasan_track *track, const char *prefix)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
	u64 ts_nsec = track->timestamp;
	unsigned long rem_usec;

	/*
	 * Presumably track->timestamp is stored right-shifted by 9 bits to
	 * save space, so shift it back before the ns -> s/us split —
	 * TODO confirm against the code that records the timestamp.
	 */
	ts_nsec <<= 9;
	rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000;

	pr_err("%s by task %u on cpu %d at %lu.%06lus:\n",
		prefix, track->pid, track->cpu,
		(unsigned long)ts_nsec, rem_usec);
#else
	pr_err("%s by task %u:\n", prefix, track->pid);
#endif /* CONFIG_KASAN_EXTRA_INFO */
	if (track->stack)
		stack_depot_print(track->stack);
	else
		pr_err("(stack is not available)\n");
}
  241. static inline struct page *addr_to_page(const void *addr)
  242. {
  243. if (virt_addr_valid(addr))
  244. return virt_to_head_page(addr);
  245. return NULL;
  246. }
  247. static void describe_object_addr(const void *addr, struct kasan_report_info *info)
  248. {
  249. unsigned long access_addr = (unsigned long)addr;
  250. unsigned long object_addr = (unsigned long)info->object;
  251. const char *rel_type, *region_state = "";
  252. int rel_bytes;
  253. pr_err("The buggy address belongs to the object at %px\n"
  254. " which belongs to the cache %s of size %d\n",
  255. info->object, info->cache->name, info->cache->object_size);
  256. if (access_addr < object_addr) {
  257. rel_type = "to the left";
  258. rel_bytes = object_addr - access_addr;
  259. } else if (access_addr >= object_addr + info->alloc_size) {
  260. rel_type = "to the right";
  261. rel_bytes = access_addr - (object_addr + info->alloc_size);
  262. } else {
  263. rel_type = "inside";
  264. rel_bytes = access_addr - object_addr;
  265. }
  266. /*
  267. * Tag-Based modes use the stack ring to infer the bug type, but the
  268. * memory region state description is generated based on the metadata.
  269. * Thus, defining the region state as below can contradict the metadata.
  270. * Fixing this requires further improvements, so only infer the state
  271. * for the Generic mode.
  272. */
  273. if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
  274. if (strcmp(info->bug_type, "slab-out-of-bounds") == 0)
  275. region_state = "allocated ";
  276. else if (strcmp(info->bug_type, "slab-use-after-free") == 0)
  277. region_state = "freed ";
  278. }
  279. pr_err("The buggy address is located %d bytes %s of\n"
  280. " %s%zu-byte region [%px, %px)\n",
  281. rel_bytes, rel_type, region_state, info->alloc_size,
  282. (void *)object_addr, (void *)(object_addr + info->alloc_size));
  283. }
  284. static void describe_object_stacks(struct kasan_report_info *info)
  285. {
  286. if (info->alloc_track.stack) {
  287. print_track(&info->alloc_track, "Allocated");
  288. pr_err("\n");
  289. }
  290. if (info->free_track.stack) {
  291. print_track(&info->free_track, "Freed");
  292. pr_err("\n");
  293. }
  294. kasan_print_aux_stacks(info->cache, info->object);
  295. }
/*
 * Describe the slab object the buggy address belongs to: its alloc/free
 * stacks (when stack collection is enabled) and the address's position
 * within or around the object.
 */
static void describe_object(const void *addr, struct kasan_report_info *info)
{
	if (kasan_stack_collection_enabled())
		describe_object_stacks(info);
	describe_object_addr(addr, info);
}
  302. static inline bool kernel_or_module_addr(const void *addr)
  303. {
  304. if (is_kernel((unsigned long)addr))
  305. return true;
  306. if (is_module_address((unsigned long)addr))
  307. return true;
  308. return false;
  309. }
  310. static inline bool init_task_stack_addr(const void *addr)
  311. {
  312. return addr >= (void *)&init_thread_union.stack &&
  313. (addr <= (void *)&init_thread_union.stack +
  314. sizeof(init_thread_union.stack));
  315. }
/*
 * Print everything known about the location of @addr: the containing slab
 * object, global variable, stack frame, vmalloc area, and/or physical page.
 * NOTE(review): the tag parameter is unused in this function — confirm
 * whether it is kept for signature symmetry with callers.
 */
static void print_address_description(void *addr, u8 tag,
				      struct kasan_report_info *info)
{
	struct page *page = addr_to_page(addr);

	dump_stack_lvl(KERN_ERR);
	pr_err("\n");

	if (info->cache && info->object) {
		describe_object(addr, info);
		pr_err("\n");
	}

	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
		pr_err("\n");
	}

	if (object_is_on_stack(addr)) {
		/*
		 * Currently, KASAN supports printing frame information only
		 * for accesses to the task's own stack.
		 */
		kasan_print_address_stack_frame(addr);
		pr_err("\n");
	}

	if (is_vmalloc_addr(addr)) {
		struct vm_struct *va = find_vm_area(addr);

		if (va) {
			pr_err("The buggy address belongs to the virtual mapping at\n"
			       " [%px, %px) created by:\n"
			       " %pS\n",
			       va->addr, va->addr + va->size, va->caller);
			pr_err("\n");

			/* Dump the page backing the vmalloc alias instead. */
			page = vmalloc_to_page(addr);
		}
	}

	if (page) {
		pr_err("The buggy address belongs to the physical page:\n");
		dump_page(page, "kasan: bad access detected");
		pr_err("\n");
	}
}
  356. static bool meta_row_is_guilty(const void *row, const void *addr)
  357. {
  358. return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW);
  359. }
  360. static int meta_pointer_offset(const void *row, const void *addr)
  361. {
  362. /*
  363. * Memory state around the buggy address:
  364. * ff00ff00ff00ff00: 00 00 00 05 fe fe fe fe fe fe fe fe fe fe fe fe
  365. * ...
  366. *
  367. * The length of ">ff00ff00ff00ff00: " is
  368. * 3 + (BITS_PER_LONG / 8) * 2 chars.
  369. * The length of each granule metadata is 2 bytes
  370. * plus 1 byte for space.
  371. */
  372. return 3 + (BITS_PER_LONG / 8) * 2 +
  373. (addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
  374. }
/*
 * Print META_ROWS_AROUND_ADDR rows of shadow metadata before and after the
 * row containing @addr, marking the row that holds the buggy address with
 * '>' and pointing at the guilty granule with '^'.
 */
static void print_memory_metadata(const void *addr)
{
	int i;
	void *row;

	row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
			- META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -META_ROWS_AROUND_ADDR; i <= META_ROWS_AROUND_ADDR; i++) {
		/* '>' or ' ', the %px pointer (2 chars/byte), ": ", NUL. */
		char buffer[4 + (BITS_PER_LONG / 8) * 2];
		char metadata[META_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
				(i == 0) ? ">%px: " : " %px: ", row);

		/*
		 * We should not pass a shadow pointer to generic
		 * function, because generic functions may try to
		 * access kasan mapping for the passed address.
		 */
		kasan_metadata_fetch_row(&metadata[0], row);

		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, META_BYTES_PER_ROW, 1,
			metadata, META_BYTES_PER_ROW, 0);

		if (meta_row_is_guilty(row, addr))
			pr_err("%*c\n", meta_pointer_offset(row, addr), '^');

		row += META_MEM_BYTES_PER_ROW;
	}
}
  401. static void print_report(struct kasan_report_info *info)
  402. {
  403. void *addr = kasan_reset_tag((void *)info->access_addr);
  404. u8 tag = get_tag((void *)info->access_addr);
  405. print_error_description(info);
  406. if (addr_has_metadata(addr))
  407. kasan_print_tags(tag, info->first_bad_addr);
  408. pr_err("\n");
  409. if (addr_has_metadata(addr)) {
  410. print_address_description(addr, tag, info);
  411. print_memory_metadata(info->first_bad_addr);
  412. } else {
  413. dump_stack_lvl(KERN_ERR);
  414. }
  415. }
/*
 * Derive the remaining report fields from the access address and type:
 * the first bad address, the owning slab cache/object (if any), the
 * allocation size, and the bug type for invalid/double frees.
 */
static void complete_report_info(struct kasan_report_info *info)
{
	void *addr = kasan_reset_tag((void *)info->access_addr);
	struct slab *slab;

	if (info->type == KASAN_REPORT_ACCESS)
		info->first_bad_addr = kasan_find_first_bad_addr(
					(void *)info->access_addr, info->access_size);
	else
		info->first_bad_addr = addr;

	slab = kasan_addr_to_slab(addr);
	if (slab) {
		info->cache = slab->slab_cache;
		info->object = nearest_obj(info->cache, slab, addr);

		/* Try to determine allocation size based on the metadata. */
		info->alloc_size = kasan_get_alloc_size(info->object, info->cache);
		/* Fallback to the object size if failed. */
		if (!info->alloc_size)
			info->alloc_size = info->cache->object_size;
	} else
		info->cache = info->object = NULL;

	switch (info->type) {
	case KASAN_REPORT_INVALID_FREE:
		info->bug_type = "invalid-free";
		break;
	case KASAN_REPORT_DOUBLE_FREE:
		info->bug_type = "double-free";
		break;
	default:
		/* bug_type filled in by kasan_complete_mode_report_info. */
		break;
	}

	/* Fill in mode-specific report info fields. */
	kasan_complete_mode_report_info(info);
}
/*
 * Print a report for an invalid or double free of @ptr detected at @ip.
 * @type distinguishes KASAN_REPORT_INVALID_FREE from KASAN_REPORT_DOUBLE_FREE.
 */
void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_type type)
{
	unsigned long flags;
	struct kasan_report_info info;

	/*
	 * Do not check report_suppressed_sw(), as an invalid-free cannot be
	 * caused by accessing poisoned memory and thus should not be suppressed
	 * by kasan_disable/enable_current() critical sections.
	 *
	 * Note that for Hardware Tag-Based KASAN, kasan_report_invalid_free()
	 * is triggered by explicit tag checks and not by the ones performed by
	 * the CPU. Thus, reporting invalid-free is not suppressed as well.
	 */
	if (unlikely(!report_enabled()))
		return;

	start_report(&flags, true);

	__memset(&info, 0, sizeof(info));
	info.type = type;
	info.access_addr = ptr;
	info.access_size = 0;
	info.is_write = false;
	info.ip = ip;

	complete_report_info(&info);

	print_report(&info);

	/*
	 * Invalid free is considered a "write" since the allocator's metadata
	 * updates involves writes.
	 */
	end_report(&flags, ptr, true);
}
/*
 * kasan_report() is the only reporting function that uses
 * user_access_save/restore(): kasan_report_invalid_free() cannot be called
 * from a UACCESS region, and kasan_report_async() is not used on x86.
 *
 * Returns true when a report was printed, false when it was suppressed.
 */
bool kasan_report(const void *addr, size_t size, bool is_write,
		  unsigned long ip)
{
	bool ret = true;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags;
	struct kasan_report_info info;

	/* Bail out if reports are suppressed or already printed (one-shot). */
	if (unlikely(report_suppressed_sw()) || unlikely(!report_enabled())) {
		ret = false;
		goto out;
	}

	start_report(&irq_flags, true);

	__memset(&info, 0, sizeof(info));
	info.type = KASAN_REPORT_ACCESS;
	info.access_addr = addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	complete_report_info(&info);

	print_report(&info);

	end_report(&irq_flags, (void *)addr, is_write);

out:
	user_access_restore(ua_flags);
	return ret;
}
#ifdef CONFIG_KASAN_HW_TAGS
/*
 * Report an asynchronously detected tag-check fault. No faulting address
 * or access details are available in this mode, so only a stack dump is
 * printed.
 */
void kasan_report_async(void)
{
	unsigned long flags;

	/*
	 * Do not check report_suppressed_sw(), as
	 * kasan_disable/enable_current() critical sections do not affect
	 * Hardware Tag-Based KASAN.
	 */
	if (unlikely(!report_enabled()))
		return;

	start_report(&flags, false);
	pr_err("BUG: KASAN: invalid-access\n");
	pr_err("Asynchronous fault: no details available\n");
	pr_err("\n");
	dump_stack_lvl(KERN_ERR);

	/*
	 * Conservatively set is_write=true, because no details are available.
	 * In this mode, kasan.fault=panic_on_write is like kasan.fault=panic.
	 */
	end_report(&flags, NULL, true);
}
#endif /* CONFIG_KASAN_HW_TAGS */
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * With compiler-based KASAN modes, accesses to bogus pointers (outside of the
 * mapped kernel address space regions) cause faults when KASAN tries to check
 * the shadow memory before the actual memory access. This results in cryptic
 * GPF reports, which are hard for users to interpret. This hook helps users to
 * figure out what the original bogus pointer was.
 *
 * @addr: the faulting address, expected to be a shadow-memory address.
 */
void kasan_non_canonical_hook(unsigned long addr)
{
	unsigned long orig_addr;
	const char *bug_type;

	/*
	 * All addresses that came as a result of the memory-to-shadow mapping
	 * (even for bogus pointers) must be >= KASAN_SHADOW_OFFSET.
	 */
	if (addr < KASAN_SHADOW_OFFSET)
		return;

	/* Invert the memory-to-shadow mapping to recover the original pointer. */
	orig_addr = (unsigned long)kasan_shadow_to_mem((void *)addr);

	/*
	 * For faults near the shadow address for NULL, we can be fairly certain
	 * that this is a KASAN shadow memory access.
	 * For faults that correspond to the shadow for low or high canonical
	 * addresses, we can still be pretty sure: these shadow regions are a
	 * fairly narrow chunk of the address space.
	 * But the shadow for non-canonical addresses is a really large chunk
	 * of the address space. For this case, we still print the decoded
	 * address, but make it clear that this is not necessarily what's
	 * actually going on.
	 */
	if (orig_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if (orig_addr < TASK_SIZE)
		bug_type = "probably user-memory-access";
	else if (addr_in_shadow((void *)addr))
		bug_type = "probably wild-memory-access";
	else
		bug_type = "maybe wild-memory-access";

	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
}
#endif