common.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"
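
/*
 * Return the struct slab that backs @addr, or NULL when @addr is not a
 * valid linear-map address or does not belong to a slab.
 */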
struct slab *kasan_addr_to_slab(const void *addr)
{
        if (virt_addr_valid(addr))
                return virt_to_slab(addr);
        return NULL;
}
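
/* Capture the current stack trace and save it in the stack depot. */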
depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}
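
/*
 * Fill in a tracking record with the given stack depot handle, the
 * current pid and, with CONFIG_KASAN_EXTRA_INFO, the CPU and a coarse
 * timestamp.
 */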
void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
        u32 cpu = raw_smp_processor_id();
        u64 ts_nsec = local_clock();

        track->cpu = cpu;
        track->timestamp = ts_nsec >> 9;
#endif /* CONFIG_KASAN_EXTRA_INFO */
        track->pid = current->pid;
        track->stack = stack;
}
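
/* Capture the current stack trace and record it in @track. */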
void kasan_save_track(struct kasan_track *track, gfp_t flags)
{
        depot_stack_handle_t stack;

        stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
        kasan_set_track(track, stack);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
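/*
 * KASAN reports for the current task are only generated while
 * current->kasan_depth is zero, so calls to kasan_disable_current() and
 * kasan_enable_current() must be balanced.
 */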
void kasan_enable_current(void)
{
        current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
        current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
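
/*
 * Unpoison @size bytes at @address. KFENCE-managed addresses are left
 * alone, as KFENCE tracks them itself.
 */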
void __kasan_unpoison_range(const void *address, size_t size)
{
        if (is_kfence_address(address))
                return;

        kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        void *base = task_stack_page(task);

        kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address. Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */
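
/*
 * Unpoison a newly allocated, non-highmem page-alloc allocation and tag
 * all of its pages with a fresh random tag. Returns false when the
 * allocation is left untouched (highmem or excluded by sampling).
 */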
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return false;

        if (!kasan_sample_page_alloc(order))
                return false;

        tag = kasan_random_tag();
        kasan_unpoison(set_tag(page_address(page), tag),
                       PAGE_SIZE << order, init);
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);

        return true;
}
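
/* Poison a freed page-alloc allocation of 2^order pages. */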
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
        if (likely(!PageHighMem(page)))
                kasan_poison(page_address(page), PAGE_SIZE << order,
                             KASAN_PAGE_FREE, init);
}
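
/*
 * Reset the KASAN tags of all pages that back a new slab and poison its
 * whole memory range as redzone.
 */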
void __kasan_poison_slab(struct slab *slab)
{
        struct page *page = slab_page(slab);
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison(page_address(page), page_size(page),
                     KASAN_SLAB_REDZONE, false);
}
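
/*
 * Temporarily unpoison (and later re-poison) an object's payload while
 * the slab allocator sets up a new object, e.g. to run its constructor.
 */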
void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
        kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                     KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
                            const void *object, bool init)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return 0xff;

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : kasan_random_tag();

        /*
         * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
         * assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? kasan_random_tag() : get_tag(object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                                          const void *object)
{
        /* Initialize per-object metadata if it is present. */
        if (kasan_requires_meta())
                kasan_init_object_meta(cache, object);

        /* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
        object = set_tag(object, assign_tag(cache, object, true));

        return (void *)object;
}

/* Returns true when freeing the object is not safe. */
static bool check_slab_allocation(struct kmem_cache *cache, void *object,
                                  unsigned long ip)
{
        void *tagged_object = object;

        object = kasan_reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
                kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
                return true;
        }

        if (!kasan_byte_accessible(tagged_object)) {
                kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
                return true;
        }

        return false;
}
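
/*
 * Poison a freed object's payload and record the free stack trace,
 * unless the object can still be legally accessed (SLAB_TYPESAFE_BY_RCU
 * within the grace period).
 */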
static inline void poison_slab_object(struct kmem_cache *cache, void *object,
                                      bool init, bool still_accessible)
{
        void *tagged_object = object;

        object = kasan_reset_tag(object);

        /* RCU slabs could be legally used after free within the RCU period. */
        if (unlikely(still_accessible))
                return;

        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                     KASAN_SLAB_FREE, init);

        if (kasan_stack_collection_enabled())
                kasan_save_free_info(cache, tagged_object);
}
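
/*
 * Validate a slab free before it happens. Returns true (after reporting)
 * when the free is invalid; KFENCE objects and not-yet-ready
 * architectures are skipped.
 */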
bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
                           unsigned long ip)
{
        if (!kasan_arch_is_ready() || is_kfence_address(object))
                return false;
        return check_slab_allocation(cache, object, ip);
}
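
/*
 * Poison a freed slab object. Returns true when the object must not be
 * put onto the slab freelist yet because it was placed into the
 * quarantine.
 */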
bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
                       bool still_accessible)
{
        if (!kasan_arch_is_ready() || is_kfence_address(object))
                return false;

        poison_slab_object(cache, object, init, still_accessible);

        /*
         * If the object is put into quarantine, do not let slab put the object
         * onto the freelist for now. The object's metadata is kept until the
         * object gets evicted from quarantine.
         */
        if (kasan_quarantine_put(cache, object))
                return true;

        /*
         * Note: Keep per-object metadata to allow KASAN to print stack traces
         * for use-after-free-before-realloc bugs.
         */

        /* Let slab put the object onto the freelist. */
        return false;
}
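
/*
 * Report and bail out when @ptr does not point to the start of its
 * page-alloc allocation or is already inaccessible (a double free).
 * Returns true when the free is invalid.
 */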
static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
        if (!kasan_arch_is_ready())
                return false;

        if (ptr != page_address(virt_to_head_page(ptr))) {
                kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
                return true;
        }

        if (!kasan_byte_accessible(ptr)) {
                kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
                return true;
        }

        return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
        check_page_allocation(ptr, ip);

        /* The object will be poisoned by kasan_poison_pages(). */
}

static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
                                        gfp_t flags, bool init)
{
        /*
         * Unpoison the whole object. For kmalloc() allocations,
         * poison_kmalloc_redzone() will do precise poisoning.
         */
        kasan_unpoison(object, cache->object_size, init);

        /* Save alloc info (if possible) for non-kmalloc() allocations. */
        if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
                kasan_save_alloc_info(cache, object, flags);
}
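
/*
 * Mark a just-allocated slab object as accessible: assign it a tag (for
 * the tag-based modes) and unpoison its payload.
 */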
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
                                       void *object, gfp_t flags, bool init)
{
        u8 tag;
        void *tagged_object;

        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        if (is_kfence_address(object))
                return (void *)object;

        /*
         * Generate and assign random tag for tag-based modes.
         * Tag is ignored in set_tag() for the generic mode.
         */
        tag = assign_tag(cache, object, false);
        tagged_object = set_tag(object, tag);

        /* Unpoison the object and save alloc info for non-kmalloc() allocations. */
        unpoison_slab_object(cache, tagged_object, flags, init);

        return tagged_object;
}

static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
                                          const void *object, size_t size, gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        /*
         * The redzone has byte-level precision for the generic mode.
         * Partially poison the last object granule to cover the unaligned
         * part of the redzone.
         */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kasan_poison_last_granule((void *)object, size);

        /* Poison the aligned part of the redzone. */
        redzone_start = round_up((unsigned long)(object + size),
                                 KASAN_GRANULE_SIZE);
        redzone_end = round_up((unsigned long)(object + cache->object_size),
                               KASAN_GRANULE_SIZE);
        kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_SLAB_REDZONE, false);

        /*
         * Save alloc info (if possible) for kmalloc() allocations.
         * This also rewrites the alloc info when called from kasan_krealloc().
         */
        if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
                kasan_save_alloc_info(cache, (void *)object, flags);
}
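
/*
 * Poison the redzone between the requested kmalloc() size and the
 * cache's object size. The object itself has already been unpoisoned by
 * kasan_slab_alloc().
 */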
void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                    size_t size, gfp_t flags)
{
        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        if (is_kfence_address(object))
                return (void *)object;

        /* The object has already been unpoisoned by kasan_slab_alloc(). */
        poison_kmalloc_redzone(cache, object, size, flags);

        /* Keep the tag that was set by kasan_slab_alloc(). */
        return (void *)object;
}
EXPORT_SYMBOL(__kasan_kmalloc);

static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
                                                gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        /*
         * The redzone has byte-level precision for the generic mode.
         * Partially poison the last object granule to cover the unaligned
         * part of the redzone.
         */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kasan_poison_last_granule(ptr, size);

        /* Poison the aligned part of the redzone. */
        redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
        kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_PAGE_REDZONE, false);
}
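
/*
 * Same as __kasan_kmalloc(), but for large kmalloc() allocations that
 * are backed directly by page_alloc.
 */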
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
                                          gfp_t flags)
{
        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        /* The object has already been unpoisoned by kasan_unpoison_pages(). */
        poison_kmalloc_large_redzone(ptr, size, flags);

        /* Keep the tag that was set by alloc_pages(). */
        return (void *)ptr;
}
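
/*
 * Unpoison a reallocated object for its new size and re-poison the
 * redzone, for both slab-backed and page_alloc-backed allocations.
 */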
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct slab *slab;

        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        if (is_kfence_address(object))
                return (void *)object;

        /*
         * Unpoison the object's data.
         * Part of it might already have been unpoisoned, but it's unknown
         * how big that part is.
         */
        kasan_unpoison(object, size, false);

        slab = virt_to_slab(object);

        /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
        if (unlikely(!slab))
                poison_kmalloc_large_redzone(object, size, flags);
        else
                poison_kmalloc_redzone(slab->slab_cache, object, size, flags);

        return (void *)object;
}
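
/*
 * Poison a page-alloc allocation that a mempool keeps around instead of
 * freeing. Returns false when the free-time checks fail and the memory
 * must not be reused.
 */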
bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
                                  unsigned long ip)
{
        unsigned long *ptr;

        if (unlikely(PageHighMem(page)))
                return true;

        /* Bail out if allocation was excluded due to sampling. */
        if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
            page_kasan_tag(page) == KASAN_TAG_KERNEL)
                return true;

        ptr = page_address(page);

        if (check_page_allocation(ptr, ip))
                return false;

        kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);

        return true;
}
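
/* Unpoison a page-alloc allocation taken back out of a mempool. */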
void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
                                    unsigned long ip)
{
        __kasan_unpoison_pages(page, order, false);
}
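
/*
 * Poison a slab object or a large kmalloc allocation that a mempool
 * keeps around instead of freeing. Returns false when the free-time
 * checks fail and the memory must not be reused.
 */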
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
        struct folio *folio = virt_to_folio(ptr);
        struct slab *slab;

        /*
         * This function can be called for large kmalloc allocations that get
         * their memory from page_alloc. Thus, the folio might not be a slab.
         */
        if (unlikely(!folio_test_slab(folio))) {
                if (check_page_allocation(ptr, ip))
                        return false;
                kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
                return true;
        }

        if (is_kfence_address(ptr) || !kasan_arch_is_ready())
                return true;

        slab = folio_slab(folio);

        if (check_slab_allocation(slab->slab_cache, ptr, ip))
                return false;

        poison_slab_object(slab->slab_cache, ptr, false, false);
        return true;
}
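
/*
 * Unpoison an allocation handed back out by a mempool, restoring the
 * kmalloc() redzone where applicable.
 */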
void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
{
        struct slab *slab;
        gfp_t flags = 0; /* Might be executing under a lock. */

        slab = virt_to_slab(ptr);

        /*
         * This function can be called for large kmalloc allocations that get
         * their memory from page_alloc.
         */
        if (unlikely(!slab)) {
                kasan_unpoison(ptr, size, false);
                poison_kmalloc_large_redzone(ptr, size, flags);
                return;
        }

        if (is_kfence_address(ptr))
                return;

        /* Unpoison the object and save alloc info for non-kmalloc() allocations. */
        unpoison_slab_object(slab->slab_cache, ptr, flags, false);

        /* Poison the redzone and save alloc info for kmalloc() allocations. */
        if (is_kmalloc_cache(slab->slab_cache))
                poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}
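
/*
 * Check that a single byte at @address is accessible. Reports a KASAN
 * error and returns false when it is not.
 */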
bool __kasan_check_byte(const void *address, unsigned long ip)
{
        if (!kasan_byte_accessible(address)) {
                kasan_report(address, 1, false, ip);
                return false;
        }
        return true;
}