generic.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */
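
/*
 * Shadow encoding used by generic KASAN: every KASAN_GRANULE_SIZE (8) bytes
 * of memory map to one shadow byte. A shadow value of 0 means the whole
 * granule is accessible, a value of 1..7 means only the first N bytes are
 * accessible, and a negative value means the granule is fully poisoned, with
 * the value recording the reason (redzone, freed object, etc.).
 * For example, a 5-byte object has shadow value 5: an access to byte offset
 * 4 passes (4 < 5), while an access to offset 5 is reported (5 >= 5).
 */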
static __always_inline bool memory_is_poisoned_1(const void *addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}
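
/*
 * memory_is_poisoned_2_4_8() checks whether a 2-, 4-, or 8-byte access
 * crosses a granule boundary: e.g. a 4-byte access starting at offset 6
 * within a granule ends at offset 9, so both the granule at addr and the
 * granule holding the last byte must be checked.
 */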
static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
                                                unsigned long size)
{
        u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);

        /*
         * Access crosses an 8-byte (shadow granule) boundary. Such an access
         * maps into two shadow bytes, so we need to check them both.
         */
        if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
                return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

        return memory_is_poisoned_1(addr + size - 1);
}
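
/*
 * An aligned 16-byte access covers exactly two granules, so both shadow
 * bytes can be checked with a single u16 load.
 */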
static __always_inline bool memory_is_poisoned_16(const void *addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);

        /* An unaligned 16-byte access maps into 3 shadow bytes. */
        if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
                return *shadow_addr || memory_is_poisoned_1(addr + 15);

        return *shadow_addr;
}
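
/*
 * Returns the address of the first non-zero byte in [start, start + size),
 * or 0 if all bytes are zero.
 */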
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}
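
/*
 * Scans the shadow region [start, end) for a non-zero byte: an unaligned
 * prefix is checked byte by byte, the aligned middle is checked a u64 word
 * at a time, and the tail is again checked byte by byte.
 */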
static __always_inline unsigned long memory_is_nonzero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_nonzero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_nonzero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_nonzero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_nonzero(start, (end - start) % 8);
}
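
/*
 * Slow path for arbitrary sizes: the whole shadow range must be zero, except
 * that the shadow byte of the last granule may be a positive partial-granule
 * marker that still covers the final accessed byte.
 */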
static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
{
        unsigned long ret;

        ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
                        kasan_mem_to_shadow(addr + size - 1) + 1);

        if (unlikely(ret)) {
                const void *last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
                s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;

                if (unlikely(ret != (unsigned long)last_shadow ||
                             last_accessible_byte >= *last_shadow))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                case 4:
                case 8:
                        return memory_is_poisoned_2_4_8(addr, size);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}
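
/*
 * Returns true if the range can be accessed (or cannot be checked yet),
 * false if a bad access was detected and reported. Wrap-around of
 * addr + size is treated as a bad access as well.
 */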
static __always_inline bool check_region_inline(const void *addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (!kasan_arch_is_ready())
                return true;

        if (unlikely(size == 0))
                return true;

        if (unlikely(addr + size < addr))
                return !kasan_report(addr, size, write, ret_ip);

        if (unlikely(!addr_has_metadata(addr)))
                return !kasan_report(addr, size, write, ret_ip);

        if (likely(!memory_is_poisoned(addr, size)))
                return true;

        return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(const void *addr, size_t size, bool write,
                        unsigned long ret_ip)
{
        return check_region_inline(addr, size, write, ret_ip);
}

bool kasan_byte_accessible(const void *addr)
{
        s8 shadow_byte;

        if (!kasan_arch_is_ready())
                return true;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

        return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
        kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
        if (!__kmem_cache_empty(cache))
                kasan_quarantine_remove_cache(cache);
}
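
/*
 * Unpoisons the global variable itself and poisons the redzone that the
 * compiler placed after it.
 */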
static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

        kasan_unpoison(global->beg, global->size, false);

        kasan_poison(global->beg + aligned_size,
                     global->size_with_redzone - aligned_size,
                     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(void *ptr, ssize_t size)
{
        int i;
        struct kasan_global *globals = ptr;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(void *ptr, ssize_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);
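
/*
 * Instrumentation callbacks emitted by the compiler before memory accesses
 * of the given fixed size. The _noabort variants are plain aliases of the
 * regular ones: the kernel handles reports itself instead of aborting.
 */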
#define DEFINE_ASAN_LOAD_STORE(size) \
        void __asan_load##size(void *addr) \
        { \
                check_region_inline(addr, size, false, _RET_IP_); \
        } \
        EXPORT_SYMBOL(__asan_load##size); \
        __alias(__asan_load##size) \
        void __asan_load##size##_noabort(void *); \
        EXPORT_SYMBOL(__asan_load##size##_noabort); \
        void __asan_store##size(void *addr) \
        { \
                check_region_inline(addr, size, true, _RET_IP_); \
        } \
        EXPORT_SYMBOL(__asan_store##size); \
        __alias(__asan_store##size) \
        void __asan_store##size##_noabort(void *); \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(void *addr, ssize_t size)
{
        kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(void *addr, ssize_t size)
{
        kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(void *addr, ssize_t size)
{
        size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
        size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
                        rounded_up_size;
        size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);
        const void *left_redzone = (const void *)(addr -
                        KASAN_ALLOCA_REDZONE_SIZE);
        const void *right_redzone = (const void *)(addr + rounded_up_size);

        WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));

        kasan_unpoison((const void *)(addr + rounded_down_size),
                        size - rounded_down_size, false);
        kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
                     KASAN_ALLOCA_LEFT, false);
        kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
                     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
{
        if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
                return;

        kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
        void __asan_set_shadow_##byte(const void *addr, ssize_t size) \
        { \
                __memset((void *)addr, 0x##byte, size); \
        } \
        EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
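/*
 * For example: a 32-byte object gets a 16-byte redzone, a 100-byte object
 * gets a 64-byte redzone, and a 5000-byte object gets a 256-byte redzone.
 */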
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        return
                object_size <= 64 - 16 ? 16 :
                object_size <= 128 - 32 ? 32 :
                object_size <= 512 - 64 ? 64 :
                object_size <= 4096 - 128 ? 128 :
                object_size <= (1 << 14) - 256 ? 256 :
                object_size <= (1 << 15) - 512 ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
{
        unsigned int ok_size;
        unsigned int optimal_size;
        unsigned int rem_free_meta_size;
        unsigned int orig_alloc_meta_offset;

        if (!kasan_requires_meta())
                return;

        /*
         * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
         * that thus have per-object metadata. Currently, this flag is used in
         * slab_ksize() to account for per-object metadata when calculating the
         * size of the accessible memory within the object. Additionally, we use
         * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
         */
        *flags |= SLAB_KASAN | SLAB_NO_MERGE;

        ok_size = *size;

        /* Add alloc meta into the redzone. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* If alloc meta doesn't fit, don't add it. */
        if (*size > KMALLOC_MAX_SIZE) {
                cache->kasan_info.alloc_meta_offset = 0;
                *size = ok_size;
                /* Continue, since free meta might still fit. */
        }

        ok_size = *size;
        orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;

        /*
         * Store free meta in the redzone when it's not possible to store
         * it in the object. This is the case when:
         * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
         *    be touched after it was freed, or
         * 2. Object has a constructor, which means it's expected to
         *    retain its content until the next allocation.
         */
        if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
                goto free_meta_added;
        }

        /*
         * Otherwise, if the object is large enough to contain free meta,
         * store it within the object.
         */
        if (sizeof(struct kasan_free_meta) <= cache->object_size) {
                /* cache->kasan_info.free_meta_offset = 0 is implied. */
                goto free_meta_added;
        }

        /*
         * For smaller objects, store the beginning of free meta within the
         * object and the end in the redzone. And thus shift the location of
         * alloc meta to free up space for free meta.
         * This is only possible when slub_debug is disabled, as otherwise
         * the end of free meta will overlap with slub_debug metadata.
         */
        if (!__slub_debug_enabled()) {
                rem_free_meta_size = sizeof(struct kasan_free_meta) -
                                        cache->object_size;
                *size += rem_free_meta_size;
                if (cache->kasan_info.alloc_meta_offset != 0)
                        cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
                goto free_meta_added;
        }

        /*
         * If the object is small and slub_debug is enabled, store free meta
         * in the redzone after alloc meta.
         */
        cache->kasan_info.free_meta_offset = *size;
        *size += sizeof(struct kasan_free_meta);

free_meta_added:
        /* If free meta doesn't fit, don't add it. */
        if (*size > KMALLOC_MAX_SIZE) {
                cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
                cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
                *size = ok_size;
        }

        /* Calculate size with optimal redzone. */
        optimal_size = cache->object_size + optimal_redzone(cache->object_size);
        /* Limit it with KMALLOC_MAX_SIZE. */
        if (optimal_size > KMALLOC_MAX_SIZE)
                optimal_size = KMALLOC_MAX_SIZE;
        /* Use optimal size if the size with added metas is not large enough. */
        if (*size < optimal_size)
                *size = optimal_size;
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
                                              const void *object)
{
        if (!cache->kasan_info.alloc_meta_offset)
                return NULL;
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
                                            const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
                return NULL;
        return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
        struct kasan_alloc_meta *alloc_meta;

        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (alloc_meta) {
                /* Zero out alloc meta to mark it as invalid. */
                __memset(alloc_meta, 0, sizeof(*alloc_meta));
        }

        /*
         * Explicitly marking free meta as invalid is not required: the shadow
         * value for the first 8 bytes of a newly allocated object is not
         * KASAN_SLAB_FREE_META.
         */
}

static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
        /* Zero out alloc meta to mark it as invalid. */
        __memset(meta, 0, sizeof(*meta));
}
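
/*
 * Free meta lives inside the freed object, so its validity is tracked via
 * the shadow byte of the object's first granule: it is only considered
 * valid while that byte equals KASAN_SLAB_FREE_META.
 */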
static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
        if (!kasan_arch_is_ready())
                return;

        /* Check if free meta is valid. */
        if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
                return;

        /* Mark free meta as invalid. */
        *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
        struct kasan_cache *info = &cache->kasan_info;

        if (!kasan_requires_meta())
                return 0;

        if (in_object)
                return (info->free_meta_offset ?
                        0 : sizeof(struct kasan_free_meta));
        else
                return (info->alloc_meta_offset ?
                        sizeof(struct kasan_alloc_meta) : 0) +
                        ((info->free_meta_offset &&
                          info->free_meta_offset != KASAN_NO_FREE_META) ?
                         sizeof(struct kasan_free_meta) : 0);
}
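
/*
 * Records an auxiliary stack trace (e.g. the call_rcu() or queue_work() call
 * site) in the object's alloc meta, keeping the two most recent ones: the
 * previous aux stack is shifted into slot 1 before slot 0 is overwritten.
 */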
static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
{
        struct slab *slab = kasan_addr_to_slab(addr);
        struct kmem_cache *cache;
        struct kasan_alloc_meta *alloc_meta;
        void *object;

        if (is_kfence_address(addr) || !slab)
                return;

        cache = slab->slab_cache;
        object = nearest_obj(cache, slab, addr);
        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (!alloc_meta)
                return;

        alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
        alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}

void kasan_record_aux_stack(void *addr)
{
        return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}
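
/*
 * Same as kasan_record_aux_stack(), but without STACK_DEPOT_FLAG_CAN_ALLOC,
 * so it is safe to call from contexts where stack depot must not allocate.
 */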
void kasan_record_aux_stack_noalloc(void *addr)
{
        return __kasan_record_aux_stack(addr, 0);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
        struct kasan_alloc_meta *alloc_meta;

        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (!alloc_meta)
                return;

        /* Invalidate previous stack traces (might exist for krealloc or mempool). */
        release_alloc_meta(alloc_meta);

        kasan_save_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
        struct kasan_free_meta *free_meta;

        free_meta = kasan_get_free_meta(cache, object);
        if (!free_meta)
                return;

        /* Invalidate previous stack trace (might exist for mempool). */
        release_free_meta(object, free_meta);

        kasan_save_track(&free_meta->free_track, 0);

        /* Mark free meta as valid. */
        *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}