hooks.c

// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN hooks for kernel subsystems.
 *
 * These functions handle creation of KMSAN metadata for memory allocations.
 *
 * Copyright (C) 2018-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <linux/cacheflush.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>

#include "../internal.h"
#include "../slab.h"
#include "kmsan.h"

/*
 * Instrumented functions shouldn't be called under
 * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to
 * skipping effects of functions like memset() inside instrumented code.
 */
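/*
 * Illustrative sketch (not part of the original file): what goes wrong if the
 * rule above is violated. While kmsan_in_runtime() is true the hooks emitted
 * by the compiler bail out, so an instrumented call such as
 *
 *	kmsan_enter_runtime();
 *	memset(buf, 0, len);	// shadow of 'buf' is not updated here
 *	kmsan_leave_runtime();
 *
 * leaves stale metadata behind, and 'buf' may later be reported as
 * uninitialized. Inside the runtime, only the kmsan_internal_*() helpers
 * should be used, as the functions below do.
 */
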
void kmsan_task_create(struct task_struct *task)
{
        kmsan_enter_runtime();
        kmsan_internal_task_create(task);
        kmsan_leave_runtime();
}

void kmsan_task_exit(struct task_struct *task)
{
        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        kmsan_disable_current();
}

void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
        if (unlikely(object == NULL))
                return;
        if (!kmsan_enabled || kmsan_in_runtime())
                return;
        /*
         * There's a ctor or this is an RCU cache - do nothing. The memory
         * status hasn't changed since last use.
         */
        if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
                return;

        kmsan_enter_runtime();
        if (flags & __GFP_ZERO)
                kmsan_internal_unpoison_memory(object, s->object_size,
                                               KMSAN_POISON_CHECK);
        else
                kmsan_internal_poison_memory(object, s->object_size, flags,
                                             KMSAN_POISON_CHECK);
        kmsan_leave_runtime();
}

void kmsan_slab_free(struct kmem_cache *s, void *object)
{
        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
                return;
        /*
         * If there's a constructor, freed memory must remain in the same state
         * until the next allocation. We cannot save its state to detect
         * use-after-free bugs, instead we just keep it unpoisoned.
         */
        if (s->ctor)
                return;

        kmsan_enter_runtime();
        kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
                                     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
        kmsan_leave_runtime();
}

void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        if (unlikely(ptr == NULL))
                return;
        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        kmsan_enter_runtime();
        if (flags & __GFP_ZERO)
                kmsan_internal_unpoison_memory((void *)ptr, size,
                                               /*checked*/ true);
        else
                kmsan_internal_poison_memory((void *)ptr, size, flags,
                                             KMSAN_POISON_CHECK);
        kmsan_leave_runtime();
}

void kmsan_kfree_large(const void *ptr)
{
        struct page *page;

        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        kmsan_enter_runtime();
        page = virt_to_head_page((void *)ptr);
        KMSAN_WARN_ON(ptr != page_address(page));
        kmsan_internal_poison_memory((void *)ptr, page_size(page), GFP_KERNEL,
                                     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
        kmsan_leave_runtime();
}

static unsigned long vmalloc_shadow(unsigned long addr)
{
        return (unsigned long)kmsan_get_metadata((void *)addr,
                                                 KMSAN_META_SHADOW);
}

static unsigned long vmalloc_origin(unsigned long addr)
{
        return (unsigned long)kmsan_get_metadata((void *)addr,
                                                 KMSAN_META_ORIGIN);
}

void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
{
        __vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
        __vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
        flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
        flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
}

/*
 * This function creates new shadow/origin pages for the physical pages mapped
 * into the virtual memory. If those physical pages already had shadow/origin,
 * those are ignored.
 */
int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
                             phys_addr_t phys_addr, pgprot_t prot,
                             unsigned int page_shift)
{
        gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
        struct page *shadow, *origin;
        unsigned long off = 0;
        int nr, err = 0, clean = 0, mapped;

        if (!kmsan_enabled || kmsan_in_runtime())
                return 0;

        nr = (end - start) / PAGE_SIZE;
        kmsan_enter_runtime();
        for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
                shadow = alloc_pages(gfp_mask, 1);
                origin = alloc_pages(gfp_mask, 1);
                if (!shadow || !origin) {
                        err = -ENOMEM;
                        goto ret;
                }
                mapped = __vmap_pages_range_noflush(
                        vmalloc_shadow(start + off),
                        vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
                        PAGE_SHIFT);
                if (mapped) {
                        err = mapped;
                        goto ret;
                }
                shadow = NULL;

                mapped = __vmap_pages_range_noflush(
                        vmalloc_origin(start + off),
                        vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
                        PAGE_SHIFT);
                if (mapped) {
                        __vunmap_range_noflush(
                                vmalloc_shadow(start + off),
                                vmalloc_shadow(start + off + PAGE_SIZE));
                        err = mapped;
                        goto ret;
                }
                origin = NULL;
        }
        /* Page mapping loop finished normally, nothing to clean up. */
        clean = 0;

ret:
        if (clean > 0) {
                /*
                 * Something went wrong. Clean up shadow/origin pages allocated
                 * on the last loop iteration, then delete mappings created
                 * during the previous iterations.
                 */
                if (shadow)
                        __free_pages(shadow, 1);
                if (origin)
                        __free_pages(origin, 1);
                __vunmap_range_noflush(
                        vmalloc_shadow(start),
                        vmalloc_shadow(start + clean * PAGE_SIZE));
                __vunmap_range_noflush(
                        vmalloc_origin(start),
                        vmalloc_origin(start + clean * PAGE_SIZE));
        }
        flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
        flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
        kmsan_leave_runtime();
        return err;
}

void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
{
        unsigned long v_shadow, v_origin;
        struct page *shadow, *origin;
        int nr;

        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        nr = (end - start) / PAGE_SIZE;
        kmsan_enter_runtime();
        v_shadow = (unsigned long)vmalloc_shadow(start);
        v_origin = (unsigned long)vmalloc_origin(start);
        for (int i = 0; i < nr;
             i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
                shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
                origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
                __vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
                __vunmap_range_noflush(v_origin, vmalloc_origin(end));
                if (shadow)
                        __free_pages(shadow, 1);
                if (origin)
                        __free_pages(origin, 1);
        }
        flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
        flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
        kmsan_leave_runtime();
}

void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
                        size_t left)
{
        unsigned long ua_flags;

        if (!kmsan_enabled || kmsan_in_runtime())
                return;
        /*
         * At this point we've copied the memory already. It's hard to check it
         * before copying, as the size of actually copied buffer is unknown.
         */

        /* copy_to_user() may copy zero bytes. No need to check. */
        if (!to_copy)
                return;
        /* Or maybe copy_to_user() failed to copy anything. */
        if (to_copy <= left)
                return;

        ua_flags = user_access_save();
        if (!IS_ENABLED(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) ||
            (u64)to < TASK_SIZE) {
                /* This is a user memory access, check it. */
                kmsan_internal_check_memory((void *)from, to_copy - left, to,
                                            REASON_COPY_TO_USER);
        } else {
                /*
                 * Otherwise this is a kernel memory access. This happens when
                 * a compat syscall passes an argument allocated on the kernel
                 * stack to a real syscall.
                 * Don't check anything, just copy the shadow of the copied
                 * bytes.
                 */
                kmsan_internal_memmove_metadata((void *)to, (void *)from,
                                                to_copy - left);
        }
        user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_copy_to_user);

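/*
 * Illustrative sketch (not part of the original file): the kind of bug the
 * user-access branch above catches. The structure and variable names are
 * hypothetical.
 *
 *	struct foo_stats s;			// padding/fields never written
 *	s.count = 42;
 *	if (copy_to_user(ubuf, &s, sizeof(s)))	// kmsan_copy_to_user() checks
 *		return -EFAULT;			// the copied source bytes and
 *						// reports the uninitialized
 *						// ones with REASON_COPY_TO_USER
 */
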
void kmsan_memmove(void *to, const void *from, size_t size)
{
        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        kmsan_enter_runtime();
        kmsan_internal_memmove_metadata(to, (void *)from, size);
        kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_memmove);

/* Helper function to check an URB. */
void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
        if (!urb)
                return;

        if (is_out)
                kmsan_internal_check_memory(urb->transfer_buffer,
                                            urb->transfer_buffer_length,
                                            /*user_addr*/ NULL,
                                            REASON_SUBMIT_URB);
        else
                kmsan_internal_unpoison_memory(urb->transfer_buffer,
                                               urb->transfer_buffer_length,
                                               /*checked*/ false);
}
EXPORT_SYMBOL_GPL(kmsan_handle_urb);

static void kmsan_handle_dma_page(const void *addr, size_t size,
                                  enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
                kmsan_internal_check_memory((void *)addr, size,
                                            /*user_addr*/ NULL, REASON_ANY);
                kmsan_internal_unpoison_memory((void *)addr, size,
                                               /*checked*/ false);
                break;
        case DMA_TO_DEVICE:
                kmsan_internal_check_memory((void *)addr, size,
                                            /*user_addr*/ NULL, REASON_ANY);
                break;
        case DMA_FROM_DEVICE:
                kmsan_internal_unpoison_memory((void *)addr, size,
                                               /*checked*/ false);
                break;
        case DMA_NONE:
                break;
        }
}

/* Helper function to handle DMA data transfers. */
void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
                      enum dma_data_direction dir)
{
        u64 page_offset, to_go, addr;

        if (PageHighMem(page))
                return;
        addr = (u64)page_address(page) + offset;
        /*
         * The kernel may occasionally give us adjacent DMA pages not belonging
         * to the same allocation. Process them separately to avoid triggering
         * internal KMSAN checks.
         */
        while (size > 0) {
                page_offset = offset_in_page(addr);
                to_go = min(PAGE_SIZE - page_offset, (u64)size);
                kmsan_handle_dma_page((void *)addr, to_go, dir);
                addr += to_go;
                size -= to_go;
        }
}
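/*
 * Worked example (not part of the original file), assuming PAGE_SIZE == 4096:
 * a 6000-byte buffer starting at page offset 2048 is split by the loop above
 * as follows:
 *
 *	iteration 1: page_offset = 2048, to_go = 4096 - 2048 = 2048
 *	iteration 2: page_offset = 0,    to_go = min(4096, 3952) = 3952
 *
 * so each chunk passed to kmsan_handle_dma_page() stays within one page.
 */
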
void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
                         enum dma_data_direction dir)
{
        struct scatterlist *item;
        int i;

        for_each_sg(sg, item, nents, i)
                kmsan_handle_dma(sg_page(item), item->offset, item->length,
                                 dir);
}

/* Functions from kmsan-checks.h follow. */

/*
 * To create an origin, kmsan_poison_memory() unwinds the stack and stores the
 * trace in the stack depot. This may cause deadlocks if done from within the
 * KMSAN runtime, therefore we bail out if kmsan_in_runtime().
 */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        kmsan_enter_runtime();
        /* The users may want to poison/unpoison random memory. */
        kmsan_internal_poison_memory((void *)address, size, flags,
                                     KMSAN_POISON_NOCHECK);
        kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_poison_memory);

/*
 * Unlike kmsan_poison_memory(), this function can be used from within KMSAN
 * runtime, because it does not trigger allocations or call instrumented code.
 */
void kmsan_unpoison_memory(const void *address, size_t size)
{
        unsigned long ua_flags;

        if (!kmsan_enabled)
                return;

        ua_flags = user_access_save();
        /* The users may want to poison/unpoison random memory. */
        kmsan_internal_unpoison_memory((void *)address, size,
                                       KMSAN_POISON_NOCHECK);
        user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);

/*
 * Version of kmsan_unpoison_memory() called from IRQ entry functions.
 */
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
        kmsan_unpoison_memory((void *)regs, sizeof(*regs));
}

void kmsan_check_memory(const void *addr, size_t size)
{
        if (!kmsan_enabled)
                return;
        return kmsan_internal_check_memory((void *)addr, size,
                                           /*user_addr*/ NULL, REASON_ANY);
}
EXPORT_SYMBOL(kmsan_check_memory);
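/*
 * Illustrative sketch (not part of the original file): how a driver might use
 * the kmsan-checks.h annotations implemented above. The buffer and the
 * surrounding situations are hypothetical.
 *
 *	// A device filled 'buf' through a channel KMSAN cannot observe;
 *	// tell KMSAN the data is now initialized.
 *	kmsan_unpoison_memory(buf, len);
 *
 *	// 'buf' is about to leave the kernel through a side channel;
 *	// make sure every byte of it has been initialized.
 *	kmsan_check_memory(buf, len);
 *
 *	// 'buf' is being recycled; treat its old contents as garbage again.
 *	kmsan_poison_memory(buf, len, GFP_KERNEL);
 */
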
void kmsan_enable_current(void)
{
        KMSAN_WARN_ON(current->kmsan_ctx.depth == 0);
        current->kmsan_ctx.depth--;
}
EXPORT_SYMBOL(kmsan_enable_current);

void kmsan_disable_current(void)
{
        current->kmsan_ctx.depth++;
        KMSAN_WARN_ON(current->kmsan_ctx.depth == 0);
}
EXPORT_SYMBOL(kmsan_disable_current);
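/*
 * Illustrative sketch (not part of the original file): kmsan_disable_current()
 * and kmsan_enable_current() adjust a per-task depth counter, so they may be
 * nested, but every disable should eventually be paired with an enable:
 *
 *	kmsan_disable_current();	// depth: 0 -> 1, KMSAN muted for current
 *	do_something_noisy();		// may nest its own disable/enable pair
 *	kmsan_enable_current();		// depth: 1 -> 0, KMSAN active again
 *
 * "do_something_noisy" is a hypothetical name used only for this example.
 */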