// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN shadow implementation.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 */

#include <asm/kmsan.h>
#include <asm/tlbflush.h>
#include <linux/cacheflush.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>

#include "../internal.h"
#include "kmsan.h"

#define shadow_page_for(page) ((page)->kmsan_shadow)
#define origin_page_for(page) ((page)->kmsan_origin)
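
/*
 * Accessors for the shadow and origin pages attached to @page via
 * page->kmsan_shadow and page->kmsan_origin.
 */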
static void *shadow_ptr_for(struct page *page)
{
	return page_address(shadow_page_for(page));
}

static void *origin_ptr_for(struct page *page)
{
	return page_address(origin_page_for(page));
}

static bool page_has_metadata(struct page *page)
{
	return shadow_page_for(page) && origin_page_for(page);
}

static void set_no_shadow_origin_page(struct page *page)
{
	shadow_page_for(page) = NULL;
	origin_page_for(page) = NULL;
}
/*
 * Dummy load and store pages to be used when the real metadata is unavailable.
 * There are separate pages for loads and stores, so that every load returns
 * zero, and no store can affect subsequent loads.
 */
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);
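
/*
 * Translate an address from the vmalloc or modules region into the address
 * of its shadow (is_origin == false) or origin (is_origin == true) metadata,
 * which live at fixed offsets from those regions. Returns 0 for addresses
 * outside both regions.
 */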
static unsigned long vmalloc_meta(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr, off;

	KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
	if (kmsan_internal_is_vmalloc_addr(addr)) {
		off = addr64 - VMALLOC_START;
		return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
					  KMSAN_VMALLOC_SHADOW_START);
	}
	if (kmsan_internal_is_module_addr(addr)) {
		off = addr64 - MODULES_VADDR;
		return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
					  KMSAN_MODULES_SHADOW_START);
	}
	return 0;
}
static struct page *virt_to_page_or_null(void *vaddr)
{
	if (kmsan_virt_addr_valid(vaddr))
		return virt_to_page(vaddr);
	else
		return NULL;
}
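
/*
 * Return the shadow/origin pointers for an access of @size bytes at @address.
 * When no metadata is available, the access is redirected to the dummy pages:
 * stores land in dummy_store_page and are discarded, loads read zeroes from
 * dummy_load_page.
 */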
struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
						     bool store)
{
	struct shadow_origin_ptr ret;
	void *shadow;

	/*
	 * Even if we redirect this memory access to the dummy page, it will
	 * go out of bounds.
	 */
	KMSAN_WARN_ON(size > PAGE_SIZE);

	if (!kmsan_enabled)
		goto return_dummy;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
	shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
	if (!shadow)
		goto return_dummy;

	ret.shadow = shadow;
	ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
	return ret;

return_dummy:
	if (store) {
		/* Ignore this store. */
		ret.shadow = dummy_store_page;
		ret.origin = dummy_store_page;
	} else {
		/* This load will return zero. */
		ret.shadow = dummy_load_page;
		ret.origin = dummy_load_page;
	}
	return ret;
}
/*
 * Obtain the shadow or origin pointer for the given address, or NULL if
 * there's none. The caller must check the return value for being non-NULL
 * if needed. The return value of this function should not depend on whether
 * we're in the runtime or not.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	u64 addr = (u64)address, off;
	struct page *page;
	void *ret;

	if (is_origin)
		addr = ALIGN_DOWN(addr, KMSAN_ORIGIN_SIZE);
	address = (void *)addr;
	if (kmsan_internal_is_vmalloc_addr(address) ||
	    kmsan_internal_is_module_addr(address))
		return (void *)vmalloc_meta(address, is_origin);

	ret = arch_kmsan_get_meta_or_null(address, is_origin);
	if (ret)
		return ret;

	page = virt_to_page_or_null(address);
	if (!page)
		return NULL;
	if (!page_has_metadata(page))
		return NULL;
	off = offset_in_page(addr);

	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
}
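
/*
 * Copy the shadow and origin of @src into the metadata of @dst. If @src has
 * no metadata, mark @dst as fully initialized instead.
 */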
void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	if (!dst || !page_has_metadata(dst))
		return;
	if (!src || !page_has_metadata(src)) {
		kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
					       /*checked*/ false);
		return;
	}

	kmsan_enter_runtime();
	__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
	__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_copy_page_meta);
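
/*
 * Initialize the metadata of pages returned by the page allocator: pages
 * allocated with __GFP_ZERO (or with KMSAN disabled) get zero shadow;
 * everything else is poisoned as uninitialized, with the origins pointing
 * to the allocation stack.
 */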
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
{
	bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
	struct page *shadow, *origin;
	depot_stack_handle_t handle;
	int pages = 1 << order;

	if (!page)
		return;

	shadow = shadow_page_for(page);
	origin = origin_page_for(page);

	if (initialized) {
		__memset(page_address(shadow), 0, PAGE_SIZE * pages);
		__memset(page_address(origin), 0, PAGE_SIZE * pages);
		return;
	}

	/* Zero pages allocated by the runtime should also be initialized. */
	if (kmsan_in_runtime())
		return;

	__memset(page_address(shadow), -1, PAGE_SIZE * pages);
	kmsan_enter_runtime();
	handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
	kmsan_leave_runtime();
	/*
	 * Addresses are page-aligned, pages are contiguous, so it's ok
	 * to just fill the origin pages with @handle.
	 */
	for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
		((depot_stack_handle_t *)page_address(origin))[i] = handle;
}
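
/*
 * Poison the metadata of a freed page, so that subsequent reads of its
 * contents are reported as uses of uninitialized memory.
 */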
void kmsan_free_page(struct page *page, unsigned int order)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(page_address(page), page_size(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}
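
/*
 * Mirror a new vmalloc mapping in the metadata regions: map the shadow and
 * origin pages of @pages at the fixed offsets corresponding to
 * [@start, @end).
 */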
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift)
{
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
	int nr, mapped, err = 0;

	if (!kmsan_enabled)
		return 0;

	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
		return 0;

	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
	if (!s_pages || !o_pages) {
		err = -ENOMEM;
		goto ret;
	}
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
	}
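	/*
	 * Override the caller's @prot: metadata must always be mapped
	 * writable, even if the data pages themselves use a stricter
	 * protection.
	 */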
	prot = PAGE_KERNEL;

	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
	origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
	if (mapped) {
		err = mapped;
		kmsan_leave_runtime();
		goto ret;
	}
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
	if (mapped) {
		err = mapped;
		kmsan_leave_runtime();
		goto ret;
	}
	kmsan_leave_runtime();

	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);

ret:
	kfree(s_pages);
	kfree(o_pages);
	return err;
}
/* Allocate metadata for pages allocated at boot time. */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
{
	struct page *shadow_p, *origin_p;
	void *shadow, *origin;
	struct page *page;
	u64 size;

	start = (void *)PAGE_ALIGN_DOWN((u64)start);
	size = PAGE_ALIGN((u64)end - (u64)start);
	shadow = memblock_alloc(size, PAGE_SIZE);
	origin = memblock_alloc(size, PAGE_SIZE);

	if (!shadow || !origin)
		panic("%s: Failed to allocate metadata memory for early boot range of size %llu",
		      __func__, size);

	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
		page = virt_to_page_or_null((char *)start + addr);
		shadow_p = virt_to_page((char *)shadow + addr);
		set_no_shadow_origin_page(shadow_p);
		shadow_page_for(page) = shadow_p;
		origin_p = virt_to_page((char *)origin + addr);
		set_no_shadow_origin_page(origin_p);
		origin_page_for(page) = origin_p;
	}
}
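
/*
 * Attach @shadow and @origin as the metadata of the 1 << order pages
 * starting at @page, clearing the metadata pointers of the metadata pages
 * themselves.
 */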
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order)
{
	for (int i = 0; i < (1 << order); i++) {
		set_no_shadow_origin_page(&shadow[i]);
		set_no_shadow_origin_page(&origin[i]);
		shadow_page_for(&page[i]) = &shadow[i];
		origin_page_for(&page[i]) = &origin[i];
	}
}