kasan_init.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation
#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the start of the region is aligned on PGDIR_SIZE whereas
 * the end is not, so we have to go down to the PUD level.
 */

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
static pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;
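
/*
 * Populate the PTEs covering [vaddr, end): allocate the PTE table first if
 * the PMD entry is still empty, then back each missing PTE with a freshly
 * allocated shadow page pre-filled with KASAN_SHADOW_INIT.
 */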
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *p;

	if (pmd_none(pmdp_get(pmd))) {
		p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	ptep = pte_offset_kernel(pmd, vaddr);

	do {
		if (pte_none(ptep_get(ptep))) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
			memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
}
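
/*
 * Populate the PMD entries covering [vaddr, end). Chunks that are PMD-aligned
 * and at least PMD_SIZE long get a single huge shadow mapping when a
 * physically contiguous PMD_SIZE allocation succeeds; otherwise we fall back
 * to per-PTE population. The PUD, P4D and PGD variants below apply the same
 * strategy one level up each.
 */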
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *p;
	unsigned long next;

	if (pud_none(pudp_get(pud))) {
		p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
		set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pmdp = pmd_offset(pud, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
		    (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE);
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);
}

static void __init kasan_populate_pud(p4d_t *p4d,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *p;
	unsigned long next;

	if (p4d_none(p4dp_get(p4d))) {
		p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pudp = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
			if (phys_addr) {
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);
				continue;
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *p;
	unsigned long next;

	if (pgd_none(pgdp_get(pgd))) {
		p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	p4dp = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
			if (phys_addr) {
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, P4D_SIZE);
				continue;
			}
		}

		kasan_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PGDIR_SIZE);
				continue;
			}
		}

		kasan_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
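
/*
 * The kasan_early_clear_* helpers tear down the early shadow mapping over
 * [vaddr, end) before it is replaced with real shadow memory. Only ranges
 * aligned on the level being cleared are expected, hence the BUG() on a
 * misaligned leaf range.
 */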
static void __init kasan_early_clear_pud(p4d_t *p4dp,
					 unsigned long vaddr, unsigned long end)
{
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			pud_clear(pudp);
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_clear_p4d(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (pgtable_l4_enabled && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			p4d_clear(p4dp);
			continue;
		}

		kasan_early_clear_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_clear_pgd(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgtable_l5_enabled && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			pgd_clear(pgdp);
			continue;
		}

		kasan_early_clear_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
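
/*
 * The kasan_early_populate_* helpers point the whole range [vaddr, end) at
 * the shared early shadow page tables, so that any shadow access resolves to
 * the zero-filled kasan_early_shadow_page until real shadow memory is
 * allocated in kasan_init().
 */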
static void __init kasan_early_populate_pud(p4d_t *p4dp,
					    unsigned long vaddr,
					    unsigned long end)
{
	pud_t *pudp, *base_pud;
	phys_addr_t phys_addr;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_p4d(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	phys_addr_t phys_addr;
	unsigned long next;

	/*
	 * We can't use pgd_page_vaddr here as it would return a linear
	 * mapping address, but the linear mapping is not mapped yet: when
	 * populating early_pg_dir we need the physical address, and when
	 * populating swapper_pg_dir we need the kernel virtual address, so
	 * use the pt_ops facility.
	 * Note that this is then completely equivalent to
	 * p4dp = p4d_offset(pgdp, vaddr).
	 */
	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_pgd(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
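
/*
 * Wire the shared early shadow tables together (each level pointing at the
 * level below, bottoming out at kasan_early_shadow_page) and map the entire
 * shadow region onto them through early_pg_dir.
 */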
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		     KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pud)),
					PAGE_TABLE));
	}

	kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}
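
/* Redo the early shadow mapping in swapper_pg_dir once it takes over from early_pg_dir. */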
void __init kasan_swapper_init(void)
{
	kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}
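
/* Fully populate the shadow for the page-aligned range [start, end). */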
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);
}
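
/*
 * Shallow population only allocates the intermediate page tables down to the
 * PUD level; with KASAN_VMALLOC the leaf shadow pages for vmalloc and module
 * space are populated on demand by the generic vmalloc shadow hooks.
 */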
static void __init kasan_shallow_populate_pud(p4d_t *p4d,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pud_t *pud_k = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(pudp_get(pud_k))) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pud_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	p4d_t *p4d_k = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(p4dp_get(p4d_k))) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		kasan_shallow_populate_pud(p4d_k, vaddr, next);
	} while (p4d_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(pgdp_get(pgd_k))) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		kasan_shallow_populate_p4d(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
}
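
/*
 * vm areas registered before vmalloc is up (e.g. the early per-cpu areas)
 * bypass the vmalloc shadow hooks, so back their shadow eagerly here.
 */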
#ifdef CONFIG_KASAN_VMALLOC
void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
{
	kasan_populate(kasan_mem_to_shadow(start),
		       kasan_mem_to_shadow(start + size));
}
#endif

static void __init create_tmp_mapping(void)
{
	void *ptr;
	p4d_t *base_p4d;

	/*
	 * We need to clean the early mapping: this is hard to achieve "in-place",
	 * so install a temporary mapping like arm64 and x86 do.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(pgd_t) * PTRS_PER_PGD);

	/* Copy the last p4d since it is shared with the kernel mapping. */
	if (pgtable_l5_enabled) {
		ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
		memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
		set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
			pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
		base_p4d = tmp_p4d;
	} else {
		base_p4d = (p4d_t *)tmp_pg_dir;
	}

	/* Copy the last pud since it is shared with the kernel mapping. */
	if (pgtable_l4_enabled) {
		ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
		memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
		set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
			pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
	}
}
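
/*
 * Main KASAN setup: run on the temporary page table while the early shadow
 * mapping is torn down, populate real shadow for the fixmap, vmalloc,
 * modules, linear-mapping and kernel ranges, then switch back to
 * swapper_pg_dir with the shadow fully initialized.
 */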
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	create_tmp_mapping();
	csr_write(CSR_SATP, PFN_DOWN(__pa(tmp_pg_dir)) | satp_mode);

	kasan_early_clear_pgd(pgd_offset_k(KASAN_SHADOW_START),
			      KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)FIXADDR_START),
				    (void *)kasan_mem_to_shadow((void *)VMALLOC_START));

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
		/* Shallow populate modules and BPF which are vmalloc-allocated */
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)MODULES_VADDR),
			(void *)kasan_mem_to_shadow((void *)MODULES_END));
	} else {
		kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)VMALLOC_START),
					    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	}

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_END),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	/*
	 * The early shadow page was written to during boot: remap it
	 * read-only and wipe it so it can serve as the permanent zero shadow.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;

	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
	local_flush_tlb_all();
}