// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>
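
/*
 * Protection bits to set and clear on each page table entry visited by the
 * walk; handed to the walk callbacks through mm_walk->private.
 */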
struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};
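
/* Apply the set/clear masks from walk->private to a raw page table entry value. */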
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}
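
/*
 * Per-level walk callbacks: a leaf entry is rewritten in place with the
 * requested masks, while a non-leaf entry is left untouched so the walker
 * descends to the next level.
 */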
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = p4dp_get(p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = pudp_get(pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = pmdp_get(pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = ptep_get(pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}
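
/* Walk ops used to apply the protection masks to every mapping of a range. */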
static const struct mm_walk_ops pageattr_ops = {
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
	.walk_lock = PGWALK_RDLOCK,
};

#ifdef CONFIG_64BIT
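/*
 * The linear mapping may be built with huge leaf entries (P4D/PUD/PMD).
 * Before changing protections at page granularity, any leaf entry covering
 * part of the range must be split into a next-level table that carries the
 * same protections. The helpers below perform that split, one level per
 * function.
 */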
static int __split_linear_mapping_pmd(pud_t *pudp,
				      unsigned long vaddr, unsigned long end)
{
	pmd_t *pmdp;
	unsigned long next;

	pmdp = pmd_offset(pudp, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (next - vaddr >= PMD_SIZE &&
		    vaddr <= (vaddr & PMD_MASK) && end >= next)
			continue;

		if (pmd_leaf(pmdp_get(pmdp))) {
			struct page *pte_page;
			unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
			pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
			pte_t *ptep_new;
			int i;

			pte_page = alloc_page(GFP_KERNEL);
			if (!pte_page)
				return -ENOMEM;

			ptep_new = (pte_t *)page_address(pte_page);
			for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
				set_pte(ptep_new, pfn_pte(pfn + i, prot));

			smp_wmb();

			set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
		}
	} while (pmdp++, vaddr = next, vaddr != end);

	return 0;
}
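
/* Same as above, one level up: split leaf PUDs, then recurse into the PMDs. */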
static int __split_linear_mapping_pud(p4d_t *p4dp,
				      unsigned long vaddr, unsigned long end)
{
	pud_t *pudp;
	unsigned long next;
	int ret;

	pudp = pud_offset(p4dp, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (next - vaddr >= PUD_SIZE &&
		    vaddr <= (vaddr & PUD_MASK) && end >= next)
			continue;

		if (pud_leaf(pudp_get(pudp))) {
			struct page *pmd_page;
			unsigned long pfn = _pud_pfn(pudp_get(pudp));
			pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
			pmd_t *pmdp_new;
			int i;

			pmd_page = alloc_page(GFP_KERNEL);
			if (!pmd_page)
				return -ENOMEM;

			pmdp_new = (pmd_t *)page_address(pmd_page);
			for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
				set_pmd(pmdp_new,
					pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));

			smp_wmb();

			set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
		}

		ret = __split_linear_mapping_pmd(pudp, vaddr, next);
		if (ret)
			return ret;
	} while (pudp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_p4d(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp;
	unsigned long next;
	int ret;

	p4dp = p4d_offset(pgdp, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		/*
		 * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't
		 * need to split, we'll change the protections on the whole P4D.
		 */
		if (next - vaddr >= P4D_SIZE &&
		    vaddr <= (vaddr & P4D_MASK) && end >= next)
			continue;

		if (p4d_leaf(p4dp_get(p4dp))) {
			struct page *pud_page;
			unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
			pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
			pud_t *pudp_new;
			int i;

			pud_page = alloc_page(GFP_KERNEL);
			if (!pud_page)
				return -ENOMEM;

			/*
			 * Fill the pud level with leaf puds that have the same
			 * protections as the leaf p4d.
			 */
			pudp_new = (pud_t *)page_address(pud_page);
			for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
				set_pud(pudp_new,
					pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));

			/*
			 * Make sure the pud filling is not reordered with the
			 * p4d store which could result in seeing a partially
			 * filled pud level.
			 */
			smp_wmb();

			set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
		}

		ret = __split_linear_mapping_pud(p4dp, vaddr, next);
		if (ret)
			return ret;
	} while (p4dp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_pgd(pgd_t *pgdp,
				      unsigned long vaddr,
				      unsigned long end)
{
	unsigned long next;
	int ret;

	do {
		next = pgd_addr_end(vaddr, end);
		/* We never use PGD mappings for the linear mapping */
		ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
		if (ret)
			return ret;
	} while (pgdp++, vaddr = next, vaddr != end);

	return 0;
}

static int split_linear_mapping(unsigned long start, unsigned long end)
{
	return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
}
#endif	/* CONFIG_64BIT */
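
/*
 * Apply set_mask/clear_mask to every mapping of [addr; addr + numpages *
 * PAGE_SIZE). On 64-bit, the same change is also applied to the linear
 * mapping alias of the range, which may first require splitting huge
 * mappings.
 */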
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	unsigned long __maybe_unused lm_start;
	unsigned long __maybe_unused lm_end;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_write_lock(&init_mm);

#ifdef CONFIG_64BIT
	/*
	 * We are about to change the permissions of a kernel mapping, we must
	 * apply the same changes to its linear mapping alias, which may imply
	 * splitting a huge mapping.
	 */
	if (is_vmalloc_or_module_addr((void *)start)) {
		struct vm_struct *area = NULL;
		int i, page_start;

		area = find_vm_area((void *)start);
		page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;

		for (i = page_start; i < page_start + numpages; ++i) {
			lm_start = (unsigned long)page_address(area->pages[i]);
			lm_end = lm_start + PAGE_SIZE;

			ret = split_linear_mapping(lm_start, lm_end);
			if (ret)
				goto unlock;

			ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
						    &pageattr_ops, NULL, &masks);
			if (ret)
				goto unlock;
		}
	} else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
		if (is_kernel_mapping(start)) {
			lm_start = (unsigned long)lm_alias(start);
			lm_end = (unsigned long)lm_alias(end);
		} else {
			lm_start = start;
			lm_end = end;
		}

		ret = split_linear_mapping(lm_start, lm_end);
		if (ret)
			goto unlock;

		ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
					    &pageattr_ops, NULL, &masks);
		if (ret)
			goto unlock;
	}

	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);

unlock:
	mmap_write_unlock(&init_mm);

	/*
	 * We can't use flush_tlb_kernel_range() here as we may have split a
	 * hugepage that is larger than that, so let's flush everything.
	 */
	flush_tlb_all();
#else
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);

	mmap_write_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);
#endif

	return ret;
}
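
/*
 * Arch implementations of the generic set_memory_*() API: each helper applies
 * a fixed (set, clear) mask pair to the given range. Illustrative use by a
 * hypothetical caller that temporarily write-protects a range of pages:
 *
 *	set_memory_ro(addr, nr_pages);
 *	...
 *	set_memory_rw(addr, nr_pages);
 */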
int set_memory_rw_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(_PAGE_EXEC));
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}
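
/*
 * Make a single page of the linear (direct) mapping not-present, or restore
 * its default kernel protections. No TLB flush is done here; that is left to
 * the caller.
 */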
int set_direct_map_invalid_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_address(page), 1,
			    __pgprot(0), __pgprot(_PAGE_PRESENT));
}

int set_direct_map_default_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_address(page), 1,
			    PAGE_KERNEL, __pgprot(_PAGE_EXEC));
}

#ifdef CONFIG_DEBUG_PAGEALLOC
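/* Callback for apply_to_existing_page_range(): toggle _PAGE_PRESENT on one PTE. */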
static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
{
	int enable = *(int *)data;
	unsigned long val = pte_val(ptep_get(pte));

	if (enable)
		val |= _PAGE_PRESENT;
	else
		val &= ~_PAGE_PRESENT;

	set_pte(pte, __pte(val));

	return 0;
}

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	unsigned long start = (unsigned long)page_address(page);
	unsigned long size = PAGE_SIZE * numpages;

	apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);

	flush_tlb_kernel_range(start, start + size);
}
#endif
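
/*
 * Walk the kernel page tables and report whether the given page is mapped,
 * returning true as soon as a leaf entry is found at any level.
 */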
bool kernel_page_present(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgd_t *pgd;
	pud_t *pud;
	p4d_t *p4d;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (!pgd_present(pgdp_get(pgd)))
		return false;
	if (pgd_leaf(pgdp_get(pgd)))
		return true;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(p4dp_get(p4d)))
		return false;
	if (p4d_leaf(p4dp_get(p4d)))
		return true;

	pud = pud_offset(p4d, addr);
	if (!pud_present(pudp_get(pud)))
		return false;
	if (pud_leaf(pudp_get(pud)))
		return true;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(pmdp_get(pmd)))
		return false;
	if (pmd_leaf(pmdp_get(pmd)))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_present(ptep_get(pte));
}