pgtable.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains common routines for dealing with freeing of page
 * tables, along with common page table handling code.
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 * Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_PPC64
#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
#else
#define PGD_ALIGN PAGE_SIZE
#endif

pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);
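
/*
 * An exec fault shows up as an instruction storage interrupt, which on
 * powerpc uses exception vector 0x400, hence the TRAP() check below.
 */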
static inline int is_exec_fault(void)
{
        return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * and we avoid _PAGE_SPECIAL and cache-inhibited PTEs. We also only do
 * that on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte, unsigned long addr)
{
        if (pte_present(pte) && !pte_special(pte)) {
                if (pte_ci(pte))
                        return 0;
                if (!is_kernel_addr(addr))
                        return 1;
        }
        return 0;
}
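
/*
 * Look up the folio backing a PTE so cache maintenance can be done on it.
 * Returns NULL for invalid pfns and reserved pages, which must be left alone.
 */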
static struct folio *maybe_pte_to_folio(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (unlikely(!pfn_valid(pfn)))
                return NULL;
        page = pfn_to_page(pfn);
        if (PageReserved(page))
                return NULL;
        return page_folio(page);
}

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, we always flush
 * the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */
static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
{
        pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
        if (pte_looks_normal(pte, addr) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
                                             cpu_has_feature(CPU_FTR_NOEXECUTE))) {
                struct folio *folio = maybe_pte_to_folio(pte);

                if (!folio)
                        return pte;
                if (!test_bit(PG_dcache_clean, &folio->flags)) {
                        flush_dcache_icache_folio(folio);
                        set_bit(PG_dcache_clean, &folio->flags);
                }
        }
        return pte;
}

#else /* CONFIG_PPC_BOOK3S */

static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) { return pte; }

#endif /* CONFIG_PPC_BOOK3S */

/* Embedded type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 *
 * This is also called once per folio, so only work with folio->flags here.
 */
static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
        struct folio *folio;

        if (radix_enabled())
                return pte;

        if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return set_pte_filter_hash(pte, addr);

        /* No exec permission in the first place, move on */
        if (!pte_exec(pte) || !pte_looks_normal(pte, addr))
                return pte;

        /* If you set _PAGE_EXEC on weird pages you're on your own */
        folio = maybe_pte_to_folio(pte);
        if (unlikely(!folio))
                return pte;

        /* If the page is clean, we move on */
        if (test_bit(PG_dcache_clean, &folio->flags))
                return pte;

        /* If it's an exec fault, we flush the cache and make it clean */
        if (is_exec_fault()) {
                flush_dcache_icache_folio(folio);
                set_bit(PG_dcache_clean, &folio->flags);
                return pte;
        }

        /* Else, we filter out _PAGE_EXEC */
        return pte_exprotect(pte);
}
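
/*
 * Counterpart of set_pte_filter() above for "access flags" updates: on an
 * exec fault, flush I$/D$ for the folio if it isn't already clean and give
 * _PAGE_EXEC back to the PTE that set_pte_filter() stripped it from.
 */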
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
                                     int dirty)
{
        struct folio *folio;

        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
                return pte;

        if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return pte;

        /* So here, we only care about exec faults, as we use them
         * to recover lost _PAGE_EXEC and perform I$/D$ coherency
         * if necessary. Also if _PAGE_EXEC is already set, same deal,
         * we just bail out
         */
        if (dirty || pte_exec(pte) || !is_exec_fault())
                return pte;

#ifdef CONFIG_DEBUG_VM
        /* So this is an exec fault, _PAGE_EXEC is not set. If it was
         * an error we would have bailed out earlier in do_page_fault()
         * but let's make sure of it
         */
        if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
                return pte;
#endif /* CONFIG_DEBUG_VM */

        /* If you set _PAGE_EXEC on weird pages you're on your own */
        folio = maybe_pte_to_folio(pte);
        if (unlikely(!folio))
                goto bail;

        /* If the page is already clean, we move on */
        if (test_bit(PG_dcache_clean, &folio->flags))
                goto bail;

        /* Clean the page and set PG_dcache_clean */
        flush_dcache_icache_folio(folio);
        set_bit(PG_dcache_clean, &folio->flags);

bail:
        return pte_mkexec(pte);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
              pte_t pte, unsigned int nr)
{
        /* Note: mm->context.id might not yet have been assigned as
         * this context might not have been activated yet when this
         * is called. Filter the pte value and use the filtered value
         * to setup all the ptes in the range.
         */
        pte = set_pte_filter(pte, addr);

        /*
         * We don't need to call arch_enter/leave_lazy_mmu_mode()
         * because we expect set_ptes to only be used on PTEs that are
         * not present and not hw_valid. Hence there is no translation
         * cache flush involved that needs to be batched.
         */
        for (;;) {
                /*
                 * Make sure hardware valid bit is not set. We don't do
                 * tlb flush for this update.
                 */
                VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

                /* Perform the setting of the PTE */
                __set_pte_at(mm, addr, ptep, pte, 0);
                if (--nr == 0)
                        break;
                ptep++;
                addr += PAGE_SIZE;
                pte = pte_next_pfn(pte);
        }
}
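
/*
 * Tear down the kernel mapping of a single page: clear its PTE in the
 * kernel page table and flush the corresponding TLB range.
 */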
void unmap_kernel_page(unsigned long va)
{
        pmd_t *pmdp = pmd_off_k(va);
        pte_t *ptep = pte_offset_kernel(pmdp, va);

        pte_clear(&init_mm, va, ptep);
        flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, ie, a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us, we additionally deal with missing execute
 * permission here on some processors
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pte_t *ptep, pte_t entry, int dirty)
{
        int changed;

        entry = set_access_flags_filter(entry, vma, dirty);
        changed = !pte_same(*(ptep), entry);
        if (changed) {
                assert_pte_locked(vma->vm_mm, address);
                __ptep_set_access_flags(vma, ptep, entry,
                                        address, mmu_virtual_psize);
        }
        return changed;
}

#ifdef CONFIG_HUGETLB_PAGE
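/*
 * Hugetlb counterpart of ptep_set_access_flags(); the psize passed down to
 * __ptep_set_access_flags() comes from the hugepage's hstate on Book3S-64.
 */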
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
        /*
         * The "return 1" forces a call of update_mmu_cache, which will write a
         * TLB entry. Without this, platforms that don't do a write of the TLB
         * entry in the TLB miss handler asm will fault ad infinitum.
         */
        ptep_set_access_flags(vma, addr, ptep, pte, dirty);
        return 1;
#else
        int changed, psize;

        pte = set_access_flags_filter(pte, vma, dirty);
        changed = !pte_same(*(ptep), pte);
        if (changed) {

#ifdef CONFIG_PPC_BOOK3S_64
                struct hstate *h = hstate_vma(vma);

                psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
                assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif
#else
                /*
                 * Not used on non book3s64 platforms.
                 * 8xx compares it with mmu_virtual_psize to
                 * know if it is a huge page or not.
                 */
                psize = MMU_PAGE_COUNT;
#endif
                __ptep_set_access_flags(vma, ptep, pte, addr, psize);
        }
        return changed;
#endif
}

#if defined(CONFIG_PPC_8xx)

#if defined(CONFIG_SPLIT_PTE_PTLOCKS) || defined(CONFIG_SPLIT_PMD_PTLOCKS)
/* We need the same lock to protect the PMD table and the two PTE tables. */
#error "8M hugetlb folios are incompatible with split page table locks"
#endif
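
/*
 * On 8xx a huge mapping spans several consecutive 4K hardware PTE cells.
 * Write the (incrementing) PTE value into every cell covered by this
 * mapping; the cell count comes from number_of_cells_per_pte().
 */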
static void __set_huge_pte_at(pmd_t *pmd, pte_t *ptep, pte_basic_t val)
{
        pte_basic_t *entry = (pte_basic_t *)ptep;
        int num, i;

        /*
         * Make sure hardware valid bit is not set. We don't do
         * tlb flush for this update.
         */
        VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

        num = number_of_cells_per_pte(pmd, val, 1);

        for (i = 0; i < num; i++, entry++, val += SZ_4K)
                *entry = val;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                     pte_t pte, unsigned long sz)
{
        pmd_t *pmdp = pmd_off(mm, addr);

        pte = set_pte_filter(pte, addr);

        if (sz == SZ_8M) { /* Flag both PMD entries as 8M and fill both page tables */
                *pmdp = __pmd(pmd_val(*pmdp) | _PMD_PAGE_8M);
                *(pmdp + 1) = __pmd(pmd_val(*(pmdp + 1)) | _PMD_PAGE_8M);

                __set_huge_pte_at(pmdp, pte_offset_kernel(pmdp, 0), pte_val(pte));
                __set_huge_pte_at(pmdp, pte_offset_kernel(pmdp + 1, 0), pte_val(pte) + SZ_4M);
        } else {
                __set_huge_pte_at(pmdp, ptep, pte_val(pte));
        }
}
#else
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                     pte_t pte, unsigned long sz)
{
        unsigned long pdsize;
        int i;

        pte = set_pte_filter(pte, addr);

        /*
         * Make sure hardware valid bit is not set. We don't do
         * tlb flush for this update.
         */
        VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

        if (sz < PMD_SIZE)
                pdsize = PAGE_SIZE;
        else if (sz < PUD_SIZE)
                pdsize = PMD_SIZE;
        else if (sz < P4D_SIZE)
                pdsize = PUD_SIZE;
        else if (sz < PGDIR_SIZE)
                pdsize = P4D_SIZE;
        else
                pdsize = PGDIR_SIZE;

        for (i = 0; i < sz / pdsize; i++, ptep++, addr += pdsize) {
                __set_pte_at(mm, addr, ptep, pte, 0);
                pte = __pte(pte_val(pte) + ((unsigned long long)pdsize / PAGE_SIZE << PFN_PTE_SHIFT));
        }
}
#endif
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_DEBUG_VM
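/*
 * Debug check that the page table lock covering @addr in @mm is held when
 * a PTE is updated. init_mm and none pmds are skipped (see the khugepaged
 * comment below).
 */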
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        if (mm == &init_mm)
                return;
        pgd = mm->pgd + pgd_index(addr);
        BUG_ON(pgd_none(*pgd));
        p4d = p4d_offset(pgd, addr);
        BUG_ON(p4d_none(*p4d));
        pud = pud_offset(p4d, addr);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, addr);
        /*
         * When khugepaged collapses normal pages into a hugepage, it first
         * sets the pmd to none to force page fault/gup to take mmap_lock.
         * After the pmd is set to none, it does a pte_clear which runs this
         * assertion, so if we find the pmd none, just return.
         */
        if (pmd_none(*pmd))
                return;
        pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);
        BUG_ON(!pte);
        assert_spin_locked(ptl);
        pte_unmap(pte);
}
#endif /* CONFIG_DEBUG_VM */
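
/*
 * Translate an address in the vmalloc area to the physical address backing
 * it. Only valid for addresses that are actually mapped; otherwise we BUG.
 */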
unsigned long vmalloc_to_phys(void *va)
{
        unsigned long pfn = vmalloc_to_pfn(va);

        BUG_ON(!pfn);
        return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);

/*
 * We have 3 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page _PAGE_PTE set
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it. This function needs to be called with interrupts disabled. We use
 * this variant when we have MSR[EE] = 0 but paca->irq_soft_mask = IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
                        bool *is_thp, unsigned *hpage_shift)
{
        pgd_t *pgdp;
#ifdef CONFIG_PPC64
        p4d_t p4d, *p4dp;
        pud_t pud, *pudp;
#endif
        pmd_t pmd, *pmdp;
        pte_t *ret_pte;
        unsigned pdshift;

        if (hpage_shift)
                *hpage_shift = 0;

        if (is_thp)
                *is_thp = false;

        /*
         * Always operate on the local stack value. This makes sure the
         * value doesn't get updated by a parallel THP split/collapse,
         * page fault or a page unmap. The returned pte_t * is still not
         * stable, so the above conditions must still be checked when it
         * is used. The top level is an exception because it is folded
         * into p4d.
         *
         * On PPC32, P4D/PUD/PMD are folded into PGD so go straight to
         * the PMD level.
         */
        pgdp = pgdir + pgd_index(ea);
#ifdef CONFIG_PPC64
        p4dp = p4d_offset(pgdp, ea);
        p4d = READ_ONCE(*p4dp);
        pdshift = P4D_SHIFT;

        if (p4d_none(p4d))
                return NULL;

        if (p4d_leaf(p4d)) {
                ret_pte = (pte_t *)p4dp;
                goto out;
        }

        /*
         * Even if we end up with an unmap, the pgtable will not be freed,
         * because page tables are RCU-freed and we are running here with
         * interrupts disabled.
         */
        pdshift = PUD_SHIFT;
        pudp = pud_offset(&p4d, ea);
        pud = READ_ONCE(*pudp);

        if (pud_none(pud))
                return NULL;

        if (pud_leaf(pud)) {
                ret_pte = (pte_t *)pudp;
                goto out;
        }

        pmdp = pmd_offset(&pud, ea);
#else
        pmdp = pmd_offset(pud_offset(p4d_offset(pgdp, ea), ea), ea);
#endif
        pdshift = PMD_SHIFT;
        pmd = READ_ONCE(*pmdp);

        /*
         * A hugepage collapse is captured by this condition, see
         * pmdp_collapse_flush.
         */
        if (pmd_none(pmd))
                return NULL;

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * A hugepage split is captured by this condition, see
         * pmdp_invalidate.
         *
         * Huge page modification can be caught here too.
         */
        if (pmd_is_serializing(pmd))
                return NULL;
#endif

        if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
                if (is_thp)
                        *is_thp = true;
                ret_pte = (pte_t *)pmdp;
                goto out;
        }

        if (pmd_leaf(pmd)) {
                ret_pte = (pte_t *)pmdp;
                goto out;
        }

        return pte_offset_kernel(&pmd, ea);

out:
        if (hpage_shift)
                *hpage_shift = pdshift;
        return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);

/* Note due to the way vm flags are laid out, the bits are XWR */
const pgprot_t protection_map[16] = {
        [VM_NONE] = PAGE_NONE,
        [VM_READ] = PAGE_READONLY,
        [VM_WRITE] = PAGE_COPY,
        [VM_WRITE | VM_READ] = PAGE_COPY,
        [VM_EXEC] = PAGE_EXECONLY_X,
        [VM_EXEC | VM_READ] = PAGE_READONLY_X,
        [VM_EXEC | VM_WRITE] = PAGE_COPY_X,
        [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
        [VM_SHARED] = PAGE_NONE,
        [VM_SHARED | VM_READ] = PAGE_READONLY,
        [VM_SHARED | VM_WRITE] = PAGE_SHARED,
        [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
        [VM_SHARED | VM_EXEC] = PAGE_EXECONLY_X,
        [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
        [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
};
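
/*
 * Book3S-64 provides its own vm_get_page_prot() (which also folds in
 * protection key bits), so the generic helper is only declared for the
 * other platforms.
 */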
#ifndef CONFIG_PPC_BOOK3S_64
DECLARE_VM_GET_PAGE_PROT
#endif