page_vma_mapped.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
{
	pte_t ptent;

	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		*ptlp = pvmw->ptl;
		return !!pvmw->pte;
	}

	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
	 * return it even when choosing not to lock, in case caller
	 * proceeds to loop over next ptes, and finds a match later.
	 * Though, in most cases, page lock already protects this.
	 */
	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
					  pvmw->address, ptlp);
	if (!pvmw->pte)
		return false;

	ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(ptent))
			return false;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;
		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such a
		 * page is not CPU accessible and thus is mapped as
		 * a special swap entry; nonetheless it still does
		 * count as a valid regular mapping for the page
		 * (and is accounted as such in the page's map count).
		 *
		 * So handle this special case as if it was a normal
		 * page mapping, i.e. lock the CPU page table and return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(ptent)) {
		return false;
	}
	pvmw->ptl = *ptlp;
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 * mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes the pte and the pfn range
 * to check against
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * If the PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 * [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;
	pte_t ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(ptent))
			return false;
		entry = pte_to_swp_entry(ptent);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(ptent))
			return false;

		pfn = pte_pfn(ptent);
	}

	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}
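
/*
 * Editor's illustration (not part of the kernel source): the final range
 * test in check_pte() relies on unsigned wraparound, so a pfn below
 * pvmw->pfn underflows to a huge value and fails the "< nr_pages" test
 * without needing a separate lower-bound check. A minimal userspace
 * sketch of the same idiom, with hypothetical names:
 */
#include <stdbool.h>
#include <stdio.h>

static bool pfn_in_range(unsigned long pfn, unsigned long start,
			 unsigned long nr_pages)
{
	/* If pfn < start, the subtraction wraps to a value >= nr_pages. */
	return (pfn - start) < nr_pages;
}

int main(void)
{
	printf("%d\n", pfn_in_range(0x1000, 0x1000, 4));	/* 1: first pfn */
	printf("%d\n", pfn_in_range(0x1003, 0x1000, 4));	/* 1: last pfn */
	printf("%d\n", pfn_in_range(0x0fff, 0x1000, 4));	/* 0: below range */
	printf("%d\n", pfn_in_range(0x1004, 0x1000, 4));	/* 0: past the end */
	return 0;
}
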
/* Returns true if the two ranges overlap. Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}
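
/*
 * Editor's illustration (not part of the kernel source): step_forward()
 * advances to the first address past the table entry covering the current
 * address, and saturates to ULONG_MAX if the addition wraps past the top
 * of the address space. A userspace sketch of the arithmetic, with
 * hypothetical names:
 */
#include <limits.h>
#include <stdio.h>

static unsigned long step(unsigned long address, unsigned long size)
{
	address = (address + size) & ~(size - 1);
	return address ? address : ULONG_MAX;
}

int main(void)
{
	/* With a 2 MiB (PMD-sized) step, both an unaligned and an aligned
	 * address inside the same PMD land on the next PMD boundary. */
	printf("%#lx\n", step(0x201234UL, 0x200000UL));	/* 0x400000 */
	printf("%#lx\n", step(0x200000UL, 0x200000UL));	/* 0x400000 */
	/* Near the top of the address space the sum wraps to a small value
	 * that aligns down to 0, which is reported as ULONG_MAX so the
	 * caller's "address < end" loop terminates. */
	printf("%#lx\n", step(ULONG_MAX - 0x1000, 0x200000UL));
	return 0;
}
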
/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For a PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);
		/*
		 * All callers that get here will already hold the
		 * i_mmap_rwsem. Therefore, no additional locks need to be
		 * taken before calling hugetlb_walk().
		 */
		pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = pmdp_get_lockless(pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    thp_vma_suitable_order(vma, pvmw->address,
						   PMD_ORDER) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw, &ptl)) {
			if (!pvmw->pte)
				goto restart;
			goto next_pte;
		}
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(ptep_get(pvmw->pte)));

		if (!pvmw->ptl) {
			pvmw->ptl = ptl;
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
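
/*
 * Editor's sketch (not part of the kernel source): roughly how a caller
 * such as an rmap walker drives page_vma_mapped_walk() in a loop, as the
 * kernel-doc above describes for PTE-mapped THPs. The function name is
 * hypothetical and error handling is elided; the struct initialiser
 * mirrors the one used by page_mapped_in_vma() below.
 */
static bool folio_mapped_in_vma_sketch(struct folio *folio,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = folio_pfn(folio),
		.nr_pages = folio_nr_pages(folio),
		.vma = vma,
		.address = address,
		.flags = 0,
	};
	bool found = false;

	while (page_vma_mapped_walk(&pvmw)) {
		/* Here pvmw.pte or pvmw.pmd points at a mapping of the
		 * folio and pvmw.ptl is held; a real walker would inspect
		 * or update the entry before continuing. */
		found = true;
	}
	/* When the walk returns false it has already dropped the lock;
	 * breaking out early would instead require
	 * page_vma_mapped_walk_done(&pvmw). */
	return found;
}
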
#ifdef CONFIG_MEMORY_FAILURE
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Return: The address the page is mapped at if the page is in the range
 * covered by the VMA and present in the page table. If the page is
 * outside the VMA or not present, returns -EFAULT.
 * Only valid for normal file or anonymous VMAs.
 */
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);
	pgoff_t pgoff = folio->index + folio_page_idx(folio, page);
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(vma, pgoff, 1);
	if (pvmw.address == -EFAULT)
		goto out;
	if (!page_vma_mapped_walk(&pvmw))
		return -EFAULT;
	page_vma_mapped_walk_done(&pvmw);
out:
	return pvmw.address;
}
#endif
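
/*
 * Editor's sketch (not part of the kernel source): how a memory-failure
 * caller might consume page_mapped_in_vma()'s return convention, where
 * -EFAULT (as an unsigned long) means the page is not mapped in this VMA.
 * The function name is hypothetical.
 */
#ifdef CONFIG_MEMORY_FAILURE
static unsigned long poisoned_page_user_addr(struct page *page,
					     struct vm_area_struct *vma)
{
	unsigned long addr = page_mapped_in_vma(page, vma);

	/* Not mapped in this VMA (or outside its range): nothing to signal. */
	if (addr == -EFAULT)
		return 0;

	/* A real caller would queue a SIGBUS for the task at this address. */
	return addr;
}
#endif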