migrate_device.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}

static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}

static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct folio *folio;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

		folio = pmd_folio(*pmdp);
		if (is_huge_zero_folio(folio)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
		} else {
			int ret;

			folio_get(folio);
			spin_unlock(ptl);
			if (unlikely(!folio_trylock(folio)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
		}
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto again;
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct folio *folio;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = ptep_get(ptep);

		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * Only care about unaddressable device page special
			 * page table entry. Other special swap entries are not
			 * migratable, and we ignore regular swapped page.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
			      MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn) &&
			    (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			if (page && !is_zone_device_page(page) &&
			    !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			else if (page && is_device_coherent_page(page) &&
			    (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
			     page->pgmap->owner != migrate->pgmap_owner))
				goto next;
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * By getting a reference on the folio we pin it and that blocks
		 * any kind of migration. Side effect is that it "freezes" the
		 * pte.
		 *
		 * We drop this reference after isolating the folio from the lru
		 * for non device folio (device folio are not on the lru and thus
		 * can't be dropped from it).
		 */
		folio = page_folio(page);
		folio_get(folio);

		/*
		 * We rely on folio_trylock() to avoid deadlock between
		 * concurrent migrations where each is waiting on the others
		 * folio lock. If we can't immediately lock the folio we fail this
		 * migration as it is only best effort anyway.
		 *
		 * If we can lock the folio it's safe to set up a migration entry
		 * now. In the common case where the folio is mapped once in a
		 * single process setting up the migration entry now is an
		 * optimisation to avoid walking the rmap later with
		 * try_to_migrate().
		 */
		if (folio_trylock(folio)) {
			bool anon_exclusive;
			pte_t swp_pte;

			flush_cache_page(vma, addr, pte_pfn(pte));
			anon_exclusive = folio_test_anon(folio) &&
					 PageAnonExclusive(page);
			if (anon_exclusive) {
				pte = ptep_clear_flush(vma, addr, ptep);

				if (folio_try_share_anon_rmap_pte(folio, page)) {
					set_pte_at(mm, addr, ptep, pte);
					folio_unlock(folio);
					folio_put(folio);
					mpfn = 0;
					goto next;
				}
			} else {
				pte = ptep_get_and_clear(mm, addr, ptep);
			}

			migrate->cpages++;

			/* Set the dirty flag on the folio now the pte is gone. */
			if (pte_dirty(pte))
				folio_mark_dirty(folio);

			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			if (pte_present(pte)) {
				if (pte_young(pte))
					entry = make_migration_entry_young(entry);
				if (pte_dirty(pte))
					entry = make_migration_entry_dirty(entry);
			}
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);

			/*
			 * This is like regular unmap: we remove the rmap and
			 * drop the folio refcount. The folio won't be freed, as
			 * we took a reference just above.
			 */
			folio_remove_rmap_pte(folio, page, vma);
			folio_put(folio);

			if (pte_present(pte))
				unmapped++;
		} else {
			folio_put(folio);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	return 0;
}

static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry		= migrate_vma_collect_pmd,
	.pte_hole		= migrate_vma_collect_hole,
	.walk_lock		= PGWALK_RDLOCK,
};

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
{
	struct folio *folio = page_folio(page);

	/*
	 * One extra ref because caller holds an extra reference, either from
	 * folio_isolate_lru() for a regular folio, or migrate_vma_collect() for
	 * a device folio.
	 */
	int extra = 1 + (page == fault_page);

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (folio_test_large(folio))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (folio_is_zone_device(folio))
		extra++;

	/* For file-backed pages */
	if (folio_mapping(folio))
		extra += 1 + folio_has_private(folio);

	if ((folio_ref_count(folio) - extra) > folio_mapcount(folio))
		return false;

	return true;
}

/*
 * Unmaps pages for migration. Returns number of source pfns marked as
 * migrating.
 */
static unsigned long migrate_device_unmap(unsigned long *src_pfns,
					  unsigned long npages,
					  struct page *fault_page)
{
	unsigned long i, restore = 0;
	bool allow_drain = true;
	unsigned long unmapped = 0;

	lru_add_drain();

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct folio *folio;

		if (!page) {
			if (src_pfns[i] & MIGRATE_PFN_MIGRATE)
				unmapped++;
			continue;
		}

		folio = page_folio(page);
		/* ZONE_DEVICE folios are not on LRU */
		if (!folio_is_zone_device(folio)) {
			if (!folio_test_lru(folio) && allow_drain) {
				/* Drain CPU's lru cache */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (!folio_isolate_lru(folio)) {
				src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			folio_put(folio);
		}

		if (folio_mapped(folio))
			try_to_migrate(folio, 0);

		if (folio_mapped(folio) ||
		    !migrate_vma_check_page(page, fault_page)) {
			if (!folio_is_zone_device(folio)) {
				folio_get(folio);
				folio_putback_lru(folio);
			}

			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			restore++;
			continue;
		}

		unmapped++;
	}

	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct folio *folio;

		if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		folio = page_folio(page);
		remove_migration_ptes(folio, folio, 0);

		src_pfns[i] = 0;
		folio_unlock(folio);
		folio_put(folio);
		restore--;
	}

	return unmapped;
}

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check if it has been pinned. Pinned pages are
 * restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy contents of original page over to new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	migrate->cpages = migrate_device_unmap(migrate->src, migrate->npages,
					migrate->fault_page);
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a range of virtual addresses by collecting all the pages
 * backing each virtual address in the range, saving them inside the src array.
 * Then lock those pages and unmap them. Once the pages are locked and
 * unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. It then restores any pages that are pinned,
 * by remapping and unlocking those pages.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (i.e. with the MIGRATE_PFN_VALID and
 * MIGRATE_PFN_MIGRATE flags set). Once these are allocated and copied, the
 * caller must update each corresponding entry in the dst array with the pfn
 * value of the destination page and with MIGRATE_PFN_VALID. Destination pages
 * must be locked via lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory. If the caller cannot migrate
 * a device page back to system memory, then it must return VM_FAULT_SIGBUS,
 * which has severe consequences for the userspace process, so it must be
 * avoided if at all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding source
 * array entry, thus allowing the caller to allocate device memory for those
 * unbacked virtual addresses. For this the caller simply has to allocate
 * device memory and properly set the destination entry like for regular
 * migration. Note that this can still fail, and thus inside the device driver
 * you must check if the migration was successful for those entries after
 * calling migrate_vma_pages(), just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
 * flag set, then migrate_vma_pages() migrates the struct page information from
 * the source struct page to the destination struct page. If it fails to
 * migrate the struct page information, then it clears the MIGRATE_PFN_MIGRATE
 * flag in the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both destination and source page are still locked, and the mmap_lock is held
 * in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if it
 * chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages.
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;
	if (args->fault_page && !is_device_private_page(args->fault_page))
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);

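/*
 * Illustrative sketch only, not part of migrate_device.c: roughly how a device
 * driver might drive the migrate_vma_*() API documented above to move one page
 * of anonymous memory into device-private memory. my_drv_alloc_device_page()
 * and my_drv_copy_to_device() are hypothetical driver-specific helpers; the
 * former is assumed to return an initialised, locked device-private page.
 * Error handling is reduced to the bare minimum.
 */
static int my_drv_migrate_one_page(struct vm_area_struct *vma,
				   unsigned long addr, void *pgmap_owner)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	struct page *spage, *dpage;
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/* Nothing was collected, or the page is pinned: give up quietly. */
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	dpage = my_drv_alloc_device_page();		/* hypothetical */
	if (!dpage)
		goto out_finalize;

	/*
	 * spage may be NULL for a pte_none() hole; a real driver would then
	 * simply initialise (e.g. zero) the device page instead of copying.
	 */
	spage = migrate_pfn_to_page(src_pfn);
	if (spage)
		my_drv_copy_to_device(dpage, spage);	/* hypothetical */
	dst_pfn = migrate_pfn(page_to_pfn(dpage));

	migrate_vma_pages(&args);
out_finalize:
	migrate_vma_finalize(&args);
	return 0;
}
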
/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private or coherent page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct folio *folio = page_folio(page);
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	pte_t orig_pte;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;
	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;
	if (pte_alloc(mm, pmdp))
		goto abort;
	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the folio contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	if (folio_is_device_private(folio)) {
		swp_entry_t swp_entry;

		if (vma->vm_flags & VM_WRITE)
			swp_entry = make_writable_device_private_entry(
						page_to_pfn(page));
		else
			swp_entry = make_readable_device_private_entry(
						page_to_pfn(page));
		entry = swp_entry_to_pte(swp_entry);
	} else {
		if (folio_is_zone_device(folio) &&
		    !folio_is_device_coherent(folio)) {
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry), vma);
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto abort;
	orig_pte = ptep_get(ptep);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(orig_pte)) {
		unsigned long pfn = pte_pfn(orig_pte);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(orig_pte))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;

	inc_mm_counter(mm, MM_ANONPAGES);
	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
	if (!folio_is_zone_device(folio))
		folio_add_lru_vma(folio, vma);
	folio_get(folio);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(orig_pte));
		ptep_clear_flush(vma, addr, ptep);
	}
	set_pte_at(mm, addr, ptep, entry);
	update_mmu_cache(vma, addr, ptep);

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

static void __migrate_device_pages(unsigned long *src_pfns,
				unsigned long *dst_pfns, unsigned long npages,
				struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;
	unsigned long i;
	bool notified = false;

	for (i = 0; i < npages; i++) {
		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct address_space *mapping;
		struct folio *newfolio, *folio;
		int r, extra_cnt = 0;

		if (!newpage) {
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			unsigned long addr;

			if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
				continue;

			/*
			 * The only time there is no vma is when called from
			 * migrate_device_coherent_folio(). However this isn't
			 * called if the page could not be unmapped.
			 */
			VM_BUG_ON(!migrate);
			addr = migrate->start + i*PAGE_SIZE;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init_owner(&range,
					MMU_NOTIFY_MIGRATE, 0,
					migrate->vma->vm_mm, addr, migrate->end,
					migrate->pgmap_owner);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&src_pfns[i]);
			continue;
		}

		newfolio = page_folio(newpage);
		folio = page_folio(page);
		mapping = folio_mapping(folio);

		if (folio_is_device_private(newfolio) ||
		    folio_is_device_coherent(newfolio)) {
			if (mapping) {
				/*
				 * For now only support anonymous memory migrating to
				 * device private or coherent memory.
				 *
				 * Try to get rid of swap cache if possible.
				 */
				if (!folio_test_anon(folio) ||
				    !folio_free_swap(folio)) {
					src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
					continue;
				}
			}
		} else if (folio_is_zone_device(newfolio)) {
			/*
			 * Other types of ZONE_DEVICE page are not supported.
			 */
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		BUG_ON(folio_test_writeback(folio));

		if (migrate && migrate->fault_page == page)
			extra_cnt = 1;
		r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
		if (r != MIGRATEPAGE_SUCCESS)
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
		else
			folio_migrate_flags(newfolio, folio);
	}

	if (notified)
		mmu_notifier_invalidate_range_end(&range);
}

/**
 * migrate_device_pages() - migrate meta-data from src page to dst page
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Equivalent to migrate_vma_pages(). This is called to migrate struct page
 * meta-data from source struct page to destination.
 */
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages)
{
	__migrate_device_pages(src_pfns, dst_pfns, npages, NULL);
}
EXPORT_SYMBOL(migrate_device_pages);

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from source struct page to destination
 * struct page. This effectively finishes the migration from source page to the
 * destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	__migrate_device_pages(migrate->src, migrate->dst, migrate->npages, migrate);
}
EXPORT_SYMBOL(migrate_vma_pages);

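/*
 * Illustrative sketch only, not part of migrate_device.c: after
 * migrate_vma_pages() a driver typically walks the src array and programs its
 * own device page tables only for entries that still have MIGRATE_PFN_MIGRATE
 * set, as described in the migrate_vma_setup() documentation above.
 * my_drv_map_page() is a hypothetical driver-specific helper.
 */
static void my_drv_map_migrated(struct migrate_vma *args)
{
	unsigned long i;

	for (i = 0; i < args->npages; i++) {
		struct page *dpage = migrate_pfn_to_page(args->dst[i]);

		/* Skip entries that failed or were never selected. */
		if (!dpage || !(args->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/*
		 * Safe here: source and destination pages are still locked and
		 * the mmap_lock is held, so the range cannot be unmapped before
		 * migrate_vma_finalize() runs.
		 */
		my_drv_map_page(args, i, dpage);	/* hypothetical */
	}
}
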
/*
 * migrate_device_finalize() - complete page migration
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Completes migration of the page by removing special migration entries.
 * Drivers must ensure copying of page data is complete and visible to the CPU
 * before calling this.
 */
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *dst = NULL, *src = NULL;
		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
		struct page *page = migrate_pfn_to_page(src_pfns[i]);

		if (newpage)
			dst = page_folio(newpage);
		if (!page) {
			if (dst) {
				folio_unlock(dst);
				folio_put(dst);
			}
			continue;
		}

		src = page_folio(page);

		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !dst) {
			if (dst) {
				folio_unlock(dst);
				folio_put(dst);
			}
			dst = src;
		}

		remove_migration_ptes(src, dst, 0);
		folio_unlock(src);

		if (folio_is_zone_device(src))
			folio_put(src);
		else
			folio_putback_lru(src);

		if (dst != src) {
			folio_unlock(dst);
			if (folio_is_zone_device(dst))
				folio_put(dst);
			else
				folio_putback_lru(dst);
		}
	}
}
EXPORT_SYMBOL(migrate_device_finalize);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the extra
 * refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
}
EXPORT_SYMBOL(migrate_vma_finalize);

/**
 * migrate_device_range() - migrate device private pfns to normal memory.
 * @src_pfns: array large enough to hold migrating source device private pfns.
 * @start: starting pfn in the range to migrate.
 * @npages: number of pages to migrate.
 *
 * migrate_device_range() is similar in concept to migrate_vma_setup() except
 * that instead of looking up pages based on virtual address mappings a range
 * of device pfns that should be migrated to system memory is used instead.
 *
 * This is useful when a driver needs to free device memory but doesn't know the
 * virtual mappings of every page that may be in device memory. For example this
 * is often the case when a driver is being unloaded or unbound from a device.
 *
 * Like migrate_vma_setup() this function will take a reference and lock any
 * migrating pages that aren't free before unmapping them. Drivers may then
 * allocate destination pages and start copying data from the device to CPU
 * memory before calling migrate_device_pages().
 */
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages)
{
	unsigned long i, pfn;

	for (pfn = start, i = 0; i < npages; pfn++, i++) {
		struct folio *folio;

		folio = folio_get_nontail_page(pfn_to_page(pfn));
		if (!folio) {
			src_pfns[i] = 0;
			continue;
		}

		if (!folio_trylock(folio)) {
			src_pfns[i] = 0;
			folio_put(folio);
			continue;
		}

		src_pfns[i] = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
	}

	migrate_device_unmap(src_pfns, npages, NULL);

	return 0;
}
EXPORT_SYMBOL(migrate_device_range);

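/*
 * Illustrative sketch only, not part of migrate_device.c: a driver evicting a
 * physically contiguous chunk of its device-private memory back to system
 * memory with the pfn-based API above, e.g. while unbinding from the device.
 * my_drv_copy_from_device() is a hypothetical driver-specific helper and
 * error handling is kept minimal.
 */
static int my_drv_evict_chunk(unsigned long dev_start_pfn, unsigned long npages)
{
	unsigned long *src_pfns, *dst_pfns;
	unsigned long i;
	int ret;

	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
	if (!src_pfns || !dst_pfns) {
		ret = -ENOMEM;
		goto out_free;
	}

	/* Lock and unmap any device pages in the range that are still in use. */
	ret = migrate_device_range(src_pfns, dev_start_pfn, npages);
	if (ret)
		goto out_free;

	/* Allocate system pages and copy the device data back. */
	for (i = 0; i < npages; i++) {
		struct page *dpage, *spage = migrate_pfn_to_page(src_pfns[i]);

		if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = alloc_page(GFP_HIGHUSER);
		if (!dpage)
			continue;	/* leave this entry unmigrated */

		lock_page(dpage);
		my_drv_copy_from_device(dpage, spage);	/* hypothetical */
		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_device_pages(src_pfns, dst_pfns, npages);
	migrate_device_finalize(src_pfns, dst_pfns, npages);
out_free:
	kvfree(src_pfns);
	kvfree(dst_pfns);
	return ret;
}
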
/*
 * Migrate a device coherent folio back to normal memory. The caller should have
 * a reference on folio which will be copied to the new folio if migration is
 * successful or dropped on failure.
 */
int migrate_device_coherent_folio(struct folio *folio)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct folio *dfolio;

	WARN_ON_ONCE(folio_test_large(folio));

	folio_lock(folio);
	src_pfn = migrate_pfn(folio_pfn(folio)) | MIGRATE_PFN_MIGRATE;

	/*
	 * We don't have a VMA and don't need to walk the page tables to find
	 * the source folio. So call migrate_vma_unmap() directly to unmap the
	 * folio as migrate_vma_setup() will fail if args.vma == NULL.
	 */
	migrate_device_unmap(&src_pfn, 1, NULL);
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	dfolio = folio_alloc(GFP_USER | __GFP_NOWARN, 0);
	if (dfolio) {
		folio_lock(dfolio);
		dst_pfn = migrate_pfn(folio_pfn(dfolio));
	}

	migrate_device_pages(&src_pfn, &dst_pfn, 1);
	if (src_pfn & MIGRATE_PFN_MIGRATE)
		folio_copy(dfolio, folio);
	migrate_device_finalize(&src_pfn, &dst_pfn, 1);

	if (src_pfn & MIGRATE_PFN_MIGRATE)
		return 0;
	return -EBUSY;
}