  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * mm/userfaultfd.c
  4. *
  5. * Copyright (C) 2015 Red Hat, Inc.
  6. */
  7. #include <linux/mm.h>
  8. #include <linux/sched/signal.h>
  9. #include <linux/pagemap.h>
  10. #include <linux/rmap.h>
  11. #include <linux/swap.h>
  12. #include <linux/swapops.h>
  13. #include <linux/userfaultfd_k.h>
  14. #include <linux/mmu_notifier.h>
  15. #include <linux/hugetlb.h>
  16. #include <linux/shmem_fs.h>
  17. #include <asm/tlbflush.h>
  18. #include <asm/tlb.h>
  19. #include "internal.h"
  20. #include "swap.h"
  21. static __always_inline
  22. bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
  23. {
  24. /* Make sure that the dst range is fully within dst_vma. */
  25. if (dst_end > dst_vma->vm_end)
  26. return false;
  27. /*
  28. * Check that the vma is registered in uffd; this is required to
  29. * enforce the VM_MAYWRITE check done at uffd registration
  30. * time.
  31. */
  32. if (!dst_vma->vm_userfaultfd_ctx.ctx)
  33. return false;
  34. return true;
  35. }
  36. static __always_inline
  37. struct vm_area_struct *find_vma_and_prepare_anon(struct mm_struct *mm,
  38. unsigned long addr)
  39. {
  40. struct vm_area_struct *vma;
  41. mmap_assert_locked(mm);
  42. vma = vma_lookup(mm, addr);
  43. if (!vma)
  44. vma = ERR_PTR(-ENOENT);
  45. else if (!(vma->vm_flags & VM_SHARED) &&
  46. unlikely(anon_vma_prepare(vma)))
  47. vma = ERR_PTR(-ENOMEM);
  48. return vma;
  49. }
  50. #ifdef CONFIG_PER_VMA_LOCK
  51. /*
  52. * uffd_lock_vma() - Lookup and lock vma corresponding to @address.
  53. * @mm: mm to search vma in.
  54. * @address: address that the vma should contain.
  55. *
  56. * Should be called without holding mmap_lock.
  57. *
  58. * Return: A locked vma containing @address, -ENOENT if no vma is found, or
  59. * -ENOMEM if anon_vma couldn't be allocated.
  60. */
  61. static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm,
  62. unsigned long address)
  63. {
  64. struct vm_area_struct *vma;
  65. vma = lock_vma_under_rcu(mm, address);
  66. if (vma) {
  67. /*
  68. * We know we're going to need to use anon_vma, so check
  69. * that early.
  70. */
  71. if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma))
  72. vma_end_read(vma);
  73. else
  74. return vma;
  75. }
  76. mmap_read_lock(mm);
  77. vma = find_vma_and_prepare_anon(mm, address);
  78. if (!IS_ERR(vma)) {
  79. /*
  80. * We cannot use vma_start_read() as it may fail due to a
  81. * false locked result (see comment in vma_start_read()). We
  82. * can avoid that by directly locking vm_lock under
  83. * mmap_lock, which guarantees that nobody can lock the
  84. * vma for write (vma_start_write()) under us.
  85. */
  86. down_read(&vma->vm_lock->lock);
  87. }
  88. mmap_read_unlock(mm);
  89. return vma;
  90. }
  91. static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
  92. unsigned long dst_start,
  93. unsigned long len)
  94. {
  95. struct vm_area_struct *dst_vma;
  96. dst_vma = uffd_lock_vma(dst_mm, dst_start);
  97. if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
  98. return dst_vma;
  99. vma_end_read(dst_vma);
  100. return ERR_PTR(-ENOENT);
  101. }
  102. static void uffd_mfill_unlock(struct vm_area_struct *vma)
  103. {
  104. vma_end_read(vma);
  105. }
  106. #else
  107. static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
  108. unsigned long dst_start,
  109. unsigned long len)
  110. {
  111. struct vm_area_struct *dst_vma;
  112. mmap_read_lock(dst_mm);
  113. dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
  114. if (IS_ERR(dst_vma))
  115. goto out_unlock;
  116. if (validate_dst_vma(dst_vma, dst_start + len))
  117. return dst_vma;
  118. dst_vma = ERR_PTR(-ENOENT);
  119. out_unlock:
  120. mmap_read_unlock(dst_mm);
  121. return dst_vma;
  122. }
  123. static void uffd_mfill_unlock(struct vm_area_struct *vma)
  124. {
  125. mmap_read_unlock(vma->vm_mm);
  126. }
  127. #endif
  128. /* Check if dst_addr is outside of file's size. Must be called with ptl held. */
  129. static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
  130. unsigned long dst_addr)
  131. {
  132. struct inode *inode;
  133. pgoff_t offset, max_off;
  134. if (!dst_vma->vm_file)
  135. return false;
  136. inode = dst_vma->vm_file->f_inode;
  137. offset = linear_page_index(dst_vma, dst_addr);
  138. max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
  139. return offset >= max_off;
  140. }
  141. /*
  142. * Install PTEs, to map dst_addr (within dst_vma) to page.
  143. *
  144. * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
  145. * and anon, and for both shared and private VMAs.
  146. */
  147. int mfill_atomic_install_pte(pmd_t *dst_pmd,
  148. struct vm_area_struct *dst_vma,
  149. unsigned long dst_addr, struct page *page,
  150. bool newly_allocated, uffd_flags_t flags)
  151. {
  152. int ret;
  153. struct mm_struct *dst_mm = dst_vma->vm_mm;
  154. pte_t _dst_pte, *dst_pte;
  155. bool writable = dst_vma->vm_flags & VM_WRITE;
  156. bool vm_shared = dst_vma->vm_flags & VM_SHARED;
  157. spinlock_t *ptl;
  158. struct folio *folio = page_folio(page);
  159. bool page_in_cache = folio_mapping(folio);
  160. _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
  161. _dst_pte = pte_mkdirty(_dst_pte);
  162. if (page_in_cache && !vm_shared)
  163. writable = false;
  164. if (writable)
  165. _dst_pte = pte_mkwrite(_dst_pte, dst_vma);
  166. if (flags & MFILL_ATOMIC_WP)
  167. _dst_pte = pte_mkuffd_wp(_dst_pte);
  168. ret = -EAGAIN;
  169. dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
  170. if (!dst_pte)
  171. goto out;
  172. if (mfill_file_over_size(dst_vma, dst_addr)) {
  173. ret = -EFAULT;
  174. goto out_unlock;
  175. }
  176. ret = -EEXIST;
  177. /*
  178. * We allow overwriting a pte marker: consider the case where both
  179. * MISSING|WP are registered; we first wr-protect a none pte which has
  180. * no page cache page backing it, then access the page.
  181. */
  182. if (!pte_none_mostly(ptep_get(dst_pte)))
  183. goto out_unlock;
  184. if (page_in_cache) {
  185. /* Usually, cache pages are already added to LRU */
  186. if (newly_allocated)
  187. folio_add_lru(folio);
  188. folio_add_file_rmap_pte(folio, page, dst_vma);
  189. } else {
  190. folio_add_new_anon_rmap(folio, dst_vma, dst_addr, RMAP_EXCLUSIVE);
  191. folio_add_lru_vma(folio, dst_vma);
  192. }
  193. /*
  194. * Must happen after rmap, as mm_counter() checks mapping (via
  195. * PageAnon()), which is set by __page_set_anon_rmap().
  196. */
  197. inc_mm_counter(dst_mm, mm_counter(folio));
  198. set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
  199. /* No need to invalidate - it was non-present before */
  200. update_mmu_cache(dst_vma, dst_addr, dst_pte);
  201. ret = 0;
  202. out_unlock:
  203. pte_unmap_unlock(dst_pte, ptl);
  204. out:
  205. return ret;
  206. }
  207. static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
  208. struct vm_area_struct *dst_vma,
  209. unsigned long dst_addr,
  210. unsigned long src_addr,
  211. uffd_flags_t flags,
  212. struct folio **foliop)
  213. {
  214. void *kaddr;
  215. int ret;
  216. struct folio *folio;
  217. if (!*foliop) {
  218. ret = -ENOMEM;
  219. folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
  220. dst_addr, false);
  221. if (!folio)
  222. goto out;
  223. kaddr = kmap_local_folio(folio, 0);
  224. /*
  225. * The read mmap_lock is held here. Despite the
  226. * mmap_lock being read recursive a deadlock is still
  227. * possible if a writer has taken a lock. For example:
  228. *
  229. * process A thread 1 takes read lock on own mmap_lock
  230. * process A thread 2 calls mmap, blocks taking write lock
  231. * process B thread 1 takes page fault, read lock on own mmap lock
  232. * process B thread 2 calls mmap, blocks taking write lock
  233. * process A thread 1 blocks taking read lock on process B
  234. * process B thread 1 blocks taking read lock on process A
  235. *
  236. * Disable page faults to prevent potential deadlock
  237. * and retry the copy outside the mmap_lock.
  238. */
  239. pagefault_disable();
  240. ret = copy_from_user(kaddr, (const void __user *) src_addr,
  241. PAGE_SIZE);
  242. pagefault_enable();
  243. kunmap_local(kaddr);
  244. /* fallback to copy_from_user outside mmap_lock */
  245. if (unlikely(ret)) {
  246. ret = -ENOENT;
  247. *foliop = folio;
  248. /* don't free the page */
  249. goto out;
  250. }
  251. flush_dcache_folio(folio);
  252. } else {
  253. folio = *foliop;
  254. *foliop = NULL;
  255. }
  256. /*
  257. * The memory barrier inside __folio_mark_uptodate makes sure that
  258. * preceding stores to the page contents become visible before
  259. * the set_pte_at() write.
  260. */
  261. __folio_mark_uptodate(folio);
  262. ret = -ENOMEM;
  263. if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
  264. goto out_release;
  265. ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
  266. &folio->page, true, flags);
  267. if (ret)
  268. goto out_release;
  269. out:
  270. return ret;
  271. out_release:
  272. folio_put(folio);
  273. goto out;
  274. }
  275. static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
  276. struct vm_area_struct *dst_vma,
  277. unsigned long dst_addr)
  278. {
  279. struct folio *folio;
  280. int ret = -ENOMEM;
  281. folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
  282. if (!folio)
  283. return ret;
  284. if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
  285. goto out_put;
  286. /*
  287. * The memory barrier inside __folio_mark_uptodate makes sure that
  288. * zeroing out the folio becomes visible before mapping the page
  289. * using set_pte_at(). See do_anonymous_page().
  290. */
  291. __folio_mark_uptodate(folio);
  292. ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
  293. &folio->page, true, 0);
  294. if (ret)
  295. goto out_put;
  296. return 0;
  297. out_put:
  298. folio_put(folio);
  299. return ret;
  300. }
  301. static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
  302. struct vm_area_struct *dst_vma,
  303. unsigned long dst_addr)
  304. {
  305. pte_t _dst_pte, *dst_pte;
  306. spinlock_t *ptl;
  307. int ret;
  308. if (mm_forbids_zeropage(dst_vma->vm_mm))
  309. return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
  310. _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
  311. dst_vma->vm_page_prot));
  312. ret = -EAGAIN;
  313. dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
  314. if (!dst_pte)
  315. goto out;
  316. if (mfill_file_over_size(dst_vma, dst_addr)) {
  317. ret = -EFAULT;
  318. goto out_unlock;
  319. }
  320. ret = -EEXIST;
  321. if (!pte_none(ptep_get(dst_pte)))
  322. goto out_unlock;
  323. set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
  324. /* No need to invalidate - it was non-present before */
  325. update_mmu_cache(dst_vma, dst_addr, dst_pte);
  326. ret = 0;
  327. out_unlock:
  328. pte_unmap_unlock(dst_pte, ptl);
  329. out:
  330. return ret;
  331. }
  332. /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
  333. static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
  334. struct vm_area_struct *dst_vma,
  335. unsigned long dst_addr,
  336. uffd_flags_t flags)
  337. {
  338. struct inode *inode = file_inode(dst_vma->vm_file);
  339. pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
  340. struct folio *folio;
  341. struct page *page;
  342. int ret;
  343. ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
  344. /* Our caller expects us to return -EFAULT if we failed to find folio */
  345. if (ret == -ENOENT)
  346. ret = -EFAULT;
  347. if (ret)
  348. goto out;
  349. if (!folio) {
  350. ret = -EFAULT;
  351. goto out;
  352. }
  353. page = folio_file_page(folio, pgoff);
  354. if (PageHWPoison(page)) {
  355. ret = -EIO;
  356. goto out_release;
  357. }
  358. ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
  359. page, false, flags);
  360. if (ret)
  361. goto out_release;
  362. folio_unlock(folio);
  363. ret = 0;
  364. out:
  365. return ret;
  366. out_release:
  367. folio_unlock(folio);
  368. folio_put(folio);
  369. goto out;
  370. }
  371. /* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
  372. static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
  373. struct vm_area_struct *dst_vma,
  374. unsigned long dst_addr,
  375. uffd_flags_t flags)
  376. {
  377. int ret;
  378. struct mm_struct *dst_mm = dst_vma->vm_mm;
  379. pte_t _dst_pte, *dst_pte;
  380. spinlock_t *ptl;
  381. _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
  382. ret = -EAGAIN;
  383. dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
  384. if (!dst_pte)
  385. goto out;
  386. if (mfill_file_over_size(dst_vma, dst_addr)) {
  387. ret = -EFAULT;
  388. goto out_unlock;
  389. }
  390. ret = -EEXIST;
  391. /* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
  392. if (!pte_none(ptep_get(dst_pte)))
  393. goto out_unlock;
  394. set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
  395. /* No need to invalidate - it was non-present before */
  396. update_mmu_cache(dst_vma, dst_addr, dst_pte);
  397. ret = 0;
  398. out_unlock:
  399. pte_unmap_unlock(dst_pte, ptl);
  400. out:
  401. return ret;
  402. }
  403. static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
  404. {
  405. pgd_t *pgd;
  406. p4d_t *p4d;
  407. pud_t *pud;
  408. pgd = pgd_offset(mm, address);
  409. p4d = p4d_alloc(mm, pgd, address);
  410. if (!p4d)
  411. return NULL;
  412. pud = pud_alloc(mm, p4d, address);
  413. if (!pud)
  414. return NULL;
  415. /*
  416. * Note that we don't get here only because the pmd was
  417. * missing: the *pmd may already be established, and it
  418. * may even be a trans_huge_pmd.
  419. */
  420. return pmd_alloc(mm, pud, address);
  421. }
  422. #ifdef CONFIG_HUGETLB_PAGE
  423. /*
  424. * mfill_atomic processing for HUGETLB vmas. Note that this routine is
  425. * called with either vma-lock or mmap_lock held, it will release the lock
  426. * before returning.
  427. */
  428. static __always_inline ssize_t mfill_atomic_hugetlb(
  429. struct userfaultfd_ctx *ctx,
  430. struct vm_area_struct *dst_vma,
  431. unsigned long dst_start,
  432. unsigned long src_start,
  433. unsigned long len,
  434. uffd_flags_t flags)
  435. {
  436. struct mm_struct *dst_mm = dst_vma->vm_mm;
  437. ssize_t err;
  438. pte_t *dst_pte;
  439. unsigned long src_addr, dst_addr;
  440. long copied;
  441. struct folio *folio;
  442. unsigned long vma_hpagesize;
  443. pgoff_t idx;
  444. u32 hash;
  445. struct address_space *mapping;
  446. /*
  447. * There is no default zero huge page for all huge page sizes
  448. * supported by hugetlb. A PMD_SIZE huge zero page may exist, as used
  449. * by THP. Since we cannot reliably insert a zero page, this
  450. * feature is not supported.
  451. */
  452. if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
  453. up_read(&ctx->map_changing_lock);
  454. uffd_mfill_unlock(dst_vma);
  455. return -EINVAL;
  456. }
  457. src_addr = src_start;
  458. dst_addr = dst_start;
  459. copied = 0;
  460. folio = NULL;
  461. vma_hpagesize = vma_kernel_pagesize(dst_vma);
  462. /*
  463. * Validate alignment based on huge page size
  464. */
  465. err = -EINVAL;
  466. if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
  467. goto out_unlock;
  468. retry:
  469. /*
  470. * On routine entry dst_vma is set. If we had to drop mmap_lock and
  471. * retry, dst_vma will be set to NULL and we must look it up again.
  472. */
  473. if (!dst_vma) {
  474. dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
  475. if (IS_ERR(dst_vma)) {
  476. err = PTR_ERR(dst_vma);
  477. goto out;
  478. }
  479. err = -ENOENT;
  480. if (!is_vm_hugetlb_page(dst_vma))
  481. goto out_unlock_vma;
  482. err = -EINVAL;
  483. if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
  484. goto out_unlock_vma;
  485. /*
  486. * If memory mappings are changing because of non-cooperative
  487. * operation (e.g. mremap) running in parallel, bail out and
  488. * request the user to retry later
  489. */
  490. down_read(&ctx->map_changing_lock);
  491. err = -EAGAIN;
  492. if (atomic_read(&ctx->mmap_changing))
  493. goto out_unlock;
  494. }
  495. while (src_addr < src_start + len) {
  496. BUG_ON(dst_addr >= dst_start + len);
  497. /*
  498. * Serialize via vma_lock and hugetlb_fault_mutex.
  499. * vma_lock ensures the dst_pte remains valid even
  500. * in the case of shared pmds. fault mutex prevents
  501. * races with other faulting threads.
  502. */
  503. idx = linear_page_index(dst_vma, dst_addr);
  504. mapping = dst_vma->vm_file->f_mapping;
  505. hash = hugetlb_fault_mutex_hash(mapping, idx);
  506. mutex_lock(&hugetlb_fault_mutex_table[hash]);
  507. hugetlb_vma_lock_read(dst_vma);
  508. err = -ENOMEM;
  509. dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
  510. if (!dst_pte) {
  511. hugetlb_vma_unlock_read(dst_vma);
  512. mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  513. goto out_unlock;
  514. }
  515. if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
  516. !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
  517. err = -EEXIST;
  518. hugetlb_vma_unlock_read(dst_vma);
  519. mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  520. goto out_unlock;
  521. }
  522. err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
  523. src_addr, flags, &folio);
  524. hugetlb_vma_unlock_read(dst_vma);
  525. mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  526. cond_resched();
  527. if (unlikely(err == -ENOENT)) {
  528. up_read(&ctx->map_changing_lock);
  529. uffd_mfill_unlock(dst_vma);
  530. BUG_ON(!folio);
  531. err = copy_folio_from_user(folio,
  532. (const void __user *)src_addr, true);
  533. if (unlikely(err)) {
  534. err = -EFAULT;
  535. goto out;
  536. }
  537. dst_vma = NULL;
  538. goto retry;
  539. } else
  540. BUG_ON(folio);
  541. if (!err) {
  542. dst_addr += vma_hpagesize;
  543. src_addr += vma_hpagesize;
  544. copied += vma_hpagesize;
  545. if (fatal_signal_pending(current))
  546. err = -EINTR;
  547. }
  548. if (err)
  549. break;
  550. }
  551. out_unlock:
  552. up_read(&ctx->map_changing_lock);
  553. out_unlock_vma:
  554. uffd_mfill_unlock(dst_vma);
  555. out:
  556. if (folio)
  557. folio_put(folio);
  558. BUG_ON(copied < 0);
  559. BUG_ON(err > 0);
  560. BUG_ON(!copied && !err);
  561. return copied ? copied : err;
  562. }
  563. #else /* !CONFIG_HUGETLB_PAGE */
  564. /* fail at build time if gcc attempts to use this */
  565. extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
  566. struct vm_area_struct *dst_vma,
  567. unsigned long dst_start,
  568. unsigned long src_start,
  569. unsigned long len,
  570. uffd_flags_t flags);
  571. #endif /* CONFIG_HUGETLB_PAGE */
  572. static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
  573. struct vm_area_struct *dst_vma,
  574. unsigned long dst_addr,
  575. unsigned long src_addr,
  576. uffd_flags_t flags,
  577. struct folio **foliop)
  578. {
  579. ssize_t err;
  580. if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
  581. return mfill_atomic_pte_continue(dst_pmd, dst_vma,
  582. dst_addr, flags);
  583. } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
  584. return mfill_atomic_pte_poison(dst_pmd, dst_vma,
  585. dst_addr, flags);
  586. }
  587. /*
  588. * The normal page fault path for a shmem will invoke the
  589. * fault, fill the hole in the file and COW it right away. The
  590. * result is plain anonymous memory. So when we are
  591. * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
  592. * generate anonymous memory directly without actually filling
  593. * the hole. For the MAP_PRIVATE case the robustness check
  594. * only happens in the pagetable (to verify it's still none)
  595. * and not in the radix tree.
  596. */
  597. if (!(dst_vma->vm_flags & VM_SHARED)) {
  598. if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
  599. err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
  600. dst_addr, src_addr,
  601. flags, foliop);
  602. else
  603. err = mfill_atomic_pte_zeropage(dst_pmd,
  604. dst_vma, dst_addr);
  605. } else {
  606. err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
  607. dst_addr, src_addr,
  608. flags, foliop);
  609. }
  610. return err;
  611. }
  612. static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
  613. unsigned long dst_start,
  614. unsigned long src_start,
  615. unsigned long len,
  616. uffd_flags_t flags)
  617. {
  618. struct mm_struct *dst_mm = ctx->mm;
  619. struct vm_area_struct *dst_vma;
  620. ssize_t err;
  621. pmd_t *dst_pmd;
  622. unsigned long src_addr, dst_addr;
  623. long copied;
  624. struct folio *folio;
  625. /*
  626. * Sanitize the command parameters:
  627. */
  628. BUG_ON(dst_start & ~PAGE_MASK);
  629. BUG_ON(len & ~PAGE_MASK);
  630. /* Does the address range wrap, or is the span zero-sized? */
  631. BUG_ON(src_start + len <= src_start);
  632. BUG_ON(dst_start + len <= dst_start);
  633. src_addr = src_start;
  634. dst_addr = dst_start;
  635. copied = 0;
  636. folio = NULL;
  637. retry:
  638. /*
  639. * Make sure the vma is not shared, and that the dst range is
  640. * both valid and fully within a single existing vma.
  641. */
  642. dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
  643. if (IS_ERR(dst_vma)) {
  644. err = PTR_ERR(dst_vma);
  645. goto out;
  646. }
  647. /*
  648. * If memory mappings are changing because of non-cooperative
  649. * operation (e.g. mremap) running in parallel, bail out and
  650. * request the user to retry later
  651. */
  652. down_read(&ctx->map_changing_lock);
  653. err = -EAGAIN;
  654. if (atomic_read(&ctx->mmap_changing))
  655. goto out_unlock;
  656. err = -EINVAL;
  657. /*
  658. * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
  659. * it will overwrite vm_ops, so vma_is_anonymous must return false.
  660. */
  661. if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
  662. dst_vma->vm_flags & VM_SHARED))
  663. goto out_unlock;
  664. /*
  665. * validate 'mode' now that we know the dst_vma: don't allow
  666. * a wrprotect copy if the userfaultfd didn't register as WP.
  667. */
  668. if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
  669. goto out_unlock;
  670. /*
  671. * If this is a HUGETLB vma, pass off to appropriate routine
  672. */
  673. if (is_vm_hugetlb_page(dst_vma))
  674. return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
  675. src_start, len, flags);
  676. if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
  677. goto out_unlock;
  678. if (!vma_is_shmem(dst_vma) &&
  679. uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
  680. goto out_unlock;
  681. while (src_addr < src_start + len) {
  682. pmd_t dst_pmdval;
  683. BUG_ON(dst_addr >= dst_start + len);
  684. dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
  685. if (unlikely(!dst_pmd)) {
  686. err = -ENOMEM;
  687. break;
  688. }
  689. dst_pmdval = pmdp_get_lockless(dst_pmd);
  690. if (unlikely(pmd_none(dst_pmdval)) &&
  691. unlikely(__pte_alloc(dst_mm, dst_pmd))) {
  692. err = -ENOMEM;
  693. break;
  694. }
  695. dst_pmdval = pmdp_get_lockless(dst_pmd);
  696. /*
  697. * If the dst_pmd is THP don't override it and just be strict.
  698. * (This includes the case where the PMD used to be THP and
  699. * changed back to none after __pte_alloc().)
  700. */
  701. if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) ||
  702. pmd_devmap(dst_pmdval))) {
  703. err = -EEXIST;
  704. break;
  705. }
  706. if (unlikely(pmd_bad(dst_pmdval))) {
  707. err = -EFAULT;
  708. break;
  709. }
  710. /*
  711. * For shmem mappings, khugepaged is allowed to remove page
  712. * tables under us; pte_offset_map_lock() will deal with that.
  713. */
  714. err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
  715. src_addr, flags, &folio);
  716. cond_resched();
  717. if (unlikely(err == -ENOENT)) {
  718. void *kaddr;
  719. up_read(&ctx->map_changing_lock);
  720. uffd_mfill_unlock(dst_vma);
  721. BUG_ON(!folio);
  722. kaddr = kmap_local_folio(folio, 0);
  723. err = copy_from_user(kaddr,
  724. (const void __user *) src_addr,
  725. PAGE_SIZE);
  726. kunmap_local(kaddr);
  727. if (unlikely(err)) {
  728. err = -EFAULT;
  729. goto out;
  730. }
  731. flush_dcache_folio(folio);
  732. goto retry;
  733. } else
  734. BUG_ON(folio);
  735. if (!err) {
  736. dst_addr += PAGE_SIZE;
  737. src_addr += PAGE_SIZE;
  738. copied += PAGE_SIZE;
  739. if (fatal_signal_pending(current))
  740. err = -EINTR;
  741. }
  742. if (err)
  743. break;
  744. }
  745. out_unlock:
  746. up_read(&ctx->map_changing_lock);
  747. uffd_mfill_unlock(dst_vma);
  748. out:
  749. if (folio)
  750. folio_put(folio);
  751. BUG_ON(copied < 0);
  752. BUG_ON(err > 0);
  753. BUG_ON(!copied && !err);
  754. return copied ? copied : err;
  755. }
  756. ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
  757. unsigned long src_start, unsigned long len,
  758. uffd_flags_t flags)
  759. {
  760. return mfill_atomic(ctx, dst_start, src_start, len,
  761. uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
  762. }
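/*
 * Illustrative userspace usage (a minimal sketch, not part of this kernel
 * file): mfill_atomic_copy() above is reached via ioctl(UFFDIO_COPY) on a
 * userfaultfd descriptor that was registered over the destination range
 * with UFFDIO_REGISTER_MODE_MISSING. Addresses and length are assumed to
 * be page aligned; the helper name is made up for the example.
 */
#if 0	/* userspace example, not built with this file */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int uffd_copy_page(int uffd, unsigned long dst, unsigned long src,
			  unsigned long len)
{
	struct uffdio_copy copy = {
		.dst	= dst,
		.src	= src,
		.len	= len,
		.mode	= 0,	/* or UFFDIO_COPY_MODE_WP to map write-protected */
	};

	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
		return -1;	/* copy.copy holds the bytes copied so far */
	return 0;
}
#endif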
  763. ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
  764. unsigned long start,
  765. unsigned long len)
  766. {
  767. return mfill_atomic(ctx, start, 0, len,
  768. uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
  769. }
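/*
 * Likewise for mfill_atomic_zeropage(): a minimal userspace sketch (reusing
 * the headers from the UFFDIO_COPY example above) that resolves a missing
 * page with the zero page via ioctl(UFFDIO_ZEROPAGE). The range is assumed
 * to be page aligned.
 */
#if 0	/* userspace example, not built with this file */
static int uffd_zeropage(int uffd, unsigned long start, unsigned long len)
{
	struct uffdio_zeropage zp = {
		.range	= { .start = start, .len = len },
		.mode	= 0,	/* or UFFDIO_ZEROPAGE_MODE_DONTWAKE */
	};

	return ioctl(uffd, UFFDIO_ZEROPAGE, &zp);	/* zp.zeropage: bytes done */
}
#endif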
  770. ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
  771. unsigned long len, uffd_flags_t flags)
  772. {
  773. /*
  774. * A caller might reasonably assume that UFFDIO_CONTINUE contains an
  775. * smp_wmb() to ensure that any writes to the about-to-be-mapped page by
  776. * the thread doing the UFFDIO_CONTINUE are guaranteed to be visible to
  777. * subsequent loads from the page through the newly mapped address range.
  778. */
  779. smp_wmb();
  780. return mfill_atomic(ctx, start, 0, len,
  781. uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
  782. }
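/*
 * A minimal userspace sketch of the ordering the smp_wmb() above pairs with
 * (not part of this kernel file; it assumes a second, non-registered
 * mapping of the same shmem file is available at @alias): populate the
 * page-cache page first, then issue UFFDIO_CONTINUE, so the faulting thread
 * cannot observe stale contents through the newly mapped address.
 */
#if 0	/* userspace example, not built with this file */
#include <string.h>

static int uffd_continue_page(int uffd, unsigned long fault_addr, void *alias,
			      const void *data, size_t page_size)
{
	struct uffdio_continue cont = {
		.range	= { .start = fault_addr, .len = page_size },
		.mode	= 0,	/* or UFFDIO_CONTINUE_MODE_DONTWAKE */
	};

	/* Write the contents through the non-registered alias mapping... */
	memcpy(alias, data, page_size);
	/* ...then map the now-populated page at the faulting address. */
	return ioctl(uffd, UFFDIO_CONTINUE, &cont);
}
#endif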
  783. ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
  784. unsigned long len, uffd_flags_t flags)
  785. {
  786. return mfill_atomic(ctx, start, 0, len,
  787. uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
  788. }
  789. long uffd_wp_range(struct vm_area_struct *dst_vma,
  790. unsigned long start, unsigned long len, bool enable_wp)
  791. {
  792. unsigned int mm_cp_flags;
  793. struct mmu_gather tlb;
  794. long ret;
  795. VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
  796. "The address range exceeds VMA boundary.\n");
  797. if (enable_wp)
  798. mm_cp_flags = MM_CP_UFFD_WP;
  799. else
  800. mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;
  801. /*
  802. * vma->vm_page_prot already reflects that uffd-wp is enabled for this
  803. * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
  804. * to be write-protected as default whenever protection changes.
  805. * Try upgrading write permissions manually.
  806. */
  807. if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
  808. mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
  809. tlb_gather_mmu(&tlb, dst_vma->vm_mm);
  810. ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
  811. tlb_finish_mmu(&tlb);
  812. return ret;
  813. }
  814. int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
  815. unsigned long len, bool enable_wp)
  816. {
  817. struct mm_struct *dst_mm = ctx->mm;
  818. unsigned long end = start + len;
  819. unsigned long _start, _end;
  820. struct vm_area_struct *dst_vma;
  821. unsigned long page_mask;
  822. long err;
  823. VMA_ITERATOR(vmi, dst_mm, start);
  824. /*
  825. * Sanitize the command parameters:
  826. */
  827. BUG_ON(start & ~PAGE_MASK);
  828. BUG_ON(len & ~PAGE_MASK);
  829. /* Does the address range wrap, or is the span zero-sized? */
  830. BUG_ON(start + len <= start);
  831. mmap_read_lock(dst_mm);
  832. /*
  833. * If memory mappings are changing because of non-cooperative
  834. * operation (e.g. mremap) running in parallel, bail out and
  835. * request the user to retry later
  836. */
  837. down_read(&ctx->map_changing_lock);
  838. err = -EAGAIN;
  839. if (atomic_read(&ctx->mmap_changing))
  840. goto out_unlock;
  841. err = -ENOENT;
  842. for_each_vma_range(vmi, dst_vma, end) {
  843. if (!userfaultfd_wp(dst_vma)) {
  844. err = -ENOENT;
  845. break;
  846. }
  847. if (is_vm_hugetlb_page(dst_vma)) {
  848. err = -EINVAL;
  849. page_mask = vma_kernel_pagesize(dst_vma) - 1;
  850. if ((start & page_mask) || (len & page_mask))
  851. break;
  852. }
  853. _start = max(dst_vma->vm_start, start);
  854. _end = min(dst_vma->vm_end, end);
  855. err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
  856. /* Return 0 on success, <0 on failures */
  857. if (err < 0)
  858. break;
  859. err = 0;
  860. }
  861. out_unlock:
  862. up_read(&ctx->map_changing_lock);
  863. mmap_read_unlock(dst_mm);
  864. return err;
  865. }
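/*
 * A minimal userspace sketch (not part of this kernel file) of driving
 * mwriteprotect_range() via ioctl(UFFDIO_WRITEPROTECT): pass a non-zero
 * @wp to write-protect the page-aligned, UFFD_WP-registered range, zero to
 * resolve the write protection again.
 */
#if 0	/* userspace example, not built with this file */
static int uffd_writeprotect(int uffd, unsigned long start, unsigned long len,
			     int wp)
{
	struct uffdio_writeprotect uwp = {
		.range	= { .start = start, .len = len },
		.mode	= wp ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	return ioctl(uffd, UFFDIO_WRITEPROTECT, &uwp);
}
#endif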
  866. void double_pt_lock(spinlock_t *ptl1,
  867. spinlock_t *ptl2)
  868. __acquires(ptl1)
  869. __acquires(ptl2)
  870. {
  871. if (ptl1 > ptl2)
  872. swap(ptl1, ptl2);
  873. /* lock in virtual address order to avoid lock inversion */
  874. spin_lock(ptl1);
  875. if (ptl1 != ptl2)
  876. spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);
  877. else
  878. __acquire(ptl2);
  879. }
  880. void double_pt_unlock(spinlock_t *ptl1,
  881. spinlock_t *ptl2)
  882. __releases(ptl1)
  883. __releases(ptl2)
  884. {
  885. spin_unlock(ptl1);
  886. if (ptl1 != ptl2)
  887. spin_unlock(ptl2);
  888. else
  889. __release(ptl2);
  890. }
  891. static int move_present_pte(struct mm_struct *mm,
  892. struct vm_area_struct *dst_vma,
  893. struct vm_area_struct *src_vma,
  894. unsigned long dst_addr, unsigned long src_addr,
  895. pte_t *dst_pte, pte_t *src_pte,
  896. pte_t orig_dst_pte, pte_t orig_src_pte,
  897. spinlock_t *dst_ptl, spinlock_t *src_ptl,
  898. struct folio *src_folio)
  899. {
  900. int err = 0;
  901. double_pt_lock(dst_ptl, src_ptl);
  902. if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
  903. !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
  904. err = -EAGAIN;
  905. goto out;
  906. }
  907. if (folio_test_large(src_folio) ||
  908. folio_maybe_dma_pinned(src_folio) ||
  909. !PageAnonExclusive(&src_folio->page)) {
  910. err = -EBUSY;
  911. goto out;
  912. }
  913. orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
  914. /* Folio got pinned from under us. Put it back and fail the move. */
  915. if (folio_maybe_dma_pinned(src_folio)) {
  916. set_pte_at(mm, src_addr, src_pte, orig_src_pte);
  917. err = -EBUSY;
  918. goto out;
  919. }
  920. folio_move_anon_rmap(src_folio, dst_vma);
  921. src_folio->index = linear_page_index(dst_vma, dst_addr);
  922. orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
  923. /* Set soft dirty bit so userspace can notice the pte was moved */
  924. #ifdef CONFIG_MEM_SOFT_DIRTY
  925. orig_dst_pte = pte_mksoft_dirty(orig_dst_pte);
  926. #endif
  927. if (pte_dirty(orig_src_pte))
  928. orig_dst_pte = pte_mkdirty(orig_dst_pte);
  929. orig_dst_pte = pte_mkwrite(orig_dst_pte, dst_vma);
  930. set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
  931. out:
  932. double_pt_unlock(dst_ptl, src_ptl);
  933. return err;
  934. }
  935. static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma,
  936. unsigned long dst_addr, unsigned long src_addr,
  937. pte_t *dst_pte, pte_t *src_pte,
  938. pte_t orig_dst_pte, pte_t orig_src_pte,
  939. spinlock_t *dst_ptl, spinlock_t *src_ptl,
  940. struct folio *src_folio,
  941. struct swap_info_struct *si, swp_entry_t entry)
  942. {
  943. /*
  944. * Check if the folio still belongs to the target swap entry after
  945. * acquiring the lock. Folio can be freed in the swap cache while
  946. * not locked.
  947. */
  948. if (src_folio && unlikely(!folio_test_swapcache(src_folio) ||
  949. entry.val != src_folio->swap.val))
  950. return -EAGAIN;
  951. double_pt_lock(dst_ptl, src_ptl);
  952. if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
  953. !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
  954. double_pt_unlock(dst_ptl, src_ptl);
  955. return -EAGAIN;
  956. }
  957. /*
  958. * The src_folio resides in the swapcache, requiring an update to its
  959. * index and mapping to align with the dst_vma, where a swap-in may
  960. * occur and hit the swapcache after moving the PTE.
  961. */
  962. if (src_folio) {
  963. folio_move_anon_rmap(src_folio, dst_vma);
  964. src_folio->index = linear_page_index(dst_vma, dst_addr);
  965. } else {
  966. /*
  967. * Check if the swap entry is cached after acquiring the src_pte
  968. * lock. Otherwise, we might miss a newly loaded swap cache folio.
  969. *
  970. * Check swap_map directly to minimize overhead; READ_ONCE is sufficient.
  971. * We are trying to catch newly added swap cache; the only possible case is
  972. * when a folio is swapped in and out again staying in swap cache, using the
  973. * same entry before the PTE check above. The PTL is acquired and released
  974. * twice, each time after updating the swap_map's flag. So holding
  975. * the PTL here ensures we see the updated value. False positive is possible,
  976. * e.g. SWP_SYNCHRONOUS_IO swapin may set the flag without touching the
  977. * cache, or during the tiny synchronization window between swap cache and
  978. * swap_map, but it will be gone very quickly, worst result is retry jitters.
  979. */
  980. if (READ_ONCE(si->swap_map[swp_offset(entry)]) & SWAP_HAS_CACHE) {
  981. double_pt_unlock(dst_ptl, src_ptl);
  982. return -EAGAIN;
  983. }
  984. }
  985. orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
  986. #ifdef CONFIG_MEM_SOFT_DIRTY
  987. orig_src_pte = pte_swp_mksoft_dirty(orig_src_pte);
  988. #endif
  989. set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
  990. double_pt_unlock(dst_ptl, src_ptl);
  991. return 0;
  992. }
  993. static int move_zeropage_pte(struct mm_struct *mm,
  994. struct vm_area_struct *dst_vma,
  995. struct vm_area_struct *src_vma,
  996. unsigned long dst_addr, unsigned long src_addr,
  997. pte_t *dst_pte, pte_t *src_pte,
  998. pte_t orig_dst_pte, pte_t orig_src_pte,
  999. spinlock_t *dst_ptl, spinlock_t *src_ptl)
  1000. {
  1001. pte_t zero_pte;
  1002. double_pt_lock(dst_ptl, src_ptl);
  1003. if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
  1004. !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
  1005. double_pt_unlock(dst_ptl, src_ptl);
  1006. return -EAGAIN;
  1007. }
  1008. zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
  1009. dst_vma->vm_page_prot));
  1010. ptep_clear_flush(src_vma, src_addr, src_pte);
  1011. set_pte_at(mm, dst_addr, dst_pte, zero_pte);
  1012. double_pt_unlock(dst_ptl, src_ptl);
  1013. return 0;
  1014. }
  1015. /*
  1016. * The mmap_lock for reading is held by the caller. Just move the page
  1017. * from src_pmd to dst_pmd if possible, and return 0 if the page was
  1018. * successfully moved (a negative error code otherwise).
  1019. */
  1020. static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
  1021. struct vm_area_struct *dst_vma,
  1022. struct vm_area_struct *src_vma,
  1023. unsigned long dst_addr, unsigned long src_addr,
  1024. __u64 mode)
  1025. {
  1026. swp_entry_t entry;
  1027. struct swap_info_struct *si = NULL;
  1028. pte_t orig_src_pte, orig_dst_pte;
  1029. pte_t src_folio_pte;
  1030. spinlock_t *src_ptl, *dst_ptl;
  1031. pte_t *src_pte = NULL;
  1032. pte_t *dst_pte = NULL;
  1033. struct folio *src_folio = NULL;
  1034. struct anon_vma *src_anon_vma = NULL;
  1035. struct mmu_notifier_range range;
  1036. int err = 0;
  1037. flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE);
  1038. mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
  1039. src_addr, src_addr + PAGE_SIZE);
  1040. mmu_notifier_invalidate_range_start(&range);
  1041. retry:
  1042. dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl);
  1043. /* Retry if a huge pmd materialized from under us */
  1044. if (unlikely(!dst_pte)) {
  1045. err = -EAGAIN;
  1046. goto out;
  1047. }
  1048. src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl);
  1049. /*
  1050. * We held the mmap_lock for reading so MADV_DONTNEED
  1051. * can zap transparent huge pages under us, or the
  1052. * transparent huge page fault can establish new
  1053. * transparent huge pages under us.
  1054. */
  1055. if (unlikely(!src_pte)) {
  1056. err = -EAGAIN;
  1057. goto out;
  1058. }
  1059. /* Sanity checks before the operation */
  1060. if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) ||
  1061. WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
  1062. err = -EINVAL;
  1063. goto out;
  1064. }
  1065. spin_lock(dst_ptl);
  1066. orig_dst_pte = ptep_get(dst_pte);
  1067. spin_unlock(dst_ptl);
  1068. if (!pte_none(orig_dst_pte)) {
  1069. err = -EEXIST;
  1070. goto out;
  1071. }
  1072. spin_lock(src_ptl);
  1073. orig_src_pte = ptep_get(src_pte);
  1074. spin_unlock(src_ptl);
  1075. if (pte_none(orig_src_pte)) {
  1076. if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
  1077. err = -ENOENT;
  1078. else /* nothing to do to move a hole */
  1079. err = 0;
  1080. goto out;
  1081. }
  1082. /* If the PTE changed after we locked the folio then start over */
  1083. if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
  1084. err = -EAGAIN;
  1085. goto out;
  1086. }
  1087. if (pte_present(orig_src_pte)) {
  1088. if (is_zero_pfn(pte_pfn(orig_src_pte))) {
  1089. err = move_zeropage_pte(mm, dst_vma, src_vma,
  1090. dst_addr, src_addr, dst_pte, src_pte,
  1091. orig_dst_pte, orig_src_pte,
  1092. dst_ptl, src_ptl);
  1093. goto out;
  1094. }
  1095. /*
  1096. * Pin and lock both source folio and anon_vma. Since we are in
  1097. * an RCU read section, we can't block, so on contention we have to
  1098. * unmap the ptes, obtain the lock and retry.
  1099. */
  1100. if (!src_folio) {
  1101. struct folio *folio;
  1102. bool locked;
  1103. /*
  1104. * Pin the page while holding the lock to be sure the
  1105. * page isn't freed under us
  1106. */
  1107. spin_lock(src_ptl);
  1108. if (!pte_same(orig_src_pte, ptep_get(src_pte))) {
  1109. spin_unlock(src_ptl);
  1110. err = -EAGAIN;
  1111. goto out;
  1112. }
  1113. folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
  1114. if (!folio || !PageAnonExclusive(&folio->page)) {
  1115. spin_unlock(src_ptl);
  1116. err = -EBUSY;
  1117. goto out;
  1118. }
  1119. locked = folio_trylock(folio);
  1120. /*
  1121. * We avoid waiting for folio lock with a raised
  1122. * refcount for large folios because extra refcounts
  1123. * will result in split_folio() failing later and
  1124. * retrying. If multiple tasks are trying to move a
  1125. * large folio we can end up livelocking.
  1126. */
  1127. if (!locked && folio_test_large(folio)) {
  1128. spin_unlock(src_ptl);
  1129. err = -EAGAIN;
  1130. goto out;
  1131. }
  1132. folio_get(folio);
  1133. src_folio = folio;
  1134. src_folio_pte = orig_src_pte;
  1135. spin_unlock(src_ptl);
  1136. if (!locked) {
  1137. pte_unmap(src_pte);
  1138. pte_unmap(dst_pte);
  1139. src_pte = dst_pte = NULL;
  1140. /* now we can block and wait */
  1141. folio_lock(src_folio);
  1142. goto retry;
  1143. }
  1144. if (WARN_ON_ONCE(!folio_test_anon(src_folio))) {
  1145. err = -EBUSY;
  1146. goto out;
  1147. }
  1148. }
  1149. /* at this point we have src_folio locked */
  1150. if (folio_test_large(src_folio)) {
  1151. /* split_folio() can block */
  1152. pte_unmap(src_pte);
  1153. pte_unmap(dst_pte);
  1154. src_pte = dst_pte = NULL;
  1155. err = split_folio(src_folio);
  1156. if (err)
  1157. goto out;
  1158. /* have to reacquire the folio after it got split */
  1159. folio_unlock(src_folio);
  1160. folio_put(src_folio);
  1161. src_folio = NULL;
  1162. goto retry;
  1163. }
  1164. if (!src_anon_vma) {
  1165. /*
  1166. * folio_referenced walks the anon_vma chain
  1167. * without the folio lock. Serialize against it with
  1168. * the anon_vma lock, the folio lock is not enough.
  1169. */
  1170. src_anon_vma = folio_get_anon_vma(src_folio);
  1171. if (!src_anon_vma) {
  1172. /* page was unmapped from under us */
  1173. err = -EAGAIN;
  1174. goto out;
  1175. }
  1176. if (!anon_vma_trylock_write(src_anon_vma)) {
  1177. pte_unmap(src_pte);
  1178. pte_unmap(dst_pte);
  1179. src_pte = dst_pte = NULL;
  1180. /* now we can block and wait */
  1181. anon_vma_lock_write(src_anon_vma);
  1182. goto retry;
  1183. }
  1184. }
  1185. err = move_present_pte(mm, dst_vma, src_vma,
  1186. dst_addr, src_addr, dst_pte, src_pte,
  1187. orig_dst_pte, orig_src_pte,
  1188. dst_ptl, src_ptl, src_folio);
  1189. } else {
  1190. struct folio *folio = NULL;
  1191. entry = pte_to_swp_entry(orig_src_pte);
  1192. if (non_swap_entry(entry)) {
  1193. if (is_migration_entry(entry)) {
  1194. pte_unmap(src_pte);
  1195. pte_unmap(dst_pte);
  1196. src_pte = dst_pte = NULL;
  1197. migration_entry_wait(mm, src_pmd, src_addr);
  1198. err = -EAGAIN;
  1199. } else
  1200. err = -EFAULT;
  1201. goto out;
  1202. }
  1203. if (!pte_swp_exclusive(orig_src_pte)) {
  1204. err = -EBUSY;
  1205. goto out;
  1206. }
  1207. si = get_swap_device(entry);
  1208. if (unlikely(!si)) {
  1209. err = -EAGAIN;
  1210. goto out;
  1211. }
  1212. /*
  1213. * Verify the existence of the swapcache. If present, the folio's
  1214. * index and mapping must be updated even when the PTE is a swap
  1215. * entry. The anon_vma lock is not taken during this process since
  1216. * the folio has already been unmapped, and the swap entry is
  1217. * exclusive, preventing rmap walks.
  1218. *
  1219. * For large folios, return -EBUSY immediately, as split_folio()
  1220. * also returns -EBUSY when attempting to split unmapped large
  1221. * folios in the swapcache. This issue needs to be resolved
  1222. * separately to allow proper handling.
  1223. */
  1224. if (!src_folio)
  1225. folio = filemap_get_folio(swap_address_space(entry),
  1226. swap_cache_index(entry));
  1227. if (!IS_ERR_OR_NULL(folio)) {
  1228. if (folio_test_large(folio)) {
  1229. err = -EBUSY;
  1230. folio_put(folio);
  1231. goto out;
  1232. }
  1233. src_folio = folio;
  1234. src_folio_pte = orig_src_pte;
  1235. if (!folio_trylock(src_folio)) {
  1236. pte_unmap(src_pte);
  1237. pte_unmap(dst_pte);
  1238. src_pte = dst_pte = NULL;
  1239. put_swap_device(si);
  1240. si = NULL;
  1241. /* now we can block and wait */
  1242. folio_lock(src_folio);
  1243. goto retry;
  1244. }
  1245. }
  1246. err = move_swap_pte(mm, dst_vma, dst_addr, src_addr, dst_pte, src_pte,
  1247. orig_dst_pte, orig_src_pte,
  1248. dst_ptl, src_ptl, src_folio, si, entry);
  1249. }
  1250. out:
  1251. if (src_anon_vma) {
  1252. anon_vma_unlock_write(src_anon_vma);
  1253. put_anon_vma(src_anon_vma);
  1254. }
  1255. if (src_folio) {
  1256. folio_unlock(src_folio);
  1257. folio_put(src_folio);
  1258. }
  1259. /*
  1260. * Unmap in reverse order (LIFO) to maintain proper kmap_local
  1261. * index ordering when CONFIG_HIGHPTE is enabled. We mapped dst_pte
  1262. * first, then src_pte, so we must unmap src_pte first, then dst_pte.
  1263. */
  1264. if (src_pte)
  1265. pte_unmap(src_pte);
  1266. if (dst_pte)
  1267. pte_unmap(dst_pte);
  1268. mmu_notifier_invalidate_range_end(&range);
  1269. if (si)
  1270. put_swap_device(si);
  1271. return err;
  1272. }
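/*
 * A minimal userspace sketch (not part of this kernel file) of the
 * UFFDIO_MOVE ioctl that ultimately drives move_pages_pte(): src, dst and
 * len are assumed to be page aligned and to lie in anonymous VMAs
 * registered with this userfaultfd. UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES makes
 * holes in the source count as moved instead of failing with -ENOENT.
 */
#if 0	/* userspace example, not built with this file */
static long uffd_move(int uffd, unsigned long dst, unsigned long src,
		      unsigned long len, int allow_src_holes)
{
	struct uffdio_move mv = {
		.dst	= dst,
		.src	= src,
		.len	= len,
		.mode	= allow_src_holes ? UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES : 0,
	};

	if (ioctl(uffd, UFFDIO_MOVE, &mv) == -1)
		return -1;	/* mv.move holds the bytes moved before the error */
	return mv.move;
}
#endif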
  1273. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  1274. static inline bool move_splits_huge_pmd(unsigned long dst_addr,
  1275. unsigned long src_addr,
  1276. unsigned long src_end)
  1277. {
  1278. return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
  1279. src_end - src_addr < HPAGE_PMD_SIZE;
  1280. }
  1281. #else
  1282. static inline bool move_splits_huge_pmd(unsigned long dst_addr,
  1283. unsigned long src_addr,
  1284. unsigned long src_end)
  1285. {
  1286. /* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
  1287. return false;
  1288. }
  1289. #endif
  1290. static inline bool vma_move_compatible(struct vm_area_struct *vma)
  1291. {
  1292. return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB |
  1293. VM_MIXEDMAP | VM_SHADOW_STACK));
  1294. }
  1295. static int validate_move_areas(struct userfaultfd_ctx *ctx,
  1296. struct vm_area_struct *src_vma,
  1297. struct vm_area_struct *dst_vma)
  1298. {
  1299. /* Only allow moving if both have the same access and protection */
  1300. if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
  1301. pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
  1302. return -EINVAL;
  1303. /* Only allow moving if both are mlocked or both aren't */
  1304. if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
  1305. return -EINVAL;
  1306. /*
  1307. * For now, we keep it simple and only move between writable VMAs.
  1308. * Access flags are equal, therefore checking only the source is enough.
  1309. */
  1310. if (!(src_vma->vm_flags & VM_WRITE))
  1311. return -EINVAL;
  1312. /* Check if vma flags indicate content which can be moved */
  1313. if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
  1314. return -EINVAL;
  1315. /* Ensure dst_vma is registered in uffd we are operating on */
  1316. if (!dst_vma->vm_userfaultfd_ctx.ctx ||
  1317. dst_vma->vm_userfaultfd_ctx.ctx != ctx)
  1318. return -EINVAL;
  1319. /* Only allow moving across anonymous vmas */
  1320. if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
  1321. return -EINVAL;
  1322. return 0;
  1323. }
  1324. static __always_inline
  1325. int find_vmas_mm_locked(struct mm_struct *mm,
  1326. unsigned long dst_start,
  1327. unsigned long src_start,
  1328. struct vm_area_struct **dst_vmap,
  1329. struct vm_area_struct **src_vmap)
  1330. {
  1331. struct vm_area_struct *vma;
  1332. mmap_assert_locked(mm);
  1333. vma = find_vma_and_prepare_anon(mm, dst_start);
  1334. if (IS_ERR(vma))
  1335. return PTR_ERR(vma);
  1336. *dst_vmap = vma;
  1337. /* Skip finding src_vma if src_start is in dst_vma */
  1338. if (src_start >= vma->vm_start && src_start < vma->vm_end)
  1339. goto out_success;
  1340. vma = vma_lookup(mm, src_start);
  1341. if (!vma)
  1342. return -ENOENT;
  1343. out_success:
  1344. *src_vmap = vma;
  1345. return 0;
  1346. }
  1347. #ifdef CONFIG_PER_VMA_LOCK
  1348. static int uffd_move_lock(struct mm_struct *mm,
  1349. unsigned long dst_start,
  1350. unsigned long src_start,
  1351. struct vm_area_struct **dst_vmap,
  1352. struct vm_area_struct **src_vmap)
  1353. {
  1354. struct vm_area_struct *vma;
  1355. int err;
  1356. vma = uffd_lock_vma(mm, dst_start);
  1357. if (IS_ERR(vma))
  1358. return PTR_ERR(vma);
  1359. *dst_vmap = vma;
  1360. /*
  1361. * Skip finding src_vma if src_start is in dst_vma. This also ensures
  1362. * that we don't lock the same vma twice.
  1363. */
  1364. if (src_start >= vma->vm_start && src_start < vma->vm_end) {
  1365. *src_vmap = vma;
  1366. return 0;
  1367. }
  1368. /*
  1369. * Using uffd_lock_vma() to get src_vma can lead to the following deadlock:
  1370. *
  1371. * Thread1 Thread2
  1372. * ------- -------
  1373. * vma_start_read(dst_vma)
  1374. * mmap_write_lock(mm)
  1375. * vma_start_write(src_vma)
  1376. * vma_start_read(src_vma)
  1377. * mmap_read_lock(mm)
  1378. * vma_start_write(dst_vma)
  1379. */
  1380. *src_vmap = lock_vma_under_rcu(mm, src_start);
  1381. if (likely(*src_vmap))
  1382. return 0;
  1383. /* Undo any locking and retry in mmap_lock critical section */
  1384. vma_end_read(*dst_vmap);
  1385. mmap_read_lock(mm);
  1386. err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
  1387. if (!err) {
  1388. /*
  1389. * See comment in uffd_lock_vma() as to why not using
  1390. * vma_start_read() here.
  1391. */
  1392. down_read(&(*dst_vmap)->vm_lock->lock);
  1393. if (*dst_vmap != *src_vmap)
  1394. down_read_nested(&(*src_vmap)->vm_lock->lock,
  1395. SINGLE_DEPTH_NESTING);
  1396. }
  1397. mmap_read_unlock(mm);
  1398. return err;
  1399. }
  1400. static void uffd_move_unlock(struct vm_area_struct *dst_vma,
  1401. struct vm_area_struct *src_vma)
  1402. {
  1403. vma_end_read(src_vma);
  1404. if (src_vma != dst_vma)
  1405. vma_end_read(dst_vma);
  1406. }
  1407. #else
  1408. static int uffd_move_lock(struct mm_struct *mm,
  1409. unsigned long dst_start,
  1410. unsigned long src_start,
  1411. struct vm_area_struct **dst_vmap,
  1412. struct vm_area_struct **src_vmap)
  1413. {
  1414. int err;
  1415. mmap_read_lock(mm);
  1416. err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
  1417. if (err)
  1418. mmap_read_unlock(mm);
  1419. return err;
  1420. }
  1421. static void uffd_move_unlock(struct vm_area_struct *dst_vma,
  1422. struct vm_area_struct *src_vma)
  1423. {
  1424. mmap_assert_locked(src_vma->vm_mm);
  1425. mmap_read_unlock(dst_vma->vm_mm);
  1426. }
  1427. #endif

/**
 * move_pages - move arbitrary anonymous pages of an existing vma
 * @ctx: pointer to the userfaultfd context
 * @dst_start: start of the destination virtual memory range
 * @src_start: start of the source virtual memory range
 * @len: length of the virtual memory range
 * @mode: flags from uffdio_move.mode
 *
 * It will either use the mmap_lock in read mode or per-vma locks.
 *
 * move_pages() remaps arbitrary anonymous pages atomically in zero
 * copy. It only works on non shared anonymous pages because those can
 * be relocated without generating non linear anon_vmas in the rmap
 * code.
 *
 * It provides a zero copy mechanism to handle userspace page faults.
 * The source vma pages should have mapcount == 1, which can be
 * enforced by using madvise(MADV_DONTFORK) on the src vma.
 *
 * The thread receiving the page during the userland page fault
 * will receive the faulting page in the source vma through the network,
 * storage or any other I/O device (MADV_DONTFORK in the source vma
 * avoids move_pages() failing with -EBUSY if the process forks before
 * move_pages() is called), then it will call move_pages() to map the
 * page at the faulting address in the destination vma.
 *
 * This userfaultfd command works purely via pagetables, so it's the
 * most efficient way to move physical non shared anonymous pages
 * across different virtual addresses. Unlike mremap()/mmap()/munmap()
 * it does not create any new vmas. The mapping in the destination
 * address is atomic.
 *
 * It only works if the vma protection bits are identical between the
 * source and destination vma.
 *
 * It can remap non shared anonymous pages within the same vma too.
 *
 * If the source virtual memory range has any unmapped holes, or if
 * the destination virtual memory range is not a whole unmapped hole,
 * move_pages() will fail respectively with -ENOENT or -EEXIST. This
 * provides a very strict behavior to avoid any chance of memory
 * corruption going unnoticed if there are userland race conditions.
 * Only one thread should resolve the userland page fault at any given
 * time for any given faulting address. This means that if two threads
 * try to both call move_pages() on the same destination address at the
 * same time, the second thread will get an explicit error from this
 * command.
 *
 * The command retval will be "len" if successful. The command
 * however can be interrupted by fatal signals or errors. If
 * interrupted it will return the number of bytes successfully
 * remapped before the interruption if any, or the negative error if
 * none. It will never return zero. Either it will return an error or
 * an amount of bytes successfully moved. If the retval reports a
 * "short" remap, the move_pages() command should be repeated by
 * userland with src+retval, dst+retval, len-retval if it wants to know
 * about the error that interrupted it.
 *
 * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
 * prevent -ENOENT errors from materializing if there are holes in the
 * source virtual range that is being remapped. The holes will be
 * accounted as successfully remapped in the retval of the
 * command. This is mostly useful to remap hugepage naturally aligned
 * virtual regions without knowing if there are transparent hugepages
 * in the regions or not, but preventing the risk of having to split
 * the hugepmd during the remap.
 *
 * If there's any rmap walk that is taking the anon_vma locks without
 * first obtaining the folio lock (the only current instance is
 * folio_referenced), it will have to verify if the folio->mapping
 * has changed after taking the anon_vma lock. If it changed, it
 * should release the lock and retry obtaining a new anon_vma, because
 * it means the anon_vma was changed by move_pages() before the lock
 * could be obtained. This is the only additional complexity added to
 * the rmap code to provide this anonymous page remapping functionality.
 */
ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
		   unsigned long src_start, unsigned long len, __u64 mode)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *src_vma, *dst_vma;
	unsigned long src_addr, dst_addr;
	pmd_t *src_pmd, *dst_pmd;
	long err = -EINVAL;
	ssize_t moved = 0;

	/* Sanitize the command parameters. */
	if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
	    WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
	    WARN_ON_ONCE(len & ~PAGE_MASK))
		goto out;

	/* Does the address range wrap, or is the span zero-sized? */
	if (WARN_ON_ONCE(src_start + len <= src_start) ||
	    WARN_ON_ONCE(dst_start + len <= dst_start))
		goto out;

	err = uffd_move_lock(mm, dst_start, src_start, &dst_vma, &src_vma);
	if (err)
		goto out;

	/* Re-check after taking map_changing_lock */
	err = -EAGAIN;
	down_read(&ctx->map_changing_lock);
	if (likely(atomic_read(&ctx->mmap_changing)))
		goto out_unlock;
	/*
	 * Make sure the vma is not shared, that the src and dst remap
	 * ranges are both valid and fully within a single existing
	 * vma.
	 */
	err = -EINVAL;
	if (src_vma->vm_flags & VM_SHARED)
		goto out_unlock;
	if (src_start + len > src_vma->vm_end)
		goto out_unlock;

	if (dst_vma->vm_flags & VM_SHARED)
		goto out_unlock;
	if (dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	err = validate_move_areas(ctx, src_vma, dst_vma);
	if (err)
		goto out_unlock;

	for (src_addr = src_start, dst_addr = dst_start;
	     src_addr < src_start + len;) {
		spinlock_t *ptl;
		pmd_t dst_pmdval;
		unsigned long step_size;

		/*
		 * Below works because anonymous area would not have a
		 * transparent huge PUD. If file-backed support is added,
		 * that case would need to be handled here.
		 */
		src_pmd = mm_find_pmd(mm, src_addr);
		if (unlikely(!src_pmd)) {
			if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
				err = -ENOENT;
				break;
			}
			src_pmd = mm_alloc_pmd(mm, src_addr);
			if (unlikely(!src_pmd)) {
				err = -ENOMEM;
				break;
			}
		}
		dst_pmd = mm_alloc_pmd(mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't override it and just
		 * be strict. If dst_pmd changes into THP after this check, the
		 * move_pages_huge_pmd() will detect the change and retry
		 * while move_pages_pte() will detect the change and fail.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}

		ptl = pmd_trans_huge_lock(src_pmd, src_vma);
		if (ptl) {
			if (pmd_devmap(*src_pmd)) {
				spin_unlock(ptl);
				err = -ENOENT;
				break;
			}

			/* Check if we can move the pmd without splitting it. */
			if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
			    !pmd_none(dst_pmdval)) {
				/* Can be a migration entry */
				if (pmd_present(*src_pmd)) {
					struct folio *folio = pmd_folio(*src_pmd);

					if (!is_huge_zero_folio(folio) &&
					    !PageAnonExclusive(&folio->page)) {
						spin_unlock(ptl);
						err = -EBUSY;
						break;
					}
				}

				spin_unlock(ptl);
				split_huge_pmd(src_vma, src_pmd, src_addr);
				/* The folio will be split by move_pages_pte() */
				continue;
			}

			err = move_pages_huge_pmd(mm, dst_pmd, src_pmd,
						  dst_pmdval, dst_vma, src_vma,
						  dst_addr, src_addr);
			step_size = HPAGE_PMD_SIZE;
		} else {
			if (pmd_none(*src_pmd)) {
				if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
					err = -ENOENT;
					break;
				}
				if (unlikely(__pte_alloc(mm, src_pmd))) {
					err = -ENOMEM;
					break;
				}
			}

			if (unlikely(pte_alloc(mm, dst_pmd))) {
				err = -ENOMEM;
				break;
			}

			err = move_pages_pte(mm, dst_pmd, src_pmd,
					     dst_vma, src_vma,
					     dst_addr, src_addr, mode);
			step_size = PAGE_SIZE;
		}

		cond_resched();

		if (fatal_signal_pending(current)) {
			/* Do not override an error */
			if (!err || err == -EAGAIN)
				err = -EINTR;
			break;
		}

		if (err) {
			if (err == -EAGAIN)
				continue;
			break;
		}

		/* Proceed to the next page */
		dst_addr += step_size;
		src_addr += step_size;
		moved += step_size;
	}

out_unlock:
	up_read(&ctx->map_changing_lock);
	uffd_move_unlock(dst_vma, src_vma);
out:
	VM_WARN_ON(moved < 0);
	VM_WARN_ON(err > 0);
	VM_WARN_ON(!moved && !err);
	return moved ? moved : err;
}
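
/*
 * Illustrative userland sketch (not part of this file's build): driving
 * UFFDIO_MOVE and handling the "short move" retry described in the
 * comment above.  It assumes "uffd" is a userfaultfd file descriptor
 * with the destination range registered, that dst/src/len describe
 * page-aligned ranges, and it uses struct uffdio_move from
 * <linux/userfaultfd.h>.  On a short move the kernel is expected to
 * report the bytes already moved in the "move" field and fail the ioctl
 * with EAGAIN, so the caller advances the ranges and retries:
 *
 *	struct uffdio_move mv = {
 *		.dst = dst, .src = src, .len = len, .mode = 0,
 *	};
 *
 *	while (mv.len && ioctl(uffd, UFFDIO_MOVE, &mv) == -1) {
 *		if (mv.move > 0) {
 *			mv.dst += mv.move;
 *			mv.src += mv.move;
 *			mv.len -= mv.move;
 *			mv.move = 0;
 *		} else if (errno != EAGAIN) {
 *			break;
 *		}
 *	}
 */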

static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
				     vm_flags_t flags)
{
	const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;

	vm_flags_reset(vma, flags);
	/*
	 * For shared mappings, we want to enable writenotify while
	 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
	 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
	 */
	if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
		vma_set_page_prot(vma);
}

static void userfaultfd_set_ctx(struct vm_area_struct *vma,
				struct userfaultfd_ctx *ctx,
				unsigned long flags)
{
	vma_start_write(vma);
	vma->vm_userfaultfd_ctx = (struct vm_userfaultfd_ctx){ctx};
	userfaultfd_set_vm_flags(vma,
				 (vma->vm_flags & ~__VM_UFFD_FLAGS) | flags);
}

void userfaultfd_reset_ctx(struct vm_area_struct *vma)
{
	userfaultfd_set_ctx(vma, NULL, 0);
}

struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
					     struct vm_area_struct *prev,
					     struct vm_area_struct *vma,
					     unsigned long start,
					     unsigned long end)
{
	struct vm_area_struct *ret;
	bool give_up_on_oom = false;

	/*
	 * If we are modifying only and not splitting, just give up on the merge
	 * if OOM prevents us from merging successfully.
	 */
	if (start == vma->vm_start && end == vma->vm_end)
		give_up_on_oom = true;

	/* Reset ptes for the whole vma range if wr-protected */
	if (userfaultfd_wp(vma))
		uffd_wp_range(vma, start, end - start, false);

	ret = vma_modify_flags_uffd(vmi, prev, vma, start, end,
				    vma->vm_flags & ~__VM_UFFD_FLAGS,
				    NULL_VM_UFFD_CTX, give_up_on_oom);

	/*
	 * In the vma_merge() successful mprotect-like case 8:
	 * the next vma was merged into the current one and
	 * the current one has not been updated yet.
	 */
	if (!IS_ERR(ret))
		userfaultfd_reset_ctx(ret);

	return ret;
}

/* Assumes mmap write lock taken, and mm_struct pinned. */
int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
			       struct vm_area_struct *vma,
			       unsigned long vm_flags,
			       unsigned long start, unsigned long end,
			       bool wp_async)
{
	VMA_ITERATOR(vmi, ctx->mm, start);
	struct vm_area_struct *prev = vma_prev(&vmi);
	unsigned long vma_end;
	unsigned long new_flags;

	if (vma->vm_start < start)
		prev = vma;

	for_each_vma_range(vmi, vma, end) {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async));
		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
		       vma->vm_userfaultfd_ctx.ctx != ctx);
		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));

		/*
		 * Nothing to do: this vma is already registered into this
		 * userfaultfd and with the right tracking mode too.
		 */
		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
		    (vma->vm_flags & vm_flags) == vm_flags)
			goto skip;

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
		vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
					    new_flags,
					    (struct vm_userfaultfd_ctx){ctx},
					    /* give_up_on_oom = */false);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		userfaultfd_set_ctx(vma, ctx, vm_flags);

		if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
			hugetlb_unshare_all_pmds(vma);

skip:
		prev = vma;
		start = vma->vm_end;
	}

	return 0;
}
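
/*
 * For reference, this registration path is normally reached from the
 * UFFDIO_REGISTER ioctl in fs/userfaultfd.c.  A minimal userland sketch
 * (not part of this file; addr/len are assumed to describe a page-aligned
 * range), using struct uffdio_register from <linux/userfaultfd.h>:
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (__u64)(unsigned long)addr, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
 *		err(1, "UFFDIO_REGISTER");
 *
 * On success reg.ioctls reports which resolution ioctls (for example
 * UFFDIO_COPY) are available for the registered range.
 */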

void userfaultfd_release_new(struct userfaultfd_ctx *ctx)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/* the various vma->vm_userfaultfd_ctx still points to it */
	mmap_write_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma->vm_userfaultfd_ctx.ctx == ctx)
			userfaultfd_reset_ctx(vma);
	}
	mmap_write_unlock(mm);
}

void userfaultfd_release_all(struct mm_struct *mm,
			     struct userfaultfd_ctx *ctx)
{
	struct vm_area_struct *vma, *prev;
	VMA_ITERATOR(vmi, mm, 0);

	if (!mmget_not_zero(mm))
		return;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
	 * changes while handle_userfault released the mmap_lock. So
	 * it's critical that released is set to true (by the caller),
	 * before taking the mmap_lock for writing.
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for_each_vma(vmi, vma) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}

		vma = userfaultfd_clear_vma(&vmi, prev, vma,
					    vma->vm_start, vma->vm_end);
		prev = vma;
	}
	mmap_write_unlock(mm);
	mmput(mm);
}
  1809. }