gup.c 107 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
7377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. #include <linux/kernel.h>
  3. #include <linux/errno.h>
  4. #include <linux/err.h>
  5. #include <linux/spinlock.h>
  6. #include <linux/mm.h>
  7. #include <linux/memfd.h>
  8. #include <linux/memremap.h>
  9. #include <linux/pagemap.h>
  10. #include <linux/rmap.h>
  11. #include <linux/swap.h>
  12. #include <linux/swapops.h>
  13. #include <linux/secretmem.h>
  14. #include <linux/sched/signal.h>
  15. #include <linux/rwsem.h>
  16. #include <linux/hugetlb.h>
  17. #include <linux/migrate.h>
  18. #include <linux/mm_inline.h>
  19. #include <linux/pagevec.h>
  20. #include <linux/sched/mm.h>
  21. #include <linux/shmem_fs.h>
  22. #include <asm/mmu_context.h>
  23. #include <asm/tlbflush.h>
  24. #include "internal.h"
/*
 * Per-walk state threaded through the follow_page_*() helpers.
 */
struct follow_page_context {
	/* ZONE_DEVICE pagemap picked up during the walk — presumably must be
	 * put by the caller when the walk finishes; verify against callers. */
	struct dev_pagemap *pgmap;
	/* Mask describing the span of the mapping that was followed
	 * (assumed (nr_base_pages - 1) for huge mappings — TODO confirm). */
	unsigned int page_mask;
};
/*
 * Debug-only sanity check (compiled out unless CONFIG_DEBUG_VM): verify
 * that every non-NULL anonymous page in @pages is still mapped exclusively,
 * which is an invariant for pages that were pinned via FOLL_PIN.
 * NULL entries and the shared zero page are skipped.
 */
static inline void sanity_check_pinned_pages(struct page **pages,
					     unsigned long npages)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	/*
	 * We only pin anonymous pages if they are exclusive. Once pinned, we
	 * can no longer turn them possibly shared and PageAnonExclusive() will
	 * stick around until the page is freed.
	 *
	 * We'd like to verify that our pinned anonymous pages are still mapped
	 * exclusively. The issue with anon THP is that we don't know how
	 * they are/were mapped when pinning them. However, for anon
	 * THP we can assume that either the given page (PTE-mapped THP) or
	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
	 * neither is the case, there is certainly something wrong.
	 */
	for (; npages; npages--, pages++) {
		struct page *page = *pages;
		struct folio *folio;

		if (!page)
			continue;

		folio = page_folio(page);

		if (is_zero_page(page) ||
		    !folio_test_anon(folio))
			continue;
		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
		else
			/* Either a PTE-mapped or a PMD-mapped THP. */
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
				       !PageAnonExclusive(page), page);
	}
}
/*
 * Return the folio with ref appropriately incremented,
 * or NULL if that failed.
 *
 * The increment uses folio_ref_try_add(), which only succeeds on a folio
 * whose refcount is non-zero, so this is safe to call on a page whose
 * folio might be concurrently freed or split.
 */
static inline struct folio *try_get_folio(struct page *page, int refs)
{
	struct folio *folio;

retry:
	folio = page_folio(page);
	/* A negative refcount indicates refcount corruption; bail out. */
	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
		return NULL;
	if (unlikely(!folio_ref_try_add(folio, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the folio; but it
	 * could be that between calling page_folio() and the refcount
	 * increment, the folio was split, in which case we'd end up
	 * holding a reference on a folio that has nothing to do with the page
	 * we were given anymore.
	 * So now that the folio is stable, recheck that the page still
	 * belongs to this folio.
	 */
	if (unlikely(page_folio(page) != folio)) {
		/* Drop the refs we just took and retry against the new folio. */
		if (!put_devmap_managed_folio_refs(folio, refs))
			folio_put_refs(folio, refs);
		goto retry;
	}

	return folio;
}
/*
 * Release @refs references on @folio that were acquired with the given GUP
 * @flags. For FOLL_PIN, also undo the pin accounting: large folios track
 * pins in _pincount, small folios encode each pin as GUP_PIN_COUNTING_BIAS
 * refcount units, and the zero folio is never actually pinned at all.
 */
static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		/* Pins on the zero folio are not taken, so nothing to undo. */
		if (is_zero_folio(folio))
			return;
		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
		if (folio_test_large(folio))
			atomic_sub(refs, &folio->_pincount);
		else
			/* Small folio: each pin holds GUP_PIN_COUNTING_BIAS refs. */
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	if (!put_devmap_managed_folio_refs(folio, refs))
		folio_put_refs(folio, refs);
}
  106. /**
  107. * try_grab_folio() - add a folio's refcount by a flag-dependent amount
  108. * @folio: pointer to folio to be grabbed
  109. * @refs: the value to (effectively) add to the folio's refcount
  110. * @flags: gup flags: these are the FOLL_* flag values
  111. *
  112. * This might not do anything at all, depending on the flags argument.
  113. *
  114. * "grab" names in this file mean, "look at flags to decide whether to use
  115. * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
  116. *
  117. * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
  118. * time.
  119. *
  120. * Return: 0 for success, or if no action was required (if neither FOLL_PIN
  121. * nor FOLL_GET was set, nothing is done). A negative error code for failure:
  122. *
  123. * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not
  124. * be grabbed.
  125. *
  126. * It is called when we have a stable reference for the folio, typically in
  127. * GUP slow path.
  128. */
  129. int __must_check try_grab_folio(struct folio *folio, int refs,
  130. unsigned int flags)
  131. {
  132. if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
  133. return -ENOMEM;
  134. if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page)))
  135. return -EREMOTEIO;
  136. if (flags & FOLL_GET)
  137. folio_ref_add(folio, refs);
  138. else if (flags & FOLL_PIN) {
  139. /*
  140. * Don't take a pin on the zero page - it's not going anywhere
  141. * and it is used in a *lot* of places.
  142. */
  143. if (is_zero_folio(folio))
  144. return 0;
  145. /*
  146. * Increment the normal page refcount field at least once,
  147. * so that the page really is pinned.
  148. */
  149. if (folio_test_large(folio)) {
  150. folio_ref_add(folio, refs);
  151. atomic_add(refs, &folio->_pincount);
  152. } else {
  153. folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
  154. }
  155. node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
  156. }
  157. return 0;
  158. }
  159. /**
  160. * unpin_user_page() - release a dma-pinned page
  161. * @page: pointer to page to be released
  162. *
  163. * Pages that were pinned via pin_user_pages*() must be released via either
  164. * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
  165. * that such pages can be separately tracked and uniquely handled. In
  166. * particular, interactions with RDMA and filesystems need special handling.
  167. */
  168. void unpin_user_page(struct page *page)
  169. {
  170. sanity_check_pinned_pages(&page, 1);
  171. gup_put_folio(page_folio(page), 1, FOLL_PIN);
  172. }
  173. EXPORT_SYMBOL(unpin_user_page);
  174. /**
  175. * unpin_folio() - release a dma-pinned folio
  176. * @folio: pointer to folio to be released
  177. *
  178. * Folios that were pinned via memfd_pin_folios() or other similar routines
  179. * must be released either using unpin_folio() or unpin_folios().
  180. */
  181. void unpin_folio(struct folio *folio)
  182. {
  183. gup_put_folio(folio, 1, FOLL_PIN);
  184. }
  185. EXPORT_SYMBOL_GPL(unpin_folio);
  186. /**
  187. * folio_add_pin - Try to get an additional pin on a pinned folio
  188. * @folio: The folio to be pinned
  189. *
  190. * Get an additional pin on a folio we already have a pin on. Makes no change
  191. * if the folio is a zero_page.
  192. */
  193. void folio_add_pin(struct folio *folio)
  194. {
  195. if (is_zero_folio(folio))
  196. return;
  197. /*
  198. * Similar to try_grab_folio(): be sure to *also* increment the normal
  199. * page refcount field at least once, so that the page really is
  200. * pinned.
  201. */
  202. if (folio_test_large(folio)) {
  203. WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
  204. folio_ref_inc(folio);
  205. atomic_inc(&folio->_pincount);
  206. } else {
  207. WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
  208. folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
  209. }
  210. }
  211. static inline struct folio *gup_folio_range_next(struct page *start,
  212. unsigned long npages, unsigned long i, unsigned int *ntails)
  213. {
  214. struct page *next = nth_page(start, i);
  215. struct folio *folio = page_folio(next);
  216. unsigned int nr = 1;
  217. if (folio_test_large(folio))
  218. nr = min_t(unsigned int, npages - i,
  219. folio_nr_pages(folio) - folio_page_idx(folio, next));
  220. *ntails = nr;
  221. return folio;
  222. }
  223. static inline struct folio *gup_folio_next(struct page **list,
  224. unsigned long npages, unsigned long i, unsigned int *ntails)
  225. {
  226. struct folio *folio = page_folio(list[i]);
  227. unsigned int nr;
  228. for (nr = i + 1; nr < npages; nr++) {
  229. if (page_folio(list[nr]) != folio)
  230. break;
  231. }
  232. *ntails = nr - i;
  233. return folio;
  234. }
/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	if (!make_dirty) {
		/* Plain release path handles NULL entries and sanity checks. */
		unpin_user_pages(pages, npages);
		return;
	}

	sanity_check_pinned_pages(pages, npages);
	/* Process the array in runs of consecutive same-folio entries. */
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * folio_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
  299. /**
  300. * unpin_user_page_range_dirty_lock() - release and optionally dirty
  301. * gup-pinned page range
  302. *
  303. * @page: the starting page of a range maybe marked dirty, and definitely released.
  304. * @npages: number of consecutive pages to release.
  305. * @make_dirty: whether to mark the pages dirty
  306. *
  307. * "gup-pinned page range" refers to a range of pages that has had one of the
  308. * pin_user_pages() variants called on that page.
  309. *
  310. * For the page ranges defined by [page .. page+npages], make that range (or
  311. * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
  312. * page range was previously listed as clean.
  313. *
  314. * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
  315. * required, then the caller should a) verify that this is really correct,
  316. * because _lock() is usually required, and b) hand code it:
  317. * set_page_dirty_lock(), unpin_user_page().
  318. *
  319. */
  320. void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
  321. bool make_dirty)
  322. {
  323. unsigned long i;
  324. struct folio *folio;
  325. unsigned int nr;
  326. for (i = 0; i < npages; i += nr) {
  327. folio = gup_folio_range_next(page, npages, i, &nr);
  328. if (make_dirty && !folio_test_dirty(folio)) {
  329. folio_lock(folio);
  330. folio_mark_dirty(folio);
  331. folio_unlock(folio);
  332. }
  333. gup_put_folio(folio, nr, FOLL_PIN);
  334. }
  335. }
  336. EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
  337. static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages)
  338. {
  339. unsigned long i;
  340. struct folio *folio;
  341. unsigned int nr;
  342. /*
  343. * Don't perform any sanity checks because we might have raced with
  344. * fork() and some anonymous pages might now actually be shared --
  345. * which is why we're unpinning after all.
  346. */
  347. for (i = 0; i < npages; i += nr) {
  348. folio = gup_folio_next(pages, npages, i, &nr);
  349. gup_put_folio(folio, nr, FOLL_PIN);
  350. }
  351. }
  352. /**
  353. * unpin_user_pages() - release an array of gup-pinned pages.
  354. * @pages: array of pages to be marked dirty and released.
  355. * @npages: number of pages in the @pages array.
  356. *
  357. * For each page in the @pages array, release the page using unpin_user_page().
  358. *
  359. * Please see the unpin_user_page() documentation for details.
  360. */
  361. void unpin_user_pages(struct page **pages, unsigned long npages)
  362. {
  363. unsigned long i;
  364. struct folio *folio;
  365. unsigned int nr;
  366. /*
  367. * If this WARN_ON() fires, then the system *might* be leaking pages (by
  368. * leaving them pinned), but probably not. More likely, gup/pup returned
  369. * a hard -ERRNO error to the caller, who erroneously passed it here.
  370. */
  371. if (WARN_ON(IS_ERR_VALUE(npages)))
  372. return;
  373. sanity_check_pinned_pages(pages, npages);
  374. for (i = 0; i < npages; i += nr) {
  375. if (!pages[i]) {
  376. nr = 1;
  377. continue;
  378. }
  379. folio = gup_folio_next(pages, npages, i, &nr);
  380. gup_put_folio(folio, nr, FOLL_PIN);
  381. }
  382. }
  383. EXPORT_SYMBOL(unpin_user_pages);
  384. /**
  385. * unpin_user_folio() - release pages of a folio
  386. * @folio: pointer to folio to be released
  387. * @npages: number of pages of same folio
  388. *
  389. * Release npages of the folio
  390. */
  391. void unpin_user_folio(struct folio *folio, unsigned long npages)
  392. {
  393. gup_put_folio(folio, npages, FOLL_PIN);
  394. }
  395. EXPORT_SYMBOL(unpin_user_folio);
  396. /**
  397. * unpin_folios() - release an array of gup-pinned folios.
  398. * @folios: array of folios to be marked dirty and released.
  399. * @nfolios: number of folios in the @folios array.
  400. *
  401. * For each folio in the @folios array, release the folio using gup_put_folio.
  402. *
  403. * Please see the unpin_folio() documentation for details.
  404. */
  405. void unpin_folios(struct folio **folios, unsigned long nfolios)
  406. {
  407. unsigned long i = 0, j;
  408. /*
  409. * If this WARN_ON() fires, then the system *might* be leaking folios
  410. * (by leaving them pinned), but probably not. More likely, gup/pup
  411. * returned a hard -ERRNO error to the caller, who erroneously passed
  412. * it here.
  413. */
  414. if (WARN_ON(IS_ERR_VALUE(nfolios)))
  415. return;
  416. while (i < nfolios) {
  417. for (j = i + 1; j < nfolios; j++)
  418. if (folios[i] != folios[j])
  419. break;
  420. if (folios[i])
  421. gup_put_folio(folios[i], j - i, FOLL_PIN);
  422. i = j;
  423. }
  424. }
  425. EXPORT_SYMBOL_GPL(unpin_folios);
  426. /*
  427. * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
  428. * lifecycle. Avoid setting the bit unless necessary, or it might cause write
  429. * cache bouncing on large SMP machines for concurrent pinned gups.
  430. */
  431. static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
  432. {
  433. if (!test_bit(MMF_HAS_PINNED, mm_flags))
  434. set_bit(MMF_HAS_PINNED, mm_flags);
  435. }
  436. #ifdef CONFIG_MMU
  437. #ifdef CONFIG_HAVE_GUP_FAST
  438. static int record_subpages(struct page *page, unsigned long sz,
  439. unsigned long addr, unsigned long end,
  440. struct page **pages)
  441. {
  442. struct page *start_page;
  443. int nr;
  444. start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
  445. for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
  446. pages[nr] = nth_page(start_page, nr);
  447. return nr;
  448. }
/**
 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
 * @page: pointer to page to be grabbed
 * @refs: the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 * FOLL_GET: folio's refcount will be incremented by @refs.
 *
 * FOLL_PIN on large folios: folio's refcount will be incremented by
 * @refs, and its pincount will be incremented by @refs.
 *
 * FOLL_PIN on single-page folios: folio's refcount will be incremented by
 * @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: The folio containing @page (with refcount appropriately
 * incremented) for success, or NULL upon failure. If neither FOLL_GET
 * nor FOLL_PIN was set, that's considered failure, and furthermore,
 * a likely bug in the caller, so a warning is also emitted.
 *
 * It uses add ref unless zero to elevate the folio refcount and must be called
 * in fast path only.
 */
static struct folio *try_grab_folio_fast(struct page *page, int refs,
					 unsigned int flags)
{
	struct folio *folio;

	/* Raise warn if it is not called in fast GUP */
	VM_WARN_ON_ONCE(!irqs_disabled());

	if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
		return NULL;

	/* PCI P2PDMA pages are only grabbable when the caller opted in. */
	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
		return NULL;

	if (flags & FOLL_GET)
		return try_get_folio(page, refs);

	/* FOLL_PIN is set */

	/*
	 * Don't take a pin on the zero page - it's not going anywhere
	 * and it is used in a *lot* of places.
	 */
	if (is_zero_page(page))
		return page_folio(page);

	folio = try_get_folio(page, refs);
	if (!folio)
		return NULL;

	/*
	 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
	 * right zone, so fail and let the caller fall back to the slow
	 * path.
	 */
	if (unlikely((flags & FOLL_LONGTERM) &&
		     !folio_is_longterm_pinnable(folio))) {
		/* Undo the reference taken by try_get_folio() above. */
		if (!put_devmap_managed_folio_refs(folio, refs))
			folio_put_refs(folio, refs);
		return NULL;
	}

	/*
	 * When pinning a large folio, use an exact count to track it.
	 *
	 * However, be sure to *also* increment the normal folio
	 * refcount field at least once, so that the folio really
	 * is pinned. That's why the refcount from the earlier
	 * try_get_folio() is left intact.
	 */
	if (folio_test_large(folio))
		atomic_add(refs, &folio->_pincount);
	else
		folio_ref_add(folio,
				refs * (GUP_PIN_COUNTING_BIAS - 1));
	/*
	 * Adjust the pincount before re-checking the PTE for changes.
	 * This is essentially a smp_mb() and is paired with a memory
	 * barrier in folio_try_share_anon_rmap_*().
	 */
	smp_mb__after_atomic();

	node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);

	return folio;
}
  533. #endif /* CONFIG_HAVE_GUP_FAST */
  534. static struct page *no_page_table(struct vm_area_struct *vma,
  535. unsigned int flags, unsigned long address)
  536. {
  537. if (!(flags & FOLL_DUMP))
  538. return NULL;
  539. /*
  540. * When core dumping, we don't want to allocate unnecessary pages or
  541. * page tables. Return error instead of NULL to skip handle_mm_fault,
  542. * then get_dump_page() will return NULL to leave a hole in the dump.
  543. * But we can only make this optimization where a hole would surely
  544. * be zero-filled if handle_mm_fault() actually did handle it.
  545. */
  546. if (is_vm_hugetlb_page(vma)) {
  547. struct hstate *h = hstate_vma(vma);
  548. if (!hugetlbfs_pagecache_present(h, vma, address))
  549. return ERR_PTR(-EFAULT);
  550. } else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) {
  551. return ERR_PTR(-EFAULT);
  552. }
  553. return NULL;
  554. }
  555. #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
/*
 * Look up the page mapped by a PUD leaf. The caller must hold the PUD
 * page table lock (asserted below). Returns the subpage for @addr with a
 * ref/pin taken per @flags, NULL when the caller should fault instead, or
 * an ERR_PTR() (-EMLINK: unshare required, -EEXIST: no ref management
 * requested for a devmap page, -EFAULT: no pgmap).
 */
static struct page *follow_huge_pud(struct vm_area_struct *vma,
				    unsigned long addr, pud_t *pudp,
				    int flags, struct follow_page_context *ctx)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	pud_t pud = *pudp;
	unsigned long pfn = pud_pfn(pud);
	int ret;

	assert_spin_locked(pud_lockptr(mm, pudp));

	/* Write access through a non-writable PUD must go via a fault. */
	if ((flags & FOLL_WRITE) && !pud_write(pud))
		return NULL;

	if (!pud_present(pud))
		return NULL;

	/* Advance from the huge-page base pfn to the pfn mapping @addr. */
	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    pud_devmap(pud)) {
		/*
		 * device mapped pages can only be returned if the caller
		 * will manage the page reference count.
		 *
		 * At least one of FOLL_GET | FOLL_PIN must be set, so
		 * assert that here:
		 */
		if (!(flags & (FOLL_GET | FOLL_PIN)))
			return ERR_PTR(-EEXIST);

		if (flags & FOLL_TOUCH)
			touch_pud(vma, addr, pudp, flags & FOLL_WRITE);

		/* Cached in ctx so repeated lookups stay cheap. */
		ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
		if (!ctx->pgmap)
			return ERR_PTR(-EFAULT);
	}

	page = pfn_to_page(pfn);

	/* -EMLINK tells the caller to trigger an unshare fault first. */
	if (!pud_devmap(pud) && !pud_write(pud) &&
	    gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	ret = try_grab_folio(page_folio(page), 1, flags);
	if (ret)
		page = ERR_PTR(ret);
	else
		ctx->page_mask = HPAGE_PUD_NR - 1;

	return page;
}
/*
 * FOLL_FORCE can write to even unwritable PMDs in COW mappings.
 *
 * Returns true if a write through this PMD is possible (or may safely be
 * forced) without first taking a write fault. The guard order below
 * mirrors can_follow_write_pte()/can_change_pte_writable().
 */
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pmd is writable, we can write to the page. */
	if (pmd_write(pmd))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (pmd_needs_soft_dirty_wp(vma, pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}
  630. static struct page *follow_huge_pmd(struct vm_area_struct *vma,
  631. unsigned long addr, pmd_t *pmd,
  632. unsigned int flags,
  633. struct follow_page_context *ctx)
  634. {
  635. struct mm_struct *mm = vma->vm_mm;
  636. pmd_t pmdval = *pmd;
  637. struct page *page;
  638. int ret;
  639. assert_spin_locked(pmd_lockptr(mm, pmd));
  640. page = pmd_page(pmdval);
  641. if ((flags & FOLL_WRITE) &&
  642. !can_follow_write_pmd(pmdval, page, vma, flags))
  643. return NULL;
  644. /* Avoid dumping huge zero page */
  645. if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
  646. return ERR_PTR(-EFAULT);
  647. if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
  648. return NULL;
  649. if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
  650. return ERR_PTR(-EMLINK);
  651. VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
  652. !PageAnonExclusive(page), page);
  653. ret = try_grab_folio(page_folio(page), 1, flags);
  654. if (ret)
  655. return ERR_PTR(ret);
  656. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  657. if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
  658. touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
  659. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  660. page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
  661. ctx->page_mask = HPAGE_PMD_NR - 1;
  662. return page;
  663. }
  664. #else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
static struct page *follow_huge_pud(struct vm_area_struct *vma,
				    unsigned long addr, pud_t *pudp,
				    int flags, struct follow_page_context *ctx)
{
	/* No huge page-table leaves configured: nothing to follow here. */
	return NULL;
}
static struct page *follow_huge_pmd(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmd,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	/* No huge page-table leaves configured: nothing to follow here. */
	return NULL;
}
  678. #endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
  679. static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  680. pte_t *pte, unsigned int flags)
  681. {
  682. if (flags & FOLL_TOUCH) {
  683. pte_t orig_entry = ptep_get(pte);
  684. pte_t entry = orig_entry;
  685. if (flags & FOLL_WRITE)
  686. entry = pte_mkdirty(entry);
  687. entry = pte_mkyoung(entry);
  688. if (!pte_same(orig_entry, entry)) {
  689. set_pte_at(vma->vm_mm, address, pte, entry);
  690. update_mmu_cache(vma, address, pte);
  691. }
  692. }
  693. /* Proper page table entry exists, but no corresponding struct page */
  694. return -EEXIST;
  695. }
/*
 * FOLL_FORCE can write to even unwritable PTEs in COW mappings.
 *
 * Returns true if a write through this PTE is possible (or may safely be
 * forced) without first taking a write fault. The guard order below
 * mirrors can_follow_write_pmd()/can_change_pte_writable().
 */
static inline bool can_follow_write_pte(pte_t pte, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pte is writable, we can write to the page. */
	if (pte_write(pte))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (pte_needs_soft_dirty_wp(vma, pte))
		return false;
	return !userfaultfd_pte_wp(vma, pte);
}
/*
 * Walk the PTE level for @address and return the mapped page.
 *
 * Returns the struct page with a reference/pin taken per @flags, NULL
 * when the caller should fault the page in, or an ERR_PTR(): -EMLINK when
 * an unshare fault is required first, -EEXIST for a PFN mapping with no
 * struct page (via follow_pfn_pte()), or another negative errno.
 */
static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct folio *folio;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return no_page_table(vma, flags, address);
	pte = ptep_get(ptep);
	if (!pte_present(pte))
		goto no_page;
	if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
		goto no_page;

	page = vm_normal_page(vma, address, pte);

	/*
	 * We only care about anon pages in can_follow_write_pte() and don't
	 * have to worry about pte_devmap() because they are never anon.
	 */
	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pte(pte, page, vma, flags)) {
		page = NULL;
		goto out;
	}

	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	folio = page_folio(page);

	/* Caller must trigger a FAULT_FLAG_UNSHARE fault and retry. */
	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
		page = ERR_PTR(-EMLINK);
		goto out;
	}

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
		       !PageAnonExclusive(page), page);

	/* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
	ret = try_grab_folio(folio, 1, flags);
	if (unlikely(ret)) {
		page = ERR_PTR(ret);
		goto out;
	}

	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case). Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_folio_accessible(folio);
		if (ret) {
			/* Drop the pin taken by try_grab_folio() above. */
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	/* A non-none, non-followable PTE means "no page" rather than a hole. */
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags, address);
}
/*
 * Walk the PMD level for @address: handle devmap and huge PMDs here,
 * optionally splitting a THP when FOLL_SPLIT_PMD is set, and descend to
 * follow_page_pte() for normal page tables. Return conventions follow
 * follow_page_mask().
 */
static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/* Lockless snapshot; revalidated under the PMD lock below. */
	pmdval = pmdp_get_lockless(pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags, address);
	if (!pmd_present(pmdval))
		return no_page_table(vma, flags, address);
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
		return no_page_table(vma, flags, address);
	}
	if (likely(!pmd_leaf(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
		return no_page_table(vma, flags, address);

	ptl = pmd_lock(mm, pmd);
	/* Re-read under the lock: the entry may have changed since. */
	pmdval = *pmd;
	if (unlikely(!pmd_present(pmdval))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags, address);
	}
	if (unlikely(!pmd_leaf(pmdval))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
		spin_unlock(ptl);
		split_huge_pmd(vma, pmd, address);
		/* If pmd was left empty, stuff a page table in there quickly */
		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_huge_pmd(vma, address, pmd, flags, ctx);
	spin_unlock(ptl);
	return page;
}
/*
 * Walk the PUD level for @address: dispatch PUD leaves to
 * follow_huge_pud() under the PUD lock, otherwise descend to the PMD
 * level. Return conventions follow follow_page_mask().
 */
static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pudp, pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pudp = pud_offset(p4dp, address);
	/* Single atomic read: the entry may be changing concurrently. */
	pud = READ_ONCE(*pudp);
	if (!pud_present(pud))
		return no_page_table(vma, flags, address);
	if (pud_leaf(pud)) {
		ptl = pud_lock(mm, pudp);
		page = follow_huge_pud(vma, address, pudp, flags, ctx);
		spin_unlock(ptl);
		if (page)
			return page;
		return no_page_table(vma, flags, address);
	}
	if (unlikely(pud_bad(pud)))
		return no_page_table(vma, flags, address);

	return follow_pmd_mask(vma, address, pudp, flags, ctx);
}
  903. static struct page *follow_p4d_mask(struct vm_area_struct *vma,
  904. unsigned long address, pgd_t *pgdp,
  905. unsigned int flags,
  906. struct follow_page_context *ctx)
  907. {
  908. p4d_t *p4dp, p4d;
  909. p4dp = p4d_offset(pgdp, address);
  910. p4d = READ_ONCE(*p4dp);
  911. BUILD_BUG_ON(p4d_leaf(p4d));
  912. if (!p4d_present(p4d) || p4d_bad(p4d))
  913. return no_page_table(vma, flags, address);
  914. return follow_pud_mask(vma, address, p4dp, flags, ctx);
  915. }
/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 * pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * When getting an anonymous page and the caller has to trigger unsharing
 * of a shared anonymous page first, -EMLINK is returned. The caller should
 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
 * relevant with FOLL_PIN and !FOLL_WRITE.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	/* Paired with vma_pgtable_walk_end() before returning. */
	vma_pgtable_walk_begin(vma);

	/* Default to a small page; the walkers widen it for huge leaves. */
	ctx->page_mask = 0;
	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		page = no_page_table(vma, flags, address);
	else
		page = follow_p4d_mask(vma, address, pgd, flags, ctx);

	vma_pgtable_walk_end(vma);

	return page;
}
/*
 * Resolve @address inside the gate area (e.g. the vsyscall page) by
 * manually walking the page tables, since gate mappings have no regular
 * VMA coverage. On success *@vma is set to the gate VMA and, when @page
 * is non-NULL, *@page receives the referenced page. Returns 0 on success
 * or -EFAULT. Gate pages are read-only, so FOLL_WRITE always fails.
 */
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	/* Kernel-half addresses walk the kernel page tables instead. */
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	pte = pte_offset_map(pmd, address);
	if (!pte)
		return -EFAULT;
	entry = ptep_get(pte);
	if (pte_none(entry))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, entry);
	if (!*page) {
		/* Only the zero page may lack a struct page here. */
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
			goto unmap;
		*page = pte_page(entry);
	}
	ret = try_grab_folio(page_folio(*page), 1, gup_flags);
	if (unlikely(ret))
		goto unmap;
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}
/*
 * Fault in the page at @address, translating FOLL_* gup flags into
 * FAULT_FLAG_* fault flags. @unshare requests a FAULT_FLAG_UNSHARE fault
 * (after a -EMLINK from the page walker).
 *
 * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not
 * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set
 * to 0 and -EBUSY returned.
 *
 * Returns 0 on success (caller should retry the lookup), -EAGAIN when the
 * fault completed but dropped the lock, -EBUSY on retry-with-lock-dropped,
 * or a hard error from vm_fault_to_errno().
 */
static int faultin_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags, bool unshare,
		int *locked)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	if (flags & FOLL_NOFAULT)
		return -EFAULT;
	if (flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (flags & FOLL_UNLOCKABLE) {
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
		/*
		 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
		 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
		 * That's because some callers may not be prepared to
		 * handle early exits caused by non-fatal signals.
		 */
		if (flags & FOLL_INTERRUPTIBLE)
			fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
	}
	if (flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (flags & FOLL_TRIED) {
		/*
		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
		 * can co-exist
		 */
		fault_flags |= FAULT_FLAG_TRIED;
	}
	if (unshare) {
		fault_flags |= FAULT_FLAG_UNSHARE;
		/* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
		VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
	}

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
		 * mmap lock in the page fault handler. Sanity check this.
		 */
		WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
		*locked = 0;

		/*
		 * We should do the same as VM_FAULT_RETRY, but let's not
		 * return -EBUSY since that's not reflecting the reality of
		 * what has happened - we've just fully completed a page
		 * fault, with the mmap lock released. Use -EAGAIN to show
		 * that we want to take the mmap lock _again_.
		 */
		return -EAGAIN;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, flags);

		if (err)
			return err;
		/* Unknown VM_FAULT_ERROR bit: should never happen. */
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*locked = 0;
		return -EBUSY;
	}

	return 0;
}
  1082. /*
  1083. * Writing to file-backed mappings which require folio dirty tracking using GUP
  1084. * is a fundamentally broken operation, as kernel write access to GUP mappings
  1085. * do not adhere to the semantics expected by a file system.
  1086. *
  1087. * Consider the following scenario:-
  1088. *
  1089. * 1. A folio is written to via GUP which write-faults the memory, notifying
  1090. * the file system and dirtying the folio.
  1091. * 2. Later, writeback is triggered, resulting in the folio being cleaned and
  1092. * the PTE being marked read-only.
  1093. * 3. The GUP caller writes to the folio, as it is mapped read/write via the
  1094. * direct mapping.
  1095. * 4. The GUP caller, now done with the page, unpins it and sets it dirty
  1096. * (though it does not have to).
  1097. *
  1098. * This results in both data being written to a folio without writenotify, and
  1099. * the folio being dirtied unexpectedly (if the caller decides to do so).
  1100. */
  1101. static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
  1102. unsigned long gup_flags)
  1103. {
  1104. /*
  1105. * If we aren't pinning then no problematic write can occur. A long term
  1106. * pin is the most egregious case so this is the case we disallow.
  1107. */
  1108. if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
  1109. (FOLL_PIN | FOLL_LONGTERM))
  1110. return true;
  1111. /*
  1112. * If the VMA does not require dirty tracking then no problematic write
  1113. * can occur either.
  1114. */
  1115. return !vma_needs_dirty_tracking(vma);
  1116. }
/*
 * Validate that @gup_flags may be applied to @vma at all, before any page
 * is looked up or faulted in. Returns 0 if the access is permitted,
 * -EFAULT for policy violations, or -EOPNOTSUPP for unsupported
 * combinations (fsdax long-term pins, FOLL_SPLIT_PMD on hugetlb).
 */
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);
	bool vma_anon = vma_is_anonymous(vma);

	/* No struct pages behind IO/PFN mappings: GUP cannot work there. */
	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if ((gup_flags & FOLL_ANON) && !vma_anon)
		return -EFAULT;

	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
		return -EOPNOTSUPP;

	if ((gup_flags & FOLL_SPLIT_PMD) && is_vm_hugetlb_page(vma))
		return -EOPNOTSUPP;

	if (vma_is_secretmem(vma))
		return -EFAULT;

	if (write) {
		if (!vma_anon &&
		    !writable_file_mapping_allowed(vma, gup_flags))
			return -EFAULT;

		if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
			if (is_vm_hugetlb_page(vma))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}
/*
 * This is "vma_lookup()", but with a warning if we would have
 * historically expanded the stack in the GUP code.
 */
static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
	 unsigned long addr)
{
#ifdef CONFIG_STACK_GROWSUP
	return vma_lookup(mm, addr);
#else
	static volatile unsigned long next_warn;
	struct vm_area_struct *vma;
	unsigned long now, next;

	vma = find_vma(mm, addr);
	if (!vma || (addr >= vma->vm_start))
		return vma;

	/* Only warn for half-way relevant accesses */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	/* 64 KiB: beyond any plausible implicit stack-growth access. */
	if (vma->vm_start - addr > 65536)
		return NULL;

	/* Let's not warn more than once an hour.. */
	now = jiffies; next = next_warn;
	if (next && time_before(now, next))
		return NULL;
	next_warn = now + 60*60*HZ;

	/* Let people know things may have changed. */
	pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
		current->comm, task_pid_nr(current),
		vma->vm_start, vma->vm_end, addr);
	dump_stack();
	return NULL;
#endif
}
  1207. /**
  1208. * __get_user_pages() - pin user pages in memory
  1209. * @mm: mm_struct of target mm
  1210. * @start: starting user address
  1211. * @nr_pages: number of pages from start to pin
  1212. * @gup_flags: flags modifying pin behaviour
  1213. * @pages: array that receives pointers to the pages pinned.
  1214. * Should be at least nr_pages long. Or NULL, if caller
  1215. * only intends to ensure the pages are faulted in.
  1216. * @locked: whether we're still with the mmap_lock held
  1217. *
  1218. * Returns either number of pages pinned (which may be less than the
  1219. * number requested), or an error. Details about the return value:
  1220. *
  1221. * -- If nr_pages is 0, returns 0.
  1222. * -- If nr_pages is >0, but no pages were pinned, returns -errno.
  1223. * -- If nr_pages is >0, and some pages were pinned, returns the number of
  1224. * pages pinned. Again, this may be less than nr_pages.
  1225. * -- 0 return value is possible when the fault would need to be retried.
  1226. *
  1227. * The caller is responsible for releasing returned @pages, via put_page().
  1228. *
  1229. * Must be called with mmap_lock held. It may be released. See below.
  1230. *
  1231. * __get_user_pages walks a process's page tables and takes a reference to
  1232. * each struct page that each user address corresponds to at a given
  1233. * instant. That is, it takes the page that would be accessed if a user
  1234. * thread accesses the given user virtual address at that instant.
  1235. *
  1236. * This does not guarantee that the page exists in the user mappings when
  1237. * __get_user_pages returns, and there may even be a completely different
  1238. * page there in some cases (eg. if mmapped pagecache has been invalidated
  1239. * and subsequently re-faulted). However it does guarantee that the page
  1240. * won't be freed completely. And mostly callers simply care that the page
  1241. * contains data that was valid *at some point in time*. Typically, an IO
  1242. * or similar operation cannot guarantee anything stronger anyway because
  1243. * locks can't be held over the syscall boundary.
  1244. *
  1245. * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
  1246. * the page is written to, set_page_dirty (or set_page_dirty_lock, as
  1247. * appropriate) must be called after the page is finished with, and
  1248. * before put_page is called.
  1249. *
  1250. * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
  1251. * be released. If this happens *@locked will be set to 0 on return.
  1252. *
  1253. * A caller using such a combination of @gup_flags must therefore hold the
  1254. * mmap_lock for reading only, and recognize when it's been released. Otherwise,
  1255. * it must be held for either reading or writing and will not be released.
  1256. *
  1257. * In most cases, get_user_pages or get_user_pages_fast should be used
  1258. * instead of __get_user_pages. __get_user_pages should be used only if
  1259. * you need some special @gup_flags.
  1260. */
static long __get_user_pages(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr_remote(mm, start);

	/* pages[] must be supplied iff the caller asked for a ref (GET/PIN). */
	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	do {
		struct page *page;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			/*
			 * MADV_POPULATE_(READ|WRITE) wants to handle VMA
			 * lookups+error reporting differently.
			 */
			if (gup_flags & FOLL_MADV_POPULATE) {
				vma = vma_lookup(mm, start);
				if (!vma) {
					ret = -ENOMEM;
					goto out;
				}
				if (check_vma_flags(vma, gup_flags)) {
					ret = -EINVAL;
					goto out;
				}
				goto retry;
			}
			vma = gup_vma_lookup(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &page : NULL);
				if (ret)
					goto out;
				/* gate pages are never huge: single-page step */
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma) {
				ret = -EFAULT;
				goto out;
			}
			ret = check_vma_flags(vma, gup_flags);
			if (ret)
				goto out;
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, gup_flags, &ctx);
		if (!page || PTR_ERR(page) == -EMLINK) {
			/*
			 * No present page (or -EMLINK asking for an unshare):
			 * fault it in.  faultin_page() may drop mmap_lock, in
			 * which case *locked is cleared via @locked.
			 */
			ret = faultin_page(vma, start, gup_flags,
					   PTR_ERR(page) == -EMLINK, locked);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
			case -EAGAIN:
				/* transient: report pages pinned so far */
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			}
			/* faultin_page() must not return anything else */
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page. If the caller expects **pages to be
			 * filled in, bail out now, because that can't be done
			 * for this page.
			 */
			if (pages) {
				ret = PTR_ERR(page);
				goto out;
			}
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
next_page:
		/*
		 * Number of base pages covered by this (possibly huge)
		 * mapping, counting from @start, clamped to the request.
		 */
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;

		if (pages) {
			struct page *subpage;
			unsigned int j;

			/*
			 * This must be a large folio (and doesn't need to
			 * be the whole folio; it can be part of it), do
			 * the refcount work for all the subpages too.
			 *
			 * NOTE: here the page may not be the head page
			 * e.g. when start addr is not thp-size aligned.
			 * try_grab_folio() should have taken care of tail
			 * pages.
			 */
			if (page_increm > 1) {
				struct folio *folio = page_folio(page);

				/*
				 * Since we already hold refcount on the
				 * large folio, this should never fail.
				 */
				if (try_grab_folio(folio, page_increm - 1,
						   gup_flags)) {
					/*
					 * Release the 1st page ref if the
					 * folio is problematic, fail hard.
					 */
					gup_put_folio(folio, 1, gup_flags);
					ret = -EFAULT;
					goto out;
				}
			}

			for (j = 0; j < page_increm; j++) {
				subpage = nth_page(page, j);
				pages[i + j] = subpage;
				flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
				flush_dcache_page(subpage);
			}
		}

		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	/* Report partial progress if any; otherwise the error (or 0). */
	return i ? i : ret;
}
  1404. static bool vma_permits_fault(struct vm_area_struct *vma,
  1405. unsigned int fault_flags)
  1406. {
  1407. bool write = !!(fault_flags & FAULT_FLAG_WRITE);
  1408. bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
  1409. vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
  1410. if (!(vm_flags & vma->vm_flags))
  1411. return false;
  1412. /*
  1413. * The architecture might have a hardware protection
  1414. * mechanism other than read/write that can deny access.
  1415. *
  1416. * gup always represents data access, not instruction
  1417. * fetches, so execute=false here:
  1418. */
  1419. if (!arch_vma_access_permitted(vma, write, false, foreign))
  1420. return false;
  1421. return true;
  1422. }
  1423. /**
  1424. * fixup_user_fault() - manually resolve a user page fault
  1425. * @mm: mm_struct of target mm
  1426. * @address: user address
  1427. * @fault_flags:flags to pass down to handle_mm_fault()
  1428. * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller
  1429. * does not allow retry. If NULL, the caller must guarantee
  1430. * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
  1431. *
  1432. * This is meant to be called in the specific scenario where for locking reasons
  1433. * we try to access user memory in atomic context (within a pagefault_disable()
  1434. * section), this returns -EFAULT, and we want to resolve the user fault before
  1435. * trying again.
  1436. *
  1437. * Typically this is meant to be used by the futex code.
  1438. *
  1439. * The main difference with get_user_pages() is that this function will
  1440. * unconditionally call handle_mm_fault() which will in turn perform all the
  1441. * necessary SW fixup of the dirty and young bits in the PTE, while
  1442. * get_user_pages() only guarantees to update these in the struct page.
  1443. *
  1444. * This is important for some architectures where those bits also gate the
  1445. * access permission to the page because they are maintained in software. On
  1446. * such architectures, gup() will not be enough to make a subsequent access
  1447. * succeed.
  1448. *
  1449. * This function will not return with an unlocked mmap_lock. So it has not the
  1450. * same semantics wrt the @mm->mmap_lock as does filemap_fault().
  1451. */
int fixup_user_fault(struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret;

	address = untagged_addr_remote(mm, address);

	/* Retry is only allowed when the caller can observe the unlock. */
	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	vma = gup_vma_lookup(mm, address);
	if (!vma)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
	    fatal_signal_pending(current))
		return -EINTR;

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * NOTE: it's a pity that we need to retake the lock here
		 * to pair with the unlock() in the callers. Ideally we
		 * could tell the callers so they do not need to unlock.
		 *
		 * @unlocked is non-NULL here: COMPLETED/RETRY require
		 * FAULT_FLAG_ALLOW_RETRY, which is only set above when
		 * the caller supplied @unlocked.
		 */
		mmap_read_lock(mm);
		*unlocked = true;
		return 0;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		/* VM_FAULT_ERROR must map to an errno; anything else is a bug */
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		/* The lock was dropped by the fault handler; retake and retry. */
		mmap_read_lock(mm);
		*unlocked = true;
		fault_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
  1496. /*
  1497. * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is
  1498. * specified, it'll also respond to generic signals. The caller of GUP
  1499. * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
  1500. */
  1501. static bool gup_signal_pending(unsigned int flags)
  1502. {
  1503. if (fatal_signal_pending(current))
  1504. return true;
  1505. if (!(flags & FOLL_INTERRUPTIBLE))
  1506. return false;
  1507. return signal_pending(current);
  1508. }
  1509. /*
  1510. * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
  1511. * the caller. This function may drop the mmap_lock. If it does so, then it will
  1512. * set (*locked = 0).
  1513. *
  1514. * (*locked == 0) means that the caller expects this function to acquire and
  1515. * drop the mmap_lock. Therefore, the value of *locked will still be zero when
  1516. * the function returns, even though it may have changed temporarily during
  1517. * function execution.
  1518. *
  1519. * Please note that this function, unlike __get_user_pages(), will not return 0
  1520. * for nr_pages > 0, unless FOLL_NOWAIT is used.
  1521. */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool must_unlock = false;

	if (!nr_pages)
		return 0;

	/*
	 * The internal caller expects GUP to manage the lock internally and the
	 * lock must be released when this returns.
	 */
	if (!*locked) {
		if (mmap_read_lock_killable(mm))
			return -EAGAIN;
		must_unlock = true;
		*locked = 1;
	}
	else
		mmap_assert_locked(mm);

	/* Remember that this mm has (or had) pinned pages. */
	if (flags & FOLL_PIN)
		mm_set_has_pinned_flag(&mm->flags);

	/*
	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
	 * for FOLL_GET, not for the newer FOLL_PIN.
	 *
	 * FOLL_PIN always expects pages to be non-null, but no need to assert
	 * that here, as any failures will be obvious enough.
	 */
	if (pages && !(flags & FOLL_PIN))
		flags |= FOLL_GET;

	pages_done = 0;
	for (;;) {
		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
				       locked);
		if (!(flags & FOLL_UNLOCKABLE)) {
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			pages_done = ret;
			break;
		}

		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/*
		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
		 * For the prefault case (!pages) we only update counts.
		 */
		if (likely(pages))
			pages += ret;
		start += ret << PAGE_SHIFT;

		/* The lock was temporarily dropped, so we must unlock later */
		must_unlock = true;

retry:
		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * with both FAULT_FLAG_ALLOW_RETRY and
		 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
		 * by fatal signals of even common signals, depending on
		 * the caller's request. So we need to check it before we
		 * start trying again otherwise it can loop forever.
		 */
		if (gup_signal_pending(flags)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret) {
			BUG_ON(ret > 0);
			if (!pages_done)
				pages_done = ret;
			break;
		}

		/* Retry exactly one page with FOLL_TRIED before resuming. */
		*locked = 1;
		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
				       pages, locked);
		if (!*locked) {
			/* Continue to retry until we succeeded */
			BUG_ON(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (must_unlock && *locked) {
		/*
		 * We either temporarily dropped the lock, or the caller
		 * requested that we both acquire and drop the lock. Either way,
		 * we must now unlock, and notify the caller of that state.
		 */
		mmap_read_unlock(mm);
		*locked = 0;
	}

	/*
	 * Failing to pin anything implies something has gone wrong (except when
	 * FOLL_NOWAIT is specified).
	 */
	if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT)))
		return -EFAULT;

	return pages_done;
}
  1656. /**
  1657. * populate_vma_page_range() - populate a range of pages in the vma.
  1658. * @vma: target vma
  1659. * @start: start address
  1660. * @end: end address
  1661. * @locked: whether the mmap_lock is still held
  1662. *
  1663. * This takes care of mlocking the pages too if VM_LOCKED is set.
  1664. *
  1665. * Return either number of pages pinned in the vma, or a negative error
  1666. * code on error.
  1667. *
  1668. * vma->vm_mm->mmap_lock must be held.
  1669. *
  1670. * If @locked is NULL, it may be held for read or write and will
  1671. * be unperturbed.
  1672. *
  1673. * If @locked is non-NULL, it must held for read only and may be
  1674. * released. If it's released, *@locked will be set to 0.
  1675. */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	/* Stand-in lock state when the caller passed @locked == NULL. */
	int local_locked = 1;
	int gup_flags;
	long ret;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	/*
	 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
	 * faultin_page() to break COW, so it has no work to do here.
	 */
	if (vma->vm_flags & VM_LOCKONFAULT)
		return nr_pages;

	/* ... similarly, we've never faulted in PROT_NONE pages */
	if (!vma_is_accessible(vma))
		return -EFAULT;

	gup_flags = FOLL_TOUCH;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 *
	 * Otherwise, do a read fault, and use FOLL_FORCE in case it's not
	 * readable (ie write-only or executable).
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;
	else
		gup_flags |= FOLL_FORCE;

	/* Only allow dropping the lock if the caller can observe it. */
	if (locked)
		gup_flags |= FOLL_UNLOCKABLE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
			       NULL, locked ? locked : &local_locked);
	lru_add_drain();
	return ret;
}
  1722. /*
  1723. * faultin_page_range() - populate (prefault) page tables inside the
  1724. * given range readable/writable
  1725. *
  1726. * This takes care of mlocking the pages, too, if VM_LOCKED is set.
  1727. *
  1728. * @mm: the mm to populate page tables in
  1729. * @start: start address
  1730. * @end: end address
  1731. * @write: whether to prefault readable or writable
  1732. * @locked: whether the mmap_lock is still held
  1733. *
  1734. * Returns either number of processed pages in the MM, or a negative error
  1735. * code on error (see __get_user_pages()). Note that this function reports
  1736. * errors related to VMAs, such as incompatible mappings, as expected by
  1737. * MADV_POPULATE_(READ|WRITE).
  1738. *
  1739. * The range must be page-aligned.
  1740. *
  1741. * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
  1742. */
  1743. long faultin_page_range(struct mm_struct *mm, unsigned long start,
  1744. unsigned long end, bool write, int *locked)
  1745. {
  1746. unsigned long nr_pages = (end - start) / PAGE_SIZE;
  1747. int gup_flags;
  1748. long ret;
  1749. VM_BUG_ON(!PAGE_ALIGNED(start));
  1750. VM_BUG_ON(!PAGE_ALIGNED(end));
  1751. mmap_assert_locked(mm);
  1752. /*
  1753. * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
  1754. * the page dirty with FOLL_WRITE -- which doesn't make a
  1755. * difference with !FOLL_FORCE, because the page is writable
  1756. * in the page table.
  1757. * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
  1758. * a poisoned page.
  1759. * !FOLL_FORCE: Require proper access permissions.
  1760. */
  1761. gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
  1762. FOLL_MADV_POPULATE;
  1763. if (write)
  1764. gup_flags |= FOLL_WRITE;
  1765. ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
  1766. gup_flags);
  1767. lru_add_drain();
  1768. return ret;
  1769. }
  1770. /*
  1771. * __mm_populate - populate and/or mlock pages within a range of address space.
  1772. *
  1773. * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
  1774. * flags. VMAs must be already marked with the desired vm_flags, and
  1775. * mmap_lock must not be held.
  1776. */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 *
		 * populate_vma_page_range() below may drop the lock (and
		 * clear @locked), in which case we re-lock and re-lookup
		 * here since the VMA tree may have changed meanwhile.
		 */
		if (!locked) {
			locked = 1;
			mmap_read_lock(mm);
			vma = find_vma_intersection(mm, nstart, end);
		} else if (nstart >= vma->vm_end)
			vma = find_vma_intersection(mm, vma->vm_end, end);
		if (!vma)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue; /* continue at next VMA */
			}
			break;
		}
		/* Advance past the pages actually populated, then continue. */
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		mmap_read_unlock(mm);
	return ret;	/* 0 or negative error code */
}
  1827. #else /* CONFIG_MMU */
static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
		unsigned long nr_pages, struct page **pages,
		int *locked, unsigned int foll_flags)
{
	struct vm_area_struct *vma;
	bool must_unlock = false;
	unsigned long vm_flags;
	long i;

	if (!nr_pages)
		return 0;

	/*
	 * The internal caller expects GUP to manage the lock internally and the
	 * lock must be released when this returns.
	 */
	if (!*locked) {
		if (mmap_read_lock_killable(mm))
			return -EAGAIN;
		must_unlock = true;
		*locked = 1;
	}

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			break;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			break;

		if (pages) {
			/* nommu: memory is directly mapped, no faulting needed */
			pages[i] = virt_to_page((void *)start);
			if (pages[i])
				get_page(pages[i]);
		}

		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	if (must_unlock && *locked) {
		mmap_read_unlock(mm);
		*locked = 0;
	}

	/* Return the number of pages processed, or -EFAULT if none were. */
	return i ? : -EFAULT;
}
  1876. #endif /* !CONFIG_MMU */
  1877. /**
  1878. * fault_in_writeable - fault in userspace address range for writing
  1879. * @uaddr: start of address range
  1880. * @size: size of address range
  1881. *
  1882. * Returns the number of bytes not faulted in (like copy_to_user() and
  1883. * copy_from_user()).
  1884. */
size_t fault_in_writeable(char __user *uaddr, size_t size)
{
	char __user *start = uaddr, *end;

	if (unlikely(size == 0))
		return 0;
	if (!user_write_access_begin(uaddr, size))
		return size;
	/* Touch the leading partial page, then continue page-aligned. */
	if (!PAGE_ALIGNED(uaddr)) {
		unsafe_put_user(0, uaddr, out);
		uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
	}
	end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
	/* NOTE(review): end < start means the range wrapped; presumably the
	 * loop is then meant to stop when uaddr itself wraps to NULL. */
	if (unlikely(end < start))
		end = NULL;
	/* Write one byte per page; unsafe_put_user() jumps to "out" on fault. */
	while (uaddr != end) {
		unsafe_put_user(0, uaddr, out);
		uaddr += PAGE_SIZE;
	}

out:
	user_write_access_end();
	/* Bytes not faulted in: everything from the failing page onward. */
	if (size > uaddr - start)
		return size - (uaddr - start);
	return 0;
}
EXPORT_SYMBOL(fault_in_writeable);
  1910. /**
  1911. * fault_in_subpage_writeable - fault in an address range for writing
  1912. * @uaddr: start of address range
  1913. * @size: size of address range
  1914. *
  1915. * Fault in a user address range for writing while checking for permissions at
  1916. * sub-page granularity (e.g. arm64 MTE). This function should be used when
  1917. * the caller cannot guarantee forward progress of a copy_to_user() loop.
  1918. *
  1919. * Returns the number of bytes not faulted in (like copy_to_user() and
  1920. * copy_from_user()).
  1921. */
  1922. size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
  1923. {
  1924. size_t faulted_in;
  1925. /*
  1926. * Attempt faulting in at page granularity first for page table
  1927. * permission checking. The arch-specific probe_subpage_writeable()
  1928. * functions may not check for this.
  1929. */
  1930. faulted_in = size - fault_in_writeable(uaddr, size);
  1931. if (faulted_in)
  1932. faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
  1933. return size - faulted_in;
  1934. }
  1935. EXPORT_SYMBOL(fault_in_subpage_writeable);
  1936. /*
  1937. * fault_in_safe_writeable - fault in an address range for writing
  1938. * @uaddr: start of address range
  1939. * @size: length of address range
  1940. *
  1941. * Faults in an address range for writing. This is primarily useful when we
  1942. * already know that some or all of the pages in the address range aren't in
  1943. * memory.
  1944. *
  1945. * Unlike fault_in_writeable(), this function is non-destructive.
  1946. *
  1947. * Note that we don't pin or otherwise hold the pages referenced that we fault
  1948. * in. There's no guarantee that they'll stay in memory for any duration of
  1949. * time.
  1950. *
  1951. * Returns the number of bytes not faulted in, like copy_to_user() and
  1952. * copy_from_user().
  1953. */
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
{
	unsigned long start = (unsigned long)uaddr, end;
	struct mm_struct *mm = current->mm;
	bool unlocked = false;

	if (unlikely(size == 0))
		return 0;
	end = PAGE_ALIGN(start + size);
	/* On wraparound, fault in up to the top of the address space. */
	if (end < start)
		end = 0;

	mmap_read_lock(mm);
	do {
		/* Resolve a write fault without actually writing user memory. */
		if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
			break;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	} while (start != end);
	mmap_read_unlock(mm);

	/* Remaining bytes begin at the first address we failed to fault in. */
	if (size > start - (unsigned long)uaddr)
		return size - (start - (unsigned long)uaddr);
	return 0;
}
EXPORT_SYMBOL(fault_in_safe_writeable);
  1976. /**
  1977. * fault_in_readable - fault in userspace address range for reading
  1978. * @uaddr: start of user address range
  1979. * @size: size of user address range
  1980. *
  1981. * Returns the number of bytes not faulted in (like copy_to_user() and
  1982. * copy_from_user()).
  1983. */
size_t fault_in_readable(const char __user *uaddr, size_t size)
{
	const char __user *start = uaddr, *end;
	/* volatile so the compiler can't elide the faulting reads */
	volatile char c;

	if (unlikely(size == 0))
		return 0;
	if (!user_read_access_begin(uaddr, size))
		return size;
	/* Touch the leading partial page, then continue page-aligned. */
	if (!PAGE_ALIGNED(uaddr)) {
		unsafe_get_user(c, uaddr, out);
		uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
	}
	end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
	/* NOTE(review): end < start means the range wrapped; presumably the
	 * loop is then meant to stop when uaddr itself wraps to NULL. */
	if (unlikely(end < start))
		end = NULL;
	/* Read one byte per page; unsafe_get_user() jumps to "out" on fault. */
	while (uaddr != end) {
		unsafe_get_user(c, uaddr, out);
		uaddr += PAGE_SIZE;
	}

out:
	user_read_access_end();
	(void)c;
	/* Bytes not faulted in: everything from the failing page onward. */
	if (size > uaddr - start)
		return size - (uaddr - start);
	return 0;
}
EXPORT_SYMBOL(fault_in_readable);
  2011. /**
  2012. * get_dump_page() - pin user page in memory while writing it to core dump
  2013. * @addr: user address
  2014. *
  2015. * Returns struct page pointer of user page pinned for dump,
  2016. * to be freed afterwards by put_page().
  2017. *
  2018. * Returns NULL on any kind of failure - a hole must then be inserted into
  2019. * the corefile, to preserve alignment with its headers; and also returns
  2020. * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
  2021. * allowing a hole to be left in the corefile to save disk space.
  2022. *
  2023. * Called without mmap_lock (takes and releases the mmap_lock by itself).
  2024. */
  2025. #ifdef CONFIG_ELF_CORE
  2026. struct page *get_dump_page(unsigned long addr)
  2027. {
  2028. struct page *page;
  2029. int locked = 0;
  2030. int ret;
  2031. ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
  2032. FOLL_FORCE | FOLL_DUMP | FOLL_GET);
  2033. return (ret == 1) ? page : NULL;
  2034. }
  2035. #endif /* CONFIG_ELF_CORE */
  2036. #ifdef CONFIG_MIGRATION
  2037. /*
  2038. * An array of either pages or folios ("pofs"). Although it may seem tempting to
  2039. * avoid this complication, by simply interpreting a list of folios as a list of
  2040. * pages, that approach won't work in the longer term, because eventually the
  2041. * layouts of struct page and struct folio will become completely different.
  2042. * Furthermore, this pof approach avoids excessive page_folio() calls.
  2043. */
struct pages_or_folios {
	union {
		struct page **pages;	/* valid when !has_folios */
		struct folio **folios;	/* valid when has_folios */
		void **entries;		/* type-erased view, for clearing */
	};
	/* discriminator for the union above */
	bool has_folios;
	/* number of elements in the active array */
	long nr_entries;
};
  2053. static struct folio *pofs_get_folio(struct pages_or_folios *pofs, long i)
  2054. {
  2055. if (pofs->has_folios)
  2056. return pofs->folios[i];
  2057. return page_folio(pofs->pages[i]);
  2058. }
/* Drop entry @i from the array (works for both pages and folios). */
static void pofs_clear_entry(struct pages_or_folios *pofs, long i)
{
	pofs->entries[i] = NULL;
}
  2063. static void pofs_unpin(struct pages_or_folios *pofs)
  2064. {
  2065. if (pofs->has_folios)
  2066. unpin_folios(pofs->folios, pofs->nr_entries);
  2067. else
  2068. unpin_user_pages(pofs->pages, pofs->nr_entries);
  2069. }
/*
 * Advance *index_ptr to the next distinct folio after @folio, or return
 * NULL when the array is exhausted. For a pages array, consecutive pages
 * belonging to the same large folio are skipped so each folio is visited
 * exactly once.
 */
static struct folio *pofs_next_folio(struct folio *folio,
		struct pages_or_folios *pofs, long *index_ptr)
{
	long i = *index_ptr + 1;

	if (!pofs->has_folios && folio_test_large(folio)) {
		const unsigned long start_pfn = folio_pfn(folio);
		const unsigned long end_pfn = start_pfn + folio_nr_pages(folio);

		for (; i < pofs->nr_entries; i++) {
			unsigned long pfn = page_to_pfn(pofs->pages[i]);

			/* Is this page part of this folio? */
			if (pfn < start_pfn || pfn >= end_pfn)
				break;
		}
	}

	if (unlikely(i == pofs->nr_entries))
		return NULL;
	*index_ptr = i;

	return pofs_get_folio(pofs, i);
}
  2089. /*
  2090. * Returns the number of collected folios. Return value is always >= 0.
  2091. */
static unsigned long collect_longterm_unpinnable_folios(
		struct list_head *movable_folio_list,
		struct pages_or_folios *pofs)
{
	unsigned long collected = 0;
	struct folio *folio;
	/* 0 = not drained, 1 = local LRU caches drained, 2 = all CPUs drained */
	int drained = 0;
	long i = 0;

	for (folio = pofs_get_folio(pofs, i); folio;
	     folio = pofs_next_folio(folio, pofs, &i)) {
		if (folio_is_longterm_pinnable(folio))
			continue;

		collected++;

		/* Device coherent folios are migrated later, not isolated here. */
		if (folio_is_device_coherent(folio))
			continue;

		if (folio_test_hugetlb(folio)) {
			isolate_hugetlb(folio, movable_folio_list);
			continue;
		}

		/*
		 * An extra, unexpected reference may come from the folio
		 * sitting in a per-CPU LRU cache. Escalate draining: first
		 * this CPU only, then (if still unexpected refs) all CPUs.
		 */
		if (drained == 0 && folio_may_be_lru_cached(folio) &&
				folio_ref_count(folio) !=
				folio_expected_ref_count(folio) + 1) {
			lru_add_drain();
			drained = 1;
		}
		if (drained == 1 && folio_may_be_lru_cached(folio) &&
				folio_ref_count(folio) !=
				folio_expected_ref_count(folio) + 1) {
			lru_add_drain_all();
			drained = 2;
		}

		if (!folio_isolate_lru(folio))
			continue;

		list_add_tail(&folio->lru, movable_folio_list);
		node_stat_mod_folio(folio,
				    NR_ISOLATED_ANON + folio_is_file_lru(folio),
				    folio_nr_pages(folio));
	}

	return collected;
}
  2132. /*
  2133. * Unpins all folios and migrates device coherent folios and movable_folio_list.
  2134. * Returns -EAGAIN if all folios were successfully migrated or -errno for
  2135. * failure (or partial success).
  2136. */
static int
migrate_longterm_unpinnable_folios(struct list_head *movable_folio_list,
				   struct pages_or_folios *pofs)
{
	int ret;
	unsigned long i;

	for (i = 0; i < pofs->nr_entries; i++) {
		struct folio *folio = pofs_get_folio(pofs, i);

		if (folio_is_device_coherent(folio)) {
			/*
			 * Migration will fail if the folio is pinned, so
			 * convert the pin on the source folio to a normal
			 * reference.
			 */
			pofs_clear_entry(pofs, i);
			folio_get(folio);
			gup_put_folio(folio, 1, FOLL_PIN);

			if (migrate_device_coherent_folio(folio)) {
				ret = -EBUSY;
				goto err;
			}

			continue;
		}

		/*
		 * We can't migrate folios with unexpected references, so drop
		 * the reference obtained by __get_user_pages_locked().
		 * Migrating folios have been added to movable_folio_list after
		 * calling folio_isolate_lru() which takes a reference so the
		 * folio won't be freed if it's migrating.
		 */
		unpin_folio(folio);
		pofs_clear_entry(pofs, i);
	}

	if (!list_empty(movable_folio_list)) {
		struct migration_target_control mtc = {
			.nid = NUMA_NO_NODE,
			.gfp_mask = GFP_USER | __GFP_NOWARN,
			.reason = MR_LONGTERM_PIN,
		};

		if (migrate_pages(movable_folio_list, alloc_migration_target,
				  NULL, (unsigned long)&mtc, MIGRATE_SYNC,
				  MR_LONGTERM_PIN, NULL)) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/* Success: caller must re-pin the whole range, hence -EAGAIN. */
	putback_movable_pages(movable_folio_list);

	return -EAGAIN;

err:
	/* On failure we still own the remaining pins; drop them all here. */
	pofs_unpin(pofs);
	putback_movable_pages(movable_folio_list);

	return ret;
}
  2190. static long
  2191. check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
  2192. {
  2193. LIST_HEAD(movable_folio_list);
  2194. unsigned long collected;
  2195. collected = collect_longterm_unpinnable_folios(&movable_folio_list,
  2196. pofs);
  2197. if (!collected)
  2198. return 0;
  2199. return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
  2200. }
  2201. /*
  2202. * Check whether all folios are *allowed* to be pinned indefinitely (long term).
  2203. * Rather confusingly, all folios in the range are required to be pinned via
  2204. * FOLL_PIN, before calling this routine.
  2205. *
  2206. * Return values:
  2207. *
  2208. * 0: if everything is OK and all folios in the range are allowed to be pinned,
  2209. * then this routine leaves all folios pinned and returns zero for success.
  2210. *
  2211. * -EAGAIN: if any folios in the range are not allowed to be pinned, then this
  2212. * routine will migrate those folios away, unpin all the folios in the range. If
  2213. * migration of the entire set of folios succeeds, then -EAGAIN is returned. The
  2214. * caller should re-pin the entire range with FOLL_PIN and then call this
  2215. * routine again.
  2216. *
  2217. * -ENOMEM, or any other -errno: if an error *other* than -EAGAIN occurs, this
  2218. * indicates a migration failure. The caller should give up, and propagate the
  2219. * error back up the call stack. The caller does not need to unpin any folios in
  2220. * that case, because this routine will do the unpinning.
  2221. */
  2222. static long check_and_migrate_movable_folios(unsigned long nr_folios,
  2223. struct folio **folios)
  2224. {
  2225. struct pages_or_folios pofs = {
  2226. .folios = folios,
  2227. .has_folios = true,
  2228. .nr_entries = nr_folios,
  2229. };
  2230. return check_and_migrate_movable_pages_or_folios(&pofs);
  2231. }
  2232. /*
  2233. * Return values and behavior are the same as those for
  2234. * check_and_migrate_movable_folios().
  2235. */
  2236. static long check_and_migrate_movable_pages(unsigned long nr_pages,
  2237. struct page **pages)
  2238. {
  2239. struct pages_or_folios pofs = {
  2240. .pages = pages,
  2241. .has_folios = false,
  2242. .nr_entries = nr_pages,
  2243. };
  2244. return check_and_migrate_movable_pages_or_folios(&pofs);
  2245. }
  2246. #else
/*
 * CONFIG_MIGRATION=n: folios cannot be moved anywhere, so every page is
 * treated as long-term pinnable and the check trivially succeeds.
 */
static long check_and_migrate_movable_pages(unsigned long nr_pages,
					    struct page **pages)
{
	return 0;
}
/*
 * CONFIG_MIGRATION=n: folios cannot be moved anywhere, so every folio is
 * treated as long-term pinnable and the check trivially succeeds.
 */
static long check_and_migrate_movable_folios(unsigned long nr_folios,
					     struct folio **folios)
{
	return 0;
}
  2257. #endif /* CONFIG_MIGRATION */
  2258. /*
  2259. * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
  2260. * allows us to process the FOLL_LONGTERM flag.
  2261. */
static long __gup_longterm_locked(struct mm_struct *mm,
				  unsigned long start,
				  unsigned long nr_pages,
				  struct page **pages,
				  int *locked,
				  unsigned int gup_flags)
{
	unsigned int flags;
	long rc, nr_pinned_pages;

	/* No long-term semantics requested: plain locked GUP suffices. */
	if (!(gup_flags & FOLL_LONGTERM))
		return __get_user_pages_locked(mm, start, nr_pages, pages,
					       locked, gup_flags);

	/*
	 * Enter pinning context (memalloc_pin_save()) for the duration of
	 * the pin-then-migrate retry loop; restored before returning.
	 */
	flags = memalloc_pin_save();
	do {
		nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
							  pages, locked,
							  gup_flags);
		if (nr_pinned_pages <= 0) {
			/* Propagate GUP failure (or zero pages) as-is. */
			rc = nr_pinned_pages;
			break;
		}

		/* FOLL_LONGTERM implies FOLL_PIN */
		/*
		 * -EAGAIN means unpinnable folios were migrated and the
		 * whole range was unpinned; loop to re-pin it.
		 */
		rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
	} while (rc == -EAGAIN);
	memalloc_pin_restore(flags);

	/* On success return the number of pages pinned, else the error. */
	return rc ? rc : nr_pinned_pages;
}
  2289. /*
  2290. * Check that the given flags are valid for the exported gup/pup interface, and
  2291. * update them with the required flags that the caller must have set.
  2292. */
static bool is_valid_gup_args(struct page **pages, int *locked,
			      unsigned int *gup_flags_p, unsigned int to_set)
{
	unsigned int gup_flags = *gup_flags_p;

	/*
	 * These flags not allowed to be specified externally to the gup
	 * interfaces:
	 * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
	 * - FOLL_REMOTE is internal only, set in (get|pin)_user_pages_remote()
	 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
	 */
	if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
		return false;

	gup_flags |= to_set;
	if (locked) {
		/* At the external interface locked must be set */
		if (WARN_ON_ONCE(*locked != 1))
			return false;

		gup_flags |= FOLL_UNLOCKABLE;
	}

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return false;

	/* LONGTERM can only be specified when pinning */
	if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
		return false;

	/* Pages input must be given if using GET/PIN */
	if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
		return false;

	/* We want to allow the pgmap to be hot-unplugged at all times */
	if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
			 (gup_flags & FOLL_PCI_P2PDMA)))
		return false;

	/* All checks passed: hand the augmented flags back to the caller. */
	*gup_flags_p = gup_flags;
	return true;
}
  2330. #ifdef CONFIG_MMU
  2331. /**
  2332. * get_user_pages_remote() - pin user pages in memory
  2333. * @mm: mm_struct of target mm
  2334. * @start: starting user address
  2335. * @nr_pages: number of pages from start to pin
  2336. * @gup_flags: flags modifying lookup behaviour
  2337. * @pages: array that receives pointers to the pages pinned.
  2338. * Should be at least nr_pages long. Or NULL, if caller
  2339. * only intends to ensure the pages are faulted in.
  2340. * @locked: pointer to lock flag indicating whether lock is held and
  2341. * subsequently whether VM_FAULT_RETRY functionality can be
  2342. * utilised. Lock must initially be held.
  2343. *
  2344. * Returns either number of pages pinned (which may be less than the
  2345. * number requested), or an error. Details about the return value:
  2346. *
  2347. * -- If nr_pages is 0, returns 0.
  2348. * -- If nr_pages is >0, but no pages were pinned, returns -errno.
  2349. * -- If nr_pages is >0, and some pages were pinned, returns the number of
  2350. * pages pinned. Again, this may be less than nr_pages.
  2351. *
  2352. * The caller is responsible for releasing returned @pages, via put_page().
  2353. *
  2354. * Must be called with mmap_lock held for read or write.
  2355. *
  2356. * get_user_pages_remote walks a process's page tables and takes a reference
  2357. * to each struct page that each user address corresponds to at a given
  2358. * instant. That is, it takes the page that would be accessed if a user
  2359. * thread accesses the given user virtual address at that instant.
  2360. *
  2361. * This does not guarantee that the page exists in the user mappings when
  2362. * get_user_pages_remote returns, and there may even be a completely different
  2363. * page there in some cases (eg. if mmapped pagecache has been invalidated
  2364. * and subsequently re-faulted). However it does guarantee that the page
  2365. * won't be freed completely. And mostly callers simply care that the page
  2366. * contains data that was valid *at some point in time*. Typically, an IO
  2367. * or similar operation cannot guarantee anything stronger anyway because
  2368. * locks can't be held over the syscall boundary.
  2369. *
  2370. * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
  2371. * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
  2372. * be called after the page is finished with, and before put_page is called.
  2373. *
  2374. * get_user_pages_remote is typically used for fewer-copy IO operations,
  2375. * to get a handle on the memory by some means other than accesses
  2376. * via the user virtual addresses. The pages may be submitted for
  2377. * DMA to devices or accessed via their kernel linear mapping (via the
  2378. * kmap APIs). Care should be taken to use the correct cache flushing APIs.
  2379. *
  2380. * See also get_user_pages_fast, for performance critical applications.
  2381. *
  2382. * get_user_pages_remote should be phased out in favor of
  2383. * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
  2384. * should use get_user_pages_remote because it cannot pass
  2385. * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
  2386. */
  2387. long get_user_pages_remote(struct mm_struct *mm,
  2388. unsigned long start, unsigned long nr_pages,
  2389. unsigned int gup_flags, struct page **pages,
  2390. int *locked)
  2391. {
  2392. int local_locked = 1;
  2393. if (!is_valid_gup_args(pages, locked, &gup_flags,
  2394. FOLL_TOUCH | FOLL_REMOTE))
  2395. return -EINVAL;
  2396. return __get_user_pages_locked(mm, start, nr_pages, pages,
  2397. locked ? locked : &local_locked,
  2398. gup_flags);
  2399. }
  2400. EXPORT_SYMBOL(get_user_pages_remote);
  2401. #else /* CONFIG_MMU */
/*
 * CONFIG_MMU=n stub: there are no page tables to walk, so report zero
 * pages pinned.
 */
long get_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return 0;
}
  2409. #endif /* !CONFIG_MMU */
  2410. /**
  2411. * get_user_pages() - pin user pages in memory
  2412. * @start: starting user address
  2413. * @nr_pages: number of pages from start to pin
  2414. * @gup_flags: flags modifying lookup behaviour
  2415. * @pages: array that receives pointers to the pages pinned.
  2416. * Should be at least nr_pages long. Or NULL, if caller
  2417. * only intends to ensure the pages are faulted in.
  2418. *
  2419. * This is the same as get_user_pages_remote(), just with a less-flexible
  2420. * calling convention where we assume that the mm being operated on belongs to
  2421. * the current task, and doesn't allow passing of a locked parameter. We also
  2422. * obviously don't pass FOLL_REMOTE in here.
  2423. */
  2424. long get_user_pages(unsigned long start, unsigned long nr_pages,
  2425. unsigned int gup_flags, struct page **pages)
  2426. {
  2427. int locked = 1;
  2428. if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
  2429. return -EINVAL;
  2430. return __get_user_pages_locked(current->mm, start, nr_pages, pages,
  2431. &locked, gup_flags);
  2432. }
  2433. EXPORT_SYMBOL(get_user_pages);
  2434. /*
  2435. * get_user_pages_unlocked() is suitable to replace the form:
  2436. *
  2437. * mmap_read_lock(mm);
  2438. * get_user_pages(mm, ..., pages, NULL);
  2439. * mmap_read_unlock(mm);
  2440. *
  2441. * with:
  2442. *
  2443. * get_user_pages_unlocked(mm, ..., pages);
  2444. *
  2445. * It is functionally equivalent to get_user_pages_fast so
  2446. * get_user_pages_fast should be used instead if specific gup_flags
  2447. * (e.g. FOLL_FORCE) are not required.
  2448. */
  2449. long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
  2450. struct page **pages, unsigned int gup_flags)
  2451. {
  2452. int locked = 0;
  2453. if (!is_valid_gup_args(pages, NULL, &gup_flags,
  2454. FOLL_TOUCH | FOLL_UNLOCKABLE))
  2455. return -EINVAL;
  2456. return __get_user_pages_locked(current->mm, start, nr_pages, pages,
  2457. &locked, gup_flags);
  2458. }
  2459. EXPORT_SYMBOL(get_user_pages_unlocked);
  2460. /*
  2461. * GUP-fast
  2462. *
  2463. * get_user_pages_fast attempts to pin user pages by walking the page
  2464. * tables directly and avoids taking locks. Thus the walker needs to be
  2465. * protected from page table pages being freed from under it, and should
  2466. * block any THP splits.
  2467. *
  2468. * One way to achieve this is to have the walker disable interrupts, and
  2469. * rely on IPIs from the TLB flushing code blocking before the page table
  2470. * pages are freed. This is unsuitable for architectures that do not need
  2471. * to broadcast an IPI when invalidating TLBs.
  2472. *
  2473. * Another way to achieve this is to batch up page table containing pages
  2474. * belonging to more than one mm_user, then rcu_sched a callback to free those
  2475. * pages. Disabling interrupts will allow the gup_fast() walker to both block
  2476. * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
  2477. * (which is a relatively rare event). The code below adopts this strategy.
  2478. *
  2479. * Before activating this code, please be aware that the following assumptions
  2480. * are currently made:
  2481. *
  2482. * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
  2483. * free pages containing page tables or TLB flushing requires IPI broadcast.
  2484. *
  2485. * *) ptes can be read atomically by the architecture.
  2486. *
  2487. * *) access_ok is sufficient to validate userspace address ranges.
  2488. *
  2489. * The last two assumptions can be relaxed by the addition of helper functions.
  2490. *
  2491. * This code is based heavily on the PowerPC implementation by Nick Piggin.
  2492. */
  2493. #ifdef CONFIG_HAVE_GUP_FAST
  2494. /*
  2495. * Used in the GUP-fast path to determine whether GUP is permitted to work on
  2496. * a specific folio.
  2497. *
  2498. * This call assumes the caller has pinned the folio, that the lowest page table
  2499. * level still points to this folio, and that interrupts have been disabled.
  2500. *
  2501. * GUP-fast must reject all secretmem folios.
  2502. *
  2503. * Writing to pinned file-backed dirty tracked folios is inherently problematic
  2504. * (see comment describing the writable_file_mapping_allowed() function). We
  2505. * therefore try to avoid the most egregious case of a long-term mapping doing
  2506. * so.
  2507. *
  2508. * This function cannot be as thorough as that one as the VMA is not available
  2509. * in the fast path, so instead we whitelist known good cases and if in doubt,
  2510. * fall back to the slow path.
  2511. */
static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags)
{
	bool reject_file_backed = false;
	struct address_space *mapping;
	bool check_secretmem = false;
	unsigned long mapping_flags;

	/*
	 * If we aren't pinning then no problematic write can occur. A long term
	 * pin is the most egregious case so this is the one we disallow.
	 */
	if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) ==
	    (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
		reject_file_backed = true;

	/* We hold a folio reference, so we can safely access folio fields. */

	/* secretmem folios are always order-0 folios. */
	if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio))
		check_secretmem = true;

	/* Neither restriction applies: GUP-fast may proceed. */
	if (!reject_file_backed && !check_secretmem)
		return true;

	if (WARN_ON_ONCE(folio_test_slab(folio)))
		return false;

	/* hugetlb neither requires dirty-tracking nor can be secretmem. */
	if (folio_test_hugetlb(folio))
		return true;

	/*
	 * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods
	 * cannot proceed, which means no actions performed under RCU can
	 * proceed either.
	 *
	 * inodes and thus their mappings are freed under RCU, which means the
	 * mapping cannot be freed beneath us and thus we can safely dereference
	 * it.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * However, there may be operations which _alter_ the mapping, so ensure
	 * we read it once and only once.
	 */
	mapping = READ_ONCE(folio->mapping);

	/*
	 * The mapping may have been truncated, in any case we cannot determine
	 * if this mapping is safe - fall back to slow path to determine how to
	 * proceed.
	 */
	if (!mapping)
		return false;

	/* Anonymous folios pose no problem. */
	mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
	if (mapping_flags)
		return mapping_flags & PAGE_MAPPING_ANON;

	/*
	 * At this point, we know the mapping is non-null and points to an
	 * address_space object.
	 */
	if (check_secretmem && secretmem_mapping(mapping))
		return false;
	/* The only remaining allowed file system is shmem. */
	return !reject_file_backed || shmem_mapping(mapping);
}
  2571. static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start,
  2572. unsigned int flags, struct page **pages)
  2573. {
  2574. while ((*nr) - nr_start) {
  2575. struct folio *folio = page_folio(pages[--(*nr)]);
  2576. folio_clear_referenced(folio);
  2577. gup_put_folio(folio, 1, flags);
  2578. }
  2579. }
  2580. #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
  2581. /*
  2582. * GUP-fast relies on pte change detection to avoid concurrent pgtable
  2583. * operations.
  2584. *
  2585. * To pin the page, GUP-fast needs to do below in order:
  2586. * (1) pin the page (by prefetching pte), then (2) check pte not changed.
  2587. *
  2588. * For the rest of pgtable operations where pgtable updates can be racy
  2589. * with GUP-fast, we need to do (1) clear pte, then (2) check whether page
  2590. * is pinned.
  2591. *
  2592. * Above will work for all pte-level operations, including THP split.
  2593. *
  2594. * For THP collapse, it's a bit more complicated because GUP-fast may be
  2595. * walking a pgtable page that is being freed (pte is still valid but pmd
  2596. * can be cleared already). To avoid race in such condition, we need to
  2597. * also check pmd here to make sure pmd doesn't change (corresponds to
  2598. * pmdp_collapse_flush() in the THP collapse code path).
  2599. */
static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr, ret = 0;
	pte_t *ptep, *ptem;

	ptem = ptep = pte_offset_map(&pmd, addr);
	if (!ptep)
		return 0;
	do {
		pte_t pte = ptep_get_lockless(ptep);
		struct page *page;
		struct folio *folio;

		/*
		 * Always fallback to ordinary GUP on PROT_NONE-mapped pages:
		 * pte_access_permitted() better should reject these pages
		 * either way: otherwise, GUP-fast might succeed in
		 * cases where ordinary GUP would fail due to VMA access
		 * permissions.
		 */
		if (pte_protnone(pte))
			goto pte_unmap;

		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
			goto pte_unmap;

		if (pte_devmap(pte)) {
			if (unlikely(flags & FOLL_LONGTERM))
				goto pte_unmap;

			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
				goto pte_unmap;
			}
		} else if (pte_special(pte))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		folio = try_grab_folio_fast(page, 1, flags);
		if (!folio)
			goto pte_unmap;

		/*
		 * Pin first, then re-check that neither the pmd nor the pte
		 * changed under us (see the pin/recheck protocol described in
		 * the comment above this function).
		 */
		if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
		    unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		if (!gup_fast_folio_allowed(folio, flags)) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		/*
		 * We need to make the page accessible if and only if we are
		 * going to access its content (the FOLL_PIN case).  Please
		 * see Documentation/core-api/pin_user_pages.rst for
		 * details.
		 */
		if (flags & FOLL_PIN) {
			ret = arch_make_folio_accessible(folio);
			if (ret) {
				gup_put_folio(folio, 1, flags);
				goto pte_unmap;
			}
		}
		folio_set_referenced(folio);
		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	/* Whole range handled; anything short of this falls back to slow GUP. */
	ret = 1;

pte_unmap:
	if (pgmap)
		put_dev_pagemap(pgmap);
	pte_unmap(ptem);
	return ret;
}
  2677. #else
  2678. /*
  2679. * If we can't determine whether or not a pte is special, then fail immediately
  2680. * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
  2681. * to be special.
  2682. *
  2683. * For a futex to be placed on a THP tail page, get_futex_key requires a
  2684. * get_user_pages_fast_only implementation that can pin pages. Thus it's still
  2685. * useful to have gup_fast_pmd_leaf even if we can't operate on ptes.
  2686. */
/*
 * CONFIG_ARCH_HAS_PTE_SPECIAL=n: pte_special() cannot be tested, so never
 * handle pte-level entries here; returning 0 forces the slow path.
 */
static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	return 0;
}
  2693. #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
  2694. #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * Pin the device pages backing [addr, end) one PAGE_SIZE step at a time,
 * starting at @pfn.  On any failure the references taken so far (since
 * @nr_start) are rolled back.  Returns nonzero only if the whole range
 * was covered (addr reached end).
 */
static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr,
	unsigned long end, unsigned int flags, struct page **pages, int *nr)
{
	int nr_start = *nr;
	struct dev_pagemap *pgmap = NULL;

	do {
		struct folio *folio;
		struct page *page = pfn_to_page(pfn);

		/* Re-looked up each iteration; a range may span pagemaps. */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
			break;
		}

		/* P2PDMA pages are only allowed when explicitly requested. */
		if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
			gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
			break;
		}

		folio = try_grab_folio_fast(page, 1, flags);
		if (!folio) {
			gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
			break;
		}
		folio_set_referenced(folio);
		pages[*nr] = page;
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);

	put_dev_pagemap(pgmap);
	return addr == end;
}
/*
 * Pin a pmd-sized devmap mapping.  After pinning, re-check that the pmd
 * entry did not change under us; if it did, roll back all references
 * taken since @nr_start and fail.  Returns 1 on success, 0 on failure.
 */
static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	/* First pfn covered by [addr, end) within this pmd. */
	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
		return 0;

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
		return 0;
	}
	return 1;
}
/*
 * Pin a pud-sized devmap mapping.  Same protocol as
 * gup_fast_devmap_pmd_leaf(): pin, then verify the pud entry is
 * unchanged, rolling back on mismatch.  Returns 1 on success, 0 on
 * failure.
 */
static int gup_fast_devmap_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	/* First pfn covered by [addr, end) within this pud. */
	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
		return 0;

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
		return 0;
	}
	return 1;
}
  2755. #else
/*
 * Built only without PTE_DEVMAP+THP support; callers gate on pmd_devmap(),
 * so this must be dead code -- BUILD_BUG() enforces that at compile time.
 */
static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	BUILD_BUG();
	return 0;
}
/*
 * Built only without PTE_DEVMAP+THP support; callers gate on pud_devmap(),
 * so this must be dead code -- BUILD_BUG() enforces that at compile time.
 */
static int gup_fast_devmap_pud_leaf(pud_t pud, pud_t *pudp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	BUILD_BUG();
	return 0;
}
  2770. #endif
/*
 * Try to pin a pmd-level leaf (huge) mapping: grab references on every
 * subpage of [addr, end), then re-check the pmd entry is unchanged and
 * that the folio may be GUP-fast pinned.  On any failure the references
 * are dropped and 0 is returned; returns 1 and advances *nr on success.
 */
static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	struct page *page;
	struct folio *folio;
	int refs;

	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	if (pmd_special(orig))
		return 0;

	if (pmd_devmap(orig)) {
		/* Long-term pins of devmap pages go through the slow path. */
		if (unlikely(flags & FOLL_LONGTERM))
			return 0;
		return gup_fast_devmap_pmd_leaf(orig, pmdp, addr, end, flags,
						pages, nr);
	}

	page = pmd_page(orig);
	refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);

	folio = try_grab_folio_fast(page, refs, flags);
	if (!folio)
		return 0;

	/* Pin first, then verify the pmd entry did not change under us. */
	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!gup_fast_folio_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}
	if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	*nr += refs;
	folio_set_referenced(folio);
	return 1;
}
/*
 * Try to pin a pud-level leaf (huge) mapping.  Same protocol as
 * gup_fast_pmd_leaf(): pin the subpages, re-check the entry, validate the
 * folio, and roll back on any failure.  Returns 1 and advances *nr on
 * success, 0 on failure.
 */
static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	struct page *page;
	struct folio *folio;
	int refs;

	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	if (pud_special(orig))
		return 0;

	if (pud_devmap(orig)) {
		/* Long-term pins of devmap pages go through the slow path. */
		if (unlikely(flags & FOLL_LONGTERM))
			return 0;
		return gup_fast_devmap_pud_leaf(orig, pudp, addr, end, flags,
						pages, nr);
	}

	page = pud_page(orig);
	refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr);

	folio = try_grab_folio_fast(page, refs, flags);
	if (!folio)
		return 0;

	/* Pin first, then verify the pud entry did not change under us. */
	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!gup_fast_folio_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	*nr += refs;
	folio_set_referenced(folio);
	return 1;
}
  2847. static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr,
  2848. unsigned long end, unsigned int flags, struct page **pages,
  2849. int *nr)
  2850. {
  2851. int refs;
  2852. struct page *page;
  2853. struct folio *folio;
  2854. if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
  2855. return 0;
  2856. BUILD_BUG_ON(pgd_devmap(orig));
  2857. page = pgd_page(orig);
  2858. refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);
  2859. folio = try_grab_folio_fast(page, refs, flags);
  2860. if (!folio)
  2861. return 0;
  2862. if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
  2863. gup_put_folio(folio, refs, flags);
  2864. return 0;
  2865. }
  2866. if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
  2867. gup_put_folio(folio, refs, flags);
  2868. return 0;
  2869. }
  2870. if (!gup_fast_folio_allowed(folio, flags)) {
  2871. gup_put_folio(folio, refs, flags);
  2872. return 0;
  2873. }
  2874. *nr += refs;
  2875. folio_set_referenced(folio);
  2876. return 1;
  2877. }
/*
 * Walk the pmd entries covering [addr, end), dispatching to the leaf or
 * pte-level handler.  Returns 1 if the whole range was pinned, 0 to fall
 * back to the slow path.
 */
static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset_lockless(pudp, pud, addr);
	do {
		/* Lockless snapshot; lower levels re-validate against it. */
		pmd_t pmd = pmdp_get_lockless(pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_present(pmd))
			return 0;

		if (unlikely(pmd_leaf(pmd))) {
			/* See gup_fast_pte_range() */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags,
				pages, nr))
				return 0;

		} else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
					       pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
/*
 * Walk the pud entries covering [addr, end), dispatching to the leaf or
 * pmd-level handler.  Returns 1 if the whole range was pinned, 0 to fall
 * back to the slow path.
 */
static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset_lockless(p4dp, p4d, addr);
	do {
		/* Lockless snapshot; lower levels re-validate against it. */
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (unlikely(!pud_present(pud)))
			return 0;
		if (unlikely(pud_leaf(pud))) {
			if (!gup_fast_pud_leaf(pud, pudp, addr, next, flags,
					       pages, nr))
				return 0;
		} else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags,
					       pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
/*
 * Walk the p4d entries covering [addr, end).  p4d entries are never leaf
 * mappings (enforced at build time), so this only descends to pud level.
 * Returns 1 if the whole range was pinned, 0 to fall back.
 */
static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
	do {
		/* Lockless snapshot; lower levels re-validate against it. */
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (!p4d_present(p4d))
			return 0;
		BUILD_BUG_ON(p4d_leaf(p4d));
		if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
					pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}
/*
 * Top of the lockless walk: iterate the current mm's pgd entries covering
 * [addr, end), dispatching to the leaf or p4d-level handler.  Stops (and
 * simply returns, with *nr holding the pages pinned so far) at the first
 * entry that cannot be handled; the caller falls back to slow GUP for the
 * remainder.
 */
static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset(current->mm, addr);
	do {
		/* Lockless snapshot; lower levels re-validate against it. */
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			return;
		if (unlikely(pgd_leaf(pgd))) {
			if (!gup_fast_pgd_leaf(pgd, pgdp, addr, next, flags,
					       pages, nr))
				return;
		} else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
					       pages, nr))
			return;
	} while (pgdp++, addr = next, addr != end);
}
  2964. #else
/*
 * CONFIG_HAVE_GUP_FAST=n: the lockless walk is unavailable; pin nothing
 * so callers always take the slow path.
 */
static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
}
  2969. #endif /* CONFIG_HAVE_GUP_FAST */
  2970. #ifndef gup_fast_permitted
  2971. /*
  2972. * Check if it's allowed to use get_user_pages_fast_only() for the range, or
  2973. * we need to fall back to the slow version:
  2974. */
static bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	/* Default: no architecture-specific restriction on the range. */
	return true;
}
  2979. #endif
/*
 * Attempt the lockless fast path over [start, end).  Returns the number
 * of pages pinned (possibly fewer than requested, or 0); the caller
 * handles the remainder via the slow path.
 */
static unsigned long gup_fast(unsigned long start, unsigned long end,
		unsigned int gup_flags, struct page **pages)
{
	unsigned long flags;
	int nr_pinned = 0;
	unsigned seq;

	if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
	    !gup_fast_permitted(start, end))
		return 0;

	if (gup_flags & FOLL_PIN) {
		/* An odd count means a write-protect sequence is in flight. */
		seq = raw_read_seqcount(&current->mm->write_protect_seq);
		if (seq & 1)
			return 0;
	}

	/*
	 * Disable interrupts. The nested form is used, in order to allow full,
	 * general purpose use of this routine.
	 *
	 * With interrupts disabled, we block page table pages from being freed
	 * from under us. See struct mmu_table_batch comments in
	 * include/asm-generic/tlb.h for more details.
	 *
	 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
	 * that come from THPs splitting.
	 */
	local_irq_save(flags);
	gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned);
	local_irq_restore(flags);

	/*
	 * When pinning pages for DMA there could be a concurrent write protect
	 * from fork() via copy_page_range(), in this case always fail GUP-fast.
	 */
	if (gup_flags & FOLL_PIN) {
		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
			/* Raced with fork(): drop everything we pinned. */
			gup_fast_unpin_user_pages(pages, nr_pinned);
			return 0;
		} else {
			sanity_check_pinned_pages(pages, nr_pinned);
		}
	}
	return nr_pinned;
}
/*
 * Common implementation behind the *_user_pages_fast() entry points:
 * validate the request, try the IRQ-disabled lockless path first, then
 * (unless FOLL_FAST_ONLY) fall back to slow GUP for the remaining pages.
 *
 * Returns the number of pages pinned (which may be fewer than requested),
 * or -errno if no pages could be pinned.
 */
static int gup_fast_fallback(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages)
{
	unsigned long len, end;
	unsigned long nr_pinned;
	int locked = 0;
	int ret;

	/* Only flags that GUP-fast knows how to honor are accepted. */
	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
				       FOLL_FAST_ONLY | FOLL_NOFAULT |
				       FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
		return -EINVAL;

	if (gup_flags & FOLL_PIN)
		mm_set_has_pinned_flag(&current->mm->flags);

	/* The fallback may sleep on mmap_lock; flag that for lockdep. */
	if (!(gup_flags & FOLL_FAST_ONLY))
		might_lock_read(&current->mm->mmap_lock);

	/* Strip any tag bits and align before doing range arithmetic. */
	start = untagged_addr(start) & PAGE_MASK;
	len = nr_pages << PAGE_SHIFT;
	if (check_add_overflow(start, len, &end))
		return -EOVERFLOW;
	if (end > TASK_SIZE_MAX)
		return -EFAULT;
	if (unlikely(!access_ok((void __user *)start, len)))
		return -EFAULT;

	nr_pinned = gup_fast(start, end, gup_flags, pages);
	if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
		return nr_pinned;

	/* Slow path: try to get the remaining pages with get_user_pages */
	start += nr_pinned << PAGE_SHIFT;
	pages += nr_pinned;
	ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
				    pages, &locked,
				    gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
	if (ret < 0) {
		/*
		 * The caller has to unpin the pages we already pinned so
		 * returning -errno is not an option
		 */
		if (nr_pinned)
			return nr_pinned;
		return ret;
	}
	return ret + nr_pinned;
}
  3066. /**
  3067. * get_user_pages_fast_only() - pin user pages in memory
  3068. * @start: starting user address
  3069. * @nr_pages: number of pages from start to pin
  3070. * @gup_flags: flags modifying pin behaviour
  3071. * @pages: array that receives pointers to the pages pinned.
  3072. * Should be at least nr_pages long.
  3073. *
  3074. * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
  3075. * the regular GUP.
  3076. *
  3077. * If the architecture does not support this function, simply return with no
  3078. * pages pinned.
  3079. *
  3080. * Careful, careful! COW breaking can go either way, so a non-write
  3081. * access can get ambiguous page results. If you call this function without
  3082. * 'write' set, you'd better be sure that you're ok with that ambiguity.
  3083. */
  3084. int get_user_pages_fast_only(unsigned long start, int nr_pages,
  3085. unsigned int gup_flags, struct page **pages)
  3086. {
  3087. /*
  3088. * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
  3089. * because gup fast is always a "pin with a +1 page refcount" request.
  3090. *
  3091. * FOLL_FAST_ONLY is required in order to match the API description of
  3092. * this routine: no fall back to regular ("slow") GUP.
  3093. */
  3094. if (!is_valid_gup_args(pages, NULL, &gup_flags,
  3095. FOLL_GET | FOLL_FAST_ONLY))
  3096. return -EINVAL;
  3097. return gup_fast_fallback(start, nr_pages, gup_flags, pages);
  3098. }
  3099. EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
  3100. /**
  3101. * get_user_pages_fast() - pin user pages in memory
  3102. * @start: starting user address
  3103. * @nr_pages: number of pages from start to pin
  3104. * @gup_flags: flags modifying pin behaviour
  3105. * @pages: array that receives pointers to the pages pinned.
  3106. * Should be at least nr_pages long.
  3107. *
  3108. * Attempt to pin user pages in memory without taking mm->mmap_lock.
  3109. * If not successful, it will fall back to taking the lock and
  3110. * calling get_user_pages().
  3111. *
  3112. * Returns number of pages pinned. This may be fewer than the number requested.
  3113. * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
  3114. * -errno.
  3115. */
  3116. int get_user_pages_fast(unsigned long start, int nr_pages,
  3117. unsigned int gup_flags, struct page **pages)
  3118. {
  3119. /*
  3120. * The caller may or may not have explicitly set FOLL_GET; either way is
  3121. * OK. However, internally (within mm/gup.c), gup fast variants must set
  3122. * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
  3123. * request.
  3124. */
  3125. if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
  3126. return -EINVAL;
  3127. return gup_fast_fallback(start, nr_pages, gup_flags, pages);
  3128. }
  3129. EXPORT_SYMBOL_GPL(get_user_pages_fast);
  3130. /**
  3131. * pin_user_pages_fast() - pin user pages in memory without taking locks
  3132. *
  3133. * @start: starting user address
  3134. * @nr_pages: number of pages from start to pin
  3135. * @gup_flags: flags modifying pin behaviour
  3136. * @pages: array that receives pointers to the pages pinned.
  3137. * Should be at least nr_pages long.
  3138. *
  3139. * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
  3140. * get_user_pages_fast() for documentation on the function arguments, because
  3141. * the arguments here are identical.
  3142. *
  3143. * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
  3144. * see Documentation/core-api/pin_user_pages.rst for further details.
  3145. *
  3146. * Note that if a zero_page is amongst the returned pages, it will not have
  3147. * pins in it and unpin_user_page() will not remove pins from it.
  3148. */
  3149. int pin_user_pages_fast(unsigned long start, int nr_pages,
  3150. unsigned int gup_flags, struct page **pages)
  3151. {
  3152. if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
  3153. return -EINVAL;
  3154. return gup_fast_fallback(start, nr_pages, gup_flags, pages);
  3155. }
  3156. EXPORT_SYMBOL_GPL(pin_user_pages_fast);
  3157. /**
  3158. * pin_user_pages_remote() - pin pages of a remote process
  3159. *
  3160. * @mm: mm_struct of target mm
  3161. * @start: starting user address
  3162. * @nr_pages: number of pages from start to pin
  3163. * @gup_flags: flags modifying lookup behaviour
  3164. * @pages: array that receives pointers to the pages pinned.
  3165. * Should be at least nr_pages long.
  3166. * @locked: pointer to lock flag indicating whether lock is held and
  3167. * subsequently whether VM_FAULT_RETRY functionality can be
  3168. * utilised. Lock must initially be held.
  3169. *
  3170. * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
  3171. * get_user_pages_remote() for documentation on the function arguments, because
  3172. * the arguments here are identical.
  3173. *
  3174. * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
  3175. * see Documentation/core-api/pin_user_pages.rst for details.
  3176. *
  3177. * Note that if a zero_page is amongst the returned pages, it will not have
  3178. * pins in it and unpin_user_page*() will not remove pins from it.
  3179. */
  3180. long pin_user_pages_remote(struct mm_struct *mm,
  3181. unsigned long start, unsigned long nr_pages,
  3182. unsigned int gup_flags, struct page **pages,
  3183. int *locked)
  3184. {
  3185. int local_locked = 1;
  3186. if (!is_valid_gup_args(pages, locked, &gup_flags,
  3187. FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
  3188. return 0;
  3189. return __gup_longterm_locked(mm, start, nr_pages, pages,
  3190. locked ? locked : &local_locked,
  3191. gup_flags);
  3192. }
  3193. EXPORT_SYMBOL(pin_user_pages_remote);
  3194. /**
  3195. * pin_user_pages() - pin user pages in memory for use by other devices
  3196. *
  3197. * @start: starting user address
  3198. * @nr_pages: number of pages from start to pin
  3199. * @gup_flags: flags modifying lookup behaviour
  3200. * @pages: array that receives pointers to the pages pinned.
  3201. * Should be at least nr_pages long.
  3202. *
  3203. * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
  3204. * FOLL_PIN is set.
  3205. *
  3206. * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
  3207. * see Documentation/core-api/pin_user_pages.rst for details.
  3208. *
  3209. * Note that if a zero_page is amongst the returned pages, it will not have
  3210. * pins in it and unpin_user_page*() will not remove pins from it.
  3211. */
  3212. long pin_user_pages(unsigned long start, unsigned long nr_pages,
  3213. unsigned int gup_flags, struct page **pages)
  3214. {
  3215. int locked = 1;
  3216. if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
  3217. return 0;
  3218. return __gup_longterm_locked(current->mm, start, nr_pages,
  3219. pages, &locked, gup_flags);
  3220. }
  3221. EXPORT_SYMBOL(pin_user_pages);
  3222. /*
  3223. * pin_user_pages_unlocked() is the FOLL_PIN variant of
  3224. * get_user_pages_unlocked(). Behavior is the same, except that this one sets
  3225. * FOLL_PIN and rejects FOLL_GET.
  3226. *
  3227. * Note that if a zero_page is amongst the returned pages, it will not have
  3228. * pins in it and unpin_user_page*() will not remove pins from it.
  3229. */
  3230. long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
  3231. struct page **pages, unsigned int gup_flags)
  3232. {
  3233. int locked = 0;
  3234. if (!is_valid_gup_args(pages, NULL, &gup_flags,
  3235. FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
  3236. return 0;
  3237. return __gup_longterm_locked(current->mm, start, nr_pages, pages,
  3238. &locked, gup_flags);
  3239. }
  3240. EXPORT_SYMBOL(pin_user_pages_unlocked);
  3241. /**
  3242. * memfd_pin_folios() - pin folios associated with a memfd
  3243. * @memfd: the memfd whose folios are to be pinned
  3244. * @start: the first memfd offset
  3245. * @end: the last memfd offset (inclusive)
  3246. * @folios: array that receives pointers to the folios pinned
  3247. * @max_folios: maximum number of entries in @folios
  3248. * @offset: the offset into the first folio
  3249. *
  3250. * Attempt to pin folios associated with a memfd in the contiguous range
  3251. * [start, end]. Given that a memfd is either backed by shmem or hugetlb,
  3252. * the folios can either be found in the page cache or need to be allocated
  3253. * if necessary. Once the folios are located, they are all pinned via
  3254. * FOLL_PIN and @offset is populatedwith the offset into the first folio.
  3255. * And, eventually, these pinned folios must be released either using
  3256. * unpin_folios() or unpin_folio().
  3257. *
  3258. * It must be noted that the folios may be pinned for an indefinite amount
  3259. * of time. And, in most cases, the duration of time they may stay pinned
  3260. * would be controlled by the userspace. This behavior is effectively the
  3261. * same as using FOLL_LONGTERM with other GUP APIs.
  3262. *
  3263. * Returns number of folios pinned, which could be less than @max_folios
  3264. * as it depends on the folio sizes that cover the range [start, end].
  3265. * If no folios were pinned, it returns -errno.
  3266. */
  3267. long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
  3268. struct folio **folios, unsigned int max_folios,
  3269. pgoff_t *offset)
  3270. {
  3271. unsigned int flags, nr_folios, nr_found;
  3272. unsigned int i, pgshift = PAGE_SHIFT;
  3273. pgoff_t start_idx, end_idx, next_idx;
  3274. struct folio *folio = NULL;
  3275. struct folio_batch fbatch;
  3276. struct hstate *h;
  3277. long ret = -EINVAL;
  3278. if (start < 0 || start > end || !max_folios)
  3279. return -EINVAL;
  3280. if (!memfd)
  3281. return -EINVAL;
  3282. if (!shmem_file(memfd) && !is_file_hugepages(memfd))
  3283. return -EINVAL;
  3284. if (end >= i_size_read(file_inode(memfd)))
  3285. return -EINVAL;
  3286. if (is_file_hugepages(memfd)) {
  3287. h = hstate_file(memfd);
  3288. pgshift = huge_page_shift(h);
  3289. }
  3290. flags = memalloc_pin_save();
  3291. do {
  3292. nr_folios = 0;
  3293. start_idx = start >> pgshift;
  3294. end_idx = end >> pgshift;
  3295. if (is_file_hugepages(memfd)) {
  3296. start_idx <<= huge_page_order(h);
  3297. end_idx <<= huge_page_order(h);
  3298. }
  3299. folio_batch_init(&fbatch);
  3300. while (start_idx <= end_idx && nr_folios < max_folios) {
  3301. /*
  3302. * In most cases, we should be able to find the folios
  3303. * in the page cache. If we cannot find them for some
  3304. * reason, we try to allocate them and add them to the
  3305. * page cache.
  3306. */
  3307. nr_found = filemap_get_folios_contig(memfd->f_mapping,
  3308. &start_idx,
  3309. end_idx,
  3310. &fbatch);
  3311. if (folio) {
  3312. folio_put(folio);
  3313. folio = NULL;
  3314. }
  3315. next_idx = 0;
  3316. for (i = 0; i < nr_found; i++) {
  3317. /*
  3318. * As there can be multiple entries for a
  3319. * given folio in the batch returned by
  3320. * filemap_get_folios_contig(), the below
  3321. * check is to ensure that we pin and return a
  3322. * unique set of folios between start and end.
  3323. */
  3324. if (next_idx &&
  3325. next_idx != folio_index(fbatch.folios[i]))
  3326. continue;
  3327. folio = page_folio(&fbatch.folios[i]->page);
  3328. if (try_grab_folio(folio, 1, FOLL_PIN)) {
  3329. folio_batch_release(&fbatch);
  3330. ret = -EINVAL;
  3331. goto err;
  3332. }
  3333. if (nr_folios == 0)
  3334. *offset = offset_in_folio(folio, start);
  3335. folios[nr_folios] = folio;
  3336. next_idx = folio_next_index(folio);
  3337. if (++nr_folios == max_folios)
  3338. break;
  3339. }
  3340. folio = NULL;
  3341. folio_batch_release(&fbatch);
  3342. if (!nr_found) {
  3343. folio = memfd_alloc_folio(memfd, start_idx);
  3344. if (IS_ERR(folio)) {
  3345. ret = PTR_ERR(folio);
  3346. if (ret != -EEXIST)
  3347. goto err;
  3348. folio = NULL;
  3349. }
  3350. }
  3351. }
  3352. ret = check_and_migrate_movable_folios(nr_folios, folios);
  3353. } while (ret == -EAGAIN);
  3354. memalloc_pin_restore(flags);
  3355. return ret ? ret : nr_folios;
  3356. err:
  3357. memalloc_pin_restore(flags);
  3358. unpin_folios(folios, nr_folios);
  3359. return ret;
  3360. }
  3361. EXPORT_SYMBOL_GPL(memfd_pin_folios);