/* gup.c */

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memfd.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>
#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static inline void sanity_check_pinned_pages(struct page **pages,
					     unsigned long npages)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	/*
	 * We only pin anonymous pages if they are exclusive. Once pinned, we
	 * can no longer turn them possibly shared and PageAnonExclusive() will
	 * stick around until the page is freed.
	 *
	 * We'd like to verify that our pinned anonymous pages are still mapped
	 * exclusively. The issue with anon THP is that we don't know how
	 * they are/were mapped when pinning them. However, for anon
	 * THP we can assume that either the given page (PTE-mapped THP) or
	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
	 * neither is the case, there is certainly something wrong.
	 */
	for (; npages; npages--, pages++) {
		struct page *page = *pages;
		struct folio *folio;

		if (!page)
			continue;

		folio = page_folio(page);
		if (is_zero_page(page) ||
		    !folio_test_anon(folio))
			continue;
		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
		else
			/* Either a PTE-mapped or a PMD-mapped THP. */
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
				       !PageAnonExclusive(page), page);
	}
}

/*
 * Return the folio with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct folio *try_get_folio(struct page *page, int refs)
{
	struct folio *folio;

retry:
	folio = page_folio(page);
	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
		return NULL;
	if (unlikely(!folio_ref_try_add(folio, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the folio; but it
	 * could be that between calling page_folio() and the refcount
	 * increment, the folio was split, in which case we'd end up
	 * holding a reference on a folio that has nothing to do with the page
	 * we were given anymore.
	 * So now that the folio is stable, recheck that the page still
	 * belongs to this folio.
	 */
	if (unlikely(page_folio(page) != folio)) {
		if (!put_devmap_managed_folio_refs(folio, refs))
			folio_put_refs(folio, refs);
		goto retry;
	}

	return folio;
}

static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		if (is_zero_folio(folio))
			return;
		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
		if (folio_test_large(folio))
			atomic_sub(refs, &folio->_pincount);
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	if (!put_devmap_managed_folio_refs(folio, refs))
		folio_put_refs(folio, refs);
}

/**
 * try_grab_folio() - add a folio's refcount by a flag-dependent amount
 * @folio: pointer to folio to be grabbed
 * @refs: the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time.
 *
 * Return: 0 for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). A negative error code for failure:
 *
 *   -ENOMEM	FOLL_GET or FOLL_PIN was set, but the folio could not
 *		be grabbed.
 *
 * It is called when we have a stable reference for the folio, typically in
 * GUP slow path.
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags)
{
	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
		return -ENOMEM;

	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page)))
		return -EREMOTEIO;

	if (flags & FOLL_GET)
		folio_ref_add(folio, refs);
	else if (flags & FOLL_PIN) {
		/*
		 * Don't take a pin on the zero page - it's not going anywhere
		 * and it is used in a *lot* of places.
		 */
		if (is_zero_folio(folio))
			return 0;

		/*
		 * Increment the normal page refcount field at least once,
		 * so that the page really is pinned.
		 */
		if (folio_test_large(folio)) {
			folio_ref_add(folio, refs);
			atomic_add(refs, &folio->_pincount);
		} else {
			folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
		}

		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
	}

	return 0;
}
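
/*
 * Illustrative note added by the editor (not from the original source): the
 * bookkeeping above means that, for a small folio, a pin is visible only
 * through the refcount, which grows in steps of GUP_PIN_COUNTING_BIAS, e.g.
 *
 *	folio_ref_count() == 1 + refs * GUP_PIN_COUNTING_BIAS
 *
 * after pinning an otherwise idle folio "refs" times. A large folio instead
 * tracks pins exactly in folio->_pincount while its refcount grows by plain
 * "refs". That is why pin-detection helpers such as folio_maybe_dma_pinned()
 * are only heuristic for small folios but exact for large ones.
 */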
/**
 * unpin_user_page() - release a dma-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	sanity_check_pinned_pages(&page, 1);
	gup_put_folio(page_folio(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);
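
/*
 * Illustrative usage sketch added by the editor (not part of the original
 * file), assuming the current four-argument form of pin_user_pages(): a
 * typical caller releases every page it obtained through this API, e.g.
 *
 *	npinned = pin_user_pages(addr, nr, FOLL_WRITE, pages);
 *	if (npinned > 0) {
 *		... DMA into the pages ...
 *		for (i = 0; i < npinned; i++)
 *			unpin_user_page(pages[i]);
 *	}
 *
 * Pinned pages must never be released with plain put_page(); the pin
 * accounting above would otherwise be left unbalanced.
 */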
/**
 * unpin_folio() - release a dma-pinned folio
 * @folio: pointer to folio to be released
 *
 * Folios that were pinned via memfd_pin_folios() or other similar routines
 * must be released either using unpin_folio() or unpin_folios().
 */
void unpin_folio(struct folio *folio)
{
	gup_put_folio(folio, 1, FOLL_PIN);
}
EXPORT_SYMBOL_GPL(unpin_folio);

/**
 * folio_add_pin - Try to get an additional pin on a pinned folio
 * @folio: The folio to be pinned
 *
 * Get an additional pin on a folio we already have a pin on. Makes no change
 * if the folio is a zero_page.
 */
void folio_add_pin(struct folio *folio)
{
	if (is_zero_folio(folio))
		return;

	/*
	 * Similar to try_grab_folio(): be sure to *also* increment the normal
	 * page refcount field at least once, so that the page really is
	 * pinned.
	 */
	if (folio_test_large(folio)) {
		WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
		folio_ref_inc(folio);
		atomic_inc(&folio->_pincount);
	} else {
		WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
		folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
	}
}

static inline struct folio *gup_folio_range_next(struct page *start,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct page *next = nth_page(start, i);
	struct folio *folio = page_folio(next);
	unsigned int nr = 1;

	if (folio_test_large(folio))
		nr = min_t(unsigned int, npages - i,
			   folio_nr_pages(folio) - folio_page_idx(folio, next));

	*ntails = nr;
	return folio;
}

static inline struct folio *gup_folio_next(struct page **list,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct folio *folio = page_folio(list[i]);
	unsigned int nr;

	for (nr = i + 1; nr < npages; nr++) {
		if (page_folio(list[nr]) != folio)
			break;
	}

	*ntails = nr - i;
	return folio;
}

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages: array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * folio_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
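
/*
 * Illustrative usage sketch added by the editor (not part of the original
 * file): a driver that DMA-writes into pinned user memory typically releases
 * the whole batch with one call, letting this helper both dirty and unpin:
 *
 *	npinned = pin_user_pages(addr, nr, FOLL_WRITE, pages);
 *	... device writes into the pages ...
 *	unpin_user_pages_dirty_lock(pages, npinned, true);
 *
 * Passing make_dirty == false makes this equivalent to unpin_user_pages().
 */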
/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page: the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_range_next(page, npages, i, &nr);
		if (make_dirty && !folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * Don't perform any sanity checks because we might have raced with
	 * fork() and some anonymous pages might now actually be shared --
	 * which is why we're unpinning after all.
	 */
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages: array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		if (!pages[i]) {
			nr = 1;
			continue;
		}
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages);

/**
 * unpin_user_folio() - release pages of a folio
 * @folio: pointer to folio to be released
 * @npages: number of pages of same folio
 *
 * Release npages of the folio
 */
void unpin_user_folio(struct folio *folio, unsigned long npages)
{
	gup_put_folio(folio, npages, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_folio);

/**
 * unpin_folios() - release an array of gup-pinned folios.
 * @folios: array of folios to be marked dirty and released.
 * @nfolios: number of folios in the @folios array.
 *
 * For each folio in the @folios array, release the folio using gup_put_folio.
 *
 * Please see the unpin_folio() documentation for details.
 */
void unpin_folios(struct folio **folios, unsigned long nfolios)
{
	unsigned long i = 0, j;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking folios
	 * (by leaving them pinned), but probably not. More likely, gup/pup
	 * returned a hard -ERRNO error to the caller, who erroneously passed
	 * it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(nfolios)))
		return;

	while (i < nfolios) {
		for (j = i + 1; j < nfolios; j++)
			if (folios[i] != folios[j])
				break;

		if (folios[i])
			gup_put_folio(folios[i], j - i, FOLL_PIN);
		i = j;
	}
}
EXPORT_SYMBOL_GPL(unpin_folios);
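
/*
 * Illustrative note added by the editor (not part of the original file):
 * unpin_folios() collapses consecutive runs of identical folio pointers into
 * a single gup_put_folio() call, so an array such as { A, A, A, B, B } is
 * released with refs of 3 and 2 rather than five separate unpins. NULL
 * entries are simply skipped, mirroring unpin_user_pages().
 */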
/*
 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
 * lifecycle. Avoid setting the bit unless necessary, or it might cause write
 * cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
	if (!test_bit(MMF_HAS_PINNED, mm_flags))
		set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU

#ifdef CONFIG_HAVE_GUP_FAST
static int record_subpages(struct page *page, unsigned long sz,
			   unsigned long addr, unsigned long end,
			   struct page **pages)
{
	struct page *start_page;
	int nr;

	start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
	for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
		pages[nr] = nth_page(start_page, nr);

	return nr;
}

/**
 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
 * @page: pointer to page to be grabbed
 * @refs: the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: folio's refcount will be incremented by @refs.
 *
 *    FOLL_PIN on large folios: folio's refcount will be incremented by
 *    @refs, and its pincount will be incremented by @refs.
 *
 *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
 *    @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: The folio containing @page (with refcount appropriately
 * incremented) for success, or NULL upon failure. If neither FOLL_GET
 * nor FOLL_PIN was set, that's considered failure, and furthermore,
 * a likely bug in the caller, so a warning is also emitted.
 *
 * It uses add ref unless zero to elevate the folio refcount and must be called
 * in fast path only.
 */
static struct folio *try_grab_folio_fast(struct page *page, int refs,
					 unsigned int flags)
{
	struct folio *folio;

	/* Raise warn if it is not called in fast GUP */
	VM_WARN_ON_ONCE(!irqs_disabled());

	if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
		return NULL;

	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
		return NULL;

	if (flags & FOLL_GET)
		return try_get_folio(page, refs);

	/* FOLL_PIN is set */

	/*
	 * Don't take a pin on the zero page - it's not going anywhere
	 * and it is used in a *lot* of places.
	 */
	if (is_zero_page(page))
		return page_folio(page);

	folio = try_get_folio(page, refs);
	if (!folio)
		return NULL;

	/*
	 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
	 * right zone, so fail and let the caller fall back to the slow
	 * path.
	 */
	if (unlikely((flags & FOLL_LONGTERM) &&
		     !folio_is_longterm_pinnable(folio))) {
		if (!put_devmap_managed_folio_refs(folio, refs))
			folio_put_refs(folio, refs);
		return NULL;
	}

	/*
	 * When pinning a large folio, use an exact count to track it.
	 *
	 * However, be sure to *also* increment the normal folio
	 * refcount field at least once, so that the folio really
	 * is pinned. That's why the refcount from the earlier
	 * try_get_folio() is left intact.
	 */
	if (folio_test_large(folio))
		atomic_add(refs, &folio->_pincount);
	else
		folio_ref_add(folio,
			      refs * (GUP_PIN_COUNTING_BIAS - 1));
	/*
	 * Adjust the pincount before re-checking the PTE for changes.
	 * This is essentially a smp_mb() and is paired with a memory
	 * barrier in folio_try_share_anon_rmap_*().
	 */
	smp_mb__after_atomic();

	node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);

	return folio;
}
#endif	/* CONFIG_HAVE_GUP_FAST */

static struct page *no_page_table(struct vm_area_struct *vma,
				  unsigned int flags, unsigned long address)
{
	if (!(flags & FOLL_DUMP))
		return NULL;

	/*
	 * When core dumping, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if (is_vm_hugetlb_page(vma)) {
		struct hstate *h = hstate_vma(vma);

		if (!hugetlbfs_pagecache_present(h, vma, address))
			return ERR_PTR(-EFAULT);
	} else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) {
		return ERR_PTR(-EFAULT);
	}

	return NULL;
}

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
static struct page *follow_huge_pud(struct vm_area_struct *vma,
				    unsigned long addr, pud_t *pudp,
				    int flags, struct follow_page_context *ctx)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	pud_t pud = *pudp;
	unsigned long pfn = pud_pfn(pud);
	int ret;

	assert_spin_locked(pud_lockptr(mm, pudp));

	if ((flags & FOLL_WRITE) && !pud_write(pud))
		return NULL;

	if (!pud_present(pud))
		return NULL;

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    pud_devmap(pud)) {
		/*
		 * device mapped pages can only be returned if the caller
		 * will manage the page reference count.
		 *
		 * At least one of FOLL_GET | FOLL_PIN must be set, so
		 * assert that here:
		 */
		if (!(flags & (FOLL_GET | FOLL_PIN)))
			return ERR_PTR(-EEXIST);

		if (flags & FOLL_TOUCH)
			touch_pud(vma, addr, pudp, flags & FOLL_WRITE);

		ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
		if (!ctx->pgmap)
			return ERR_PTR(-EFAULT);
	}

	page = pfn_to_page(pfn);

	if (!pud_devmap(pud) && !pud_write(pud) &&
	    gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	ret = try_grab_folio(page_folio(page), 1, flags);
	if (ret)
		page = ERR_PTR(ret);
	else
		ctx->page_mask = HPAGE_PUD_NR - 1;

	return page;
}

/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pmd is writable, we can write to the page. */
	if (pmd_write(pmd))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (pmd_needs_soft_dirty_wp(vma, pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}

static struct page *follow_huge_pmd(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmd,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmdval = *pmd;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	page = pmd_page(pmdval);
	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pmd(pmdval, page, vma, flags))
		return NULL;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
		return ERR_PTR(-EFAULT);

	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
		return NULL;

	if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
		       !PageAnonExclusive(page), page);

	ret = try_grab_folio(page_folio(page), 1, flags);
	if (ret)
		return ERR_PTR(ret);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	ctx->page_mask = HPAGE_PMD_NR - 1;

	return page;
}

#else  /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
static struct page *follow_huge_pud(struct vm_area_struct *vma,
				    unsigned long addr, pud_t *pudp,
				    int flags, struct follow_page_context *ctx)
{
	return NULL;
}

static struct page *follow_huge_pmd(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmd,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	return NULL;
}
#endif	/* CONFIG_PGTABLE_HAS_HUGE_LEAVES */

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	if (flags & FOLL_TOUCH) {
		pte_t orig_entry = ptep_get(pte);
		pte_t entry = orig_entry;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(orig_entry, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
static inline bool can_follow_write_pte(pte_t pte, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pte is writable, we can write to the page. */
	if (pte_write(pte))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (pte_needs_soft_dirty_wp(vma, pte))
		return false;
	return !userfaultfd_pte_wp(vma, pte);
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct folio *folio;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return no_page_table(vma, flags, address);
	pte = ptep_get(ptep);
	if (!pte_present(pte))
		goto no_page;
	if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
		goto no_page;

	page = vm_normal_page(vma, address, pte);

	/*
	 * We only care about anon pages in can_follow_write_pte() and don't
	 * have to worry about pte_devmap() because they are never anon.
	 */
	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pte(pte, page, vma, flags)) {
		page = NULL;
		goto out;
	}

	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	folio = page_folio(page);

	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
		page = ERR_PTR(-EMLINK);
		goto out;
	}

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
		       !PageAnonExclusive(page), page);

	/* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
	ret = try_grab_folio(folio, 1, flags);
	if (unlikely(ret)) {
		page = ERR_PTR(ret);
		goto out;
	}

	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case). Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_folio_accessible(folio);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags, address);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	pmdval = pmdp_get_lockless(pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags, address);
	if (!pmd_present(pmdval))
		return no_page_table(vma, flags, address);
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
		return no_page_table(vma, flags, address);
	}
	if (likely(!pmd_leaf(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
		return no_page_table(vma, flags, address);

	ptl = pmd_lock(mm, pmd);
	pmdval = *pmd;
	if (unlikely(!pmd_present(pmdval))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags, address);
	}
	if (unlikely(!pmd_leaf(pmdval))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
		spin_unlock(ptl);
		split_huge_pmd(vma, pmd, address);
		/* If pmd was left empty, stuff a page table in there quickly */
		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_huge_pmd(vma, address, pmd, flags, ctx);
	spin_unlock(ptl);
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pudp, pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pudp = pud_offset(p4dp, address);
	pud = READ_ONCE(*pudp);
	if (!pud_present(pud))
		return no_page_table(vma, flags, address);
	if (pud_leaf(pud)) {
		ptl = pud_lock(mm, pudp);
		page = follow_huge_pud(vma, address, pudp, flags, ctx);
		spin_unlock(ptl);
		if (page)
			return page;
		return no_page_table(vma, flags, address);
	}
	if (unlikely(pud_bad(pud)))
		return no_page_table(vma, flags, address);

	return follow_pmd_mask(vma, address, pudp, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4dp, p4d;

	p4dp = p4d_offset(pgdp, address);
	p4d = READ_ONCE(*p4dp);
	BUILD_BUG_ON(p4d_leaf(p4d));

	if (!p4d_present(p4d) || p4d_bad(p4d))
		return no_page_table(vma, flags, address);

	return follow_pud_mask(vma, address, p4dp, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * When getting an anonymous page and the caller has to trigger unsharing
 * of a shared anonymous page first, -EMLINK is returned. The caller should
 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
 * relevant with FOLL_PIN and !FOLL_WRITE.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	vma_pgtable_walk_begin(vma);

	ctx->page_mask = 0;
	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		page = no_page_table(vma, flags, address);
	else
		page = follow_p4d_mask(vma, address, pgd, flags, ctx);

	vma_pgtable_walk_end(vma);

	return page;
}
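
/*
 * Illustrative note added by the editor (not part of the original file):
 * ctx->page_mask lets a caller walking a large mapping skip ahead instead of
 * re-walking the page tables once per PAGE_SIZE. A sketch of the pattern used
 * by the GUP loop:
 *
 *	page = follow_page_mask(vma, addr, flags, &ctx);
 *	if (!IS_ERR_OR_NULL(page)) {
 *		unsigned int page_increm = 1 +
 *			(~(addr >> PAGE_SHIFT) & ctx.page_mask);
 *		addr += page_increm * PAGE_SIZE;
 *	}
 *
 * so a PMD-mapped THP (page_mask == HPAGE_PMD_NR - 1) can advance the walk by
 * up to an entire huge page at once.
 */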
  957. static int get_gate_page(struct mm_struct *mm, unsigned long address,
  958. unsigned int gup_flags, struct vm_area_struct **vma,
  959. struct page **page)
  960. {
  961. pgd_t *pgd;
  962. p4d_t *p4d;
  963. pud_t *pud;
  964. pmd_t *pmd;
  965. pte_t *pte;
  966. pte_t entry;
  967. int ret = -EFAULT;
  968. /* user gate pages are read-only */
  969. if (gup_flags & FOLL_WRITE)
  970. return -EFAULT;
  971. if (address > TASK_SIZE)
  972. pgd = pgd_offset_k(address);
  973. else
  974. pgd = pgd_offset_gate(mm, address);
  975. if (pgd_none(*pgd))
  976. return -EFAULT;
  977. p4d = p4d_offset(pgd, address);
  978. if (p4d_none(*p4d))
  979. return -EFAULT;
  980. pud = pud_offset(p4d, address);
  981. if (pud_none(*pud))
  982. return -EFAULT;
  983. pmd = pmd_offset(pud, address);
  984. if (!pmd_present(*pmd))
  985. return -EFAULT;
  986. pte = pte_offset_map(pmd, address);
  987. if (!pte)
  988. return -EFAULT;
  989. entry = ptep_get(pte);
  990. if (pte_none(entry))
  991. goto unmap;
  992. *vma = get_gate_vma(mm);
  993. if (!page)
  994. goto out;
  995. *page = vm_normal_page(*vma, address, entry);
  996. if (!*page) {
  997. if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
  998. goto unmap;
  999. *page = pte_page(entry);
  1000. }
  1001. ret = try_grab_folio(page_folio(*page), 1, gup_flags);
  1002. if (unlikely(ret))
  1003. goto unmap;
  1004. out:
  1005. ret = 0;
  1006. unmap:
  1007. pte_unmap(pte);
  1008. return ret;
  1009. }
  1010. /*
  1011. * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not
  1012. * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set
  1013. * to 0 and -EBUSY returned.
  1014. */
  1015. static int faultin_page(struct vm_area_struct *vma,
  1016. unsigned long address, unsigned int flags, bool unshare,
  1017. int *locked)
  1018. {
  1019. unsigned int fault_flags = 0;
  1020. vm_fault_t ret;
  1021. if (flags & FOLL_NOFAULT)
  1022. return -EFAULT;
  1023. if (flags & FOLL_WRITE)
  1024. fault_flags |= FAULT_FLAG_WRITE;
  1025. if (flags & FOLL_REMOTE)
  1026. fault_flags |= FAULT_FLAG_REMOTE;
  1027. if (flags & FOLL_UNLOCKABLE) {
  1028. fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
  1029. /*
  1030. * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
  1031. * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
  1032. * That's because some callers may not be prepared to
  1033. * handle early exits caused by non-fatal signals.
  1034. */
  1035. if (flags & FOLL_INTERRUPTIBLE)
  1036. fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
  1037. }
  1038. if (flags & FOLL_NOWAIT)
  1039. fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
  1040. if (flags & FOLL_TRIED) {
  1041. /*
  1042. * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
  1043. * can co-exist
  1044. */
  1045. fault_flags |= FAULT_FLAG_TRIED;
  1046. }
  1047. if (unshare) {
  1048. fault_flags |= FAULT_FLAG_UNSHARE;
  1049. /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
  1050. VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
  1051. }
  1052. ret = handle_mm_fault(vma, address, fault_flags, NULL);
  1053. if (ret & VM_FAULT_COMPLETED) {
  1054. /*
  1055. * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
  1056. * mmap lock in the page fault handler. Sanity check this.
  1057. */
  1058. WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
  1059. *locked = 0;
  1060. /*
  1061. * We should do the same as VM_FAULT_RETRY, but let's not
  1062. * return -EBUSY since that's not reflecting the reality of
  1063. * what has happened - we've just fully completed a page
  1064. * fault, with the mmap lock released. Use -EAGAIN to show
  1065. * that we want to take the mmap lock _again_.
  1066. */
  1067. return -EAGAIN;
  1068. }
  1069. if (ret & VM_FAULT_ERROR) {
  1070. int err = vm_fault_to_errno(ret, flags);
  1071. if (err)
  1072. return err;
  1073. BUG();
  1074. }
  1075. if (ret & VM_FAULT_RETRY) {
  1076. if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
  1077. *locked = 0;
  1078. return -EBUSY;
  1079. }
  1080. return 0;
  1081. }
  1082. /*
  1083. * Writing to file-backed mappings which require folio dirty tracking using GUP
  1084. * is a fundamentally broken operation, as kernel write access to GUP mappings
  1085. * do not adhere to the semantics expected by a file system.
  1086. *
  1087. * Consider the following scenario:-
  1088. *
  1089. * 1. A folio is written to via GUP which write-faults the memory, notifying
  1090. * the file system and dirtying the folio.
  1091. * 2. Later, writeback is triggered, resulting in the folio being cleaned and
  1092. * the PTE being marked read-only.
  1093. * 3. The GUP caller writes to the folio, as it is mapped read/write via the
  1094. * direct mapping.
  1095. * 4. The GUP caller, now done with the page, unpins it and sets it dirty
  1096. * (though it does not have to).
  1097. *
  1098. * This results in both data being written to a folio without writenotify, and
  1099. * the folio being dirtied unexpectedly (if the caller decides to do so).
  1100. */
  1101. static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
  1102. unsigned long gup_flags)
  1103. {
  1104. /*
  1105. * If we aren't pinning then no problematic write can occur. A long term
  1106. * pin is the most egregious case so this is the case we disallow.
  1107. */
  1108. if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
  1109. (FOLL_PIN | FOLL_LONGTERM))
  1110. return true;
  1111. /*
  1112. * If the VMA does not require dirty tracking then no problematic write
  1113. * can occur either.
  1114. */
  1115. return !vma_needs_dirty_tracking(vma);
  1116. }
  1117. static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  1118. {
  1119. vm_flags_t vm_flags = vma->vm_flags;
  1120. int write = (gup_flags & FOLL_WRITE);
  1121. int foreign = (gup_flags & FOLL_REMOTE);
  1122. bool vma_anon = vma_is_anonymous(vma);
  1123. if (vm_flags & (VM_IO | VM_PFNMAP))
  1124. return -EFAULT;
  1125. if ((gup_flags & FOLL_ANON) && !vma_anon)
  1126. return -EFAULT;
  1127. if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
  1128. return -EOPNOTSUPP;
  1129. if (vma_is_secretmem(vma))
  1130. return -EFAULT;
  1131. if (write) {
  1132. if (!vma_anon &&
  1133. !writable_file_mapping_allowed(vma, gup_flags))
  1134. return -EFAULT;
  1135. if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
  1136. if (!(gup_flags & FOLL_FORCE))
  1137. return -EFAULT;
  1138. /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
  1139. if (is_vm_hugetlb_page(vma))
  1140. return -EFAULT;
  1141. /*
  1142. * We used to let the write,force case do COW in a
  1143. * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
  1144. * set a breakpoint in a read-only mapping of an
  1145. * executable, without corrupting the file (yet only
  1146. * when that file had been opened for writing!).
  1147. * Anon pages in shared mappings are surprising: now
  1148. * just reject it.
  1149. */
  1150. if (!is_cow_mapping(vm_flags))
  1151. return -EFAULT;
  1152. }
  1153. } else if (!(vm_flags & VM_READ)) {
  1154. if (!(gup_flags & FOLL_FORCE))
  1155. return -EFAULT;
  1156. /*
  1157. * Is there actually any vma we can reach here which does not
  1158. * have VM_MAYREAD set?
  1159. */
  1160. if (!(vm_flags & VM_MAYREAD))
  1161. return -EFAULT;
  1162. }
  1163. /*
  1164. * gups are always data accesses, not instruction
  1165. * fetches, so execute=false here
  1166. */
  1167. if (!arch_vma_access_permitted(vma, write, false, foreign))
  1168. return -EFAULT;
  1169. return 0;
  1170. }
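/*
 * Example of the FOLL_FORCE rules above (editor's illustrative sketch; mm,
 * addr, page and ret are placeholders): a ptrace-style writer poking a
 * breakpoint into a read-only private mapping of an executable relies on the
 * is_cow_mapping() exception:
 *
 *	mmap_read_lock(mm);
 *	ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE | FOLL_FORCE,
 *				    &page, NULL);
 *	mmap_read_unlock(mm);
 *
 * This succeeds for a MAP_PRIVATE PROT_READ|PROT_EXEC mapping (a COW
 * mapping), while the same call against a read-only MAP_SHARED or hugetlb
 * mapping fails check_vma_flags() with -EFAULT.
 */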
  1171. /*
  1172. * This is "vma_lookup()", but with a warning if we would have
  1173. * historically expanded the stack in the GUP code.
  1174. */
  1175. static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
  1176. unsigned long addr)
  1177. {
  1178. #ifdef CONFIG_STACK_GROWSUP
  1179. return vma_lookup(mm, addr);
  1180. #else
  1181. static volatile unsigned long next_warn;
  1182. struct vm_area_struct *vma;
  1183. unsigned long now, next;
  1184. vma = find_vma(mm, addr);
  1185. if (!vma || (addr >= vma->vm_start))
  1186. return vma;
  1187. /* Only warn for half-way relevant accesses */
  1188. if (!(vma->vm_flags & VM_GROWSDOWN))
  1189. return NULL;
  1190. if (vma->vm_start - addr > 65536)
  1191. return NULL;
  1192. /* Let's not warn more than once an hour.. */
  1193. now = jiffies; next = next_warn;
  1194. if (next && time_before(now, next))
  1195. return NULL;
  1196. next_warn = now + 60*60*HZ;
  1197. /* Let people know things may have changed. */
  1198. pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
  1199. current->comm, task_pid_nr(current),
  1200. vma->vm_start, vma->vm_end, addr);
  1201. dump_stack();
  1202. return NULL;
  1203. #endif
  1204. }
  1205. /**
  1206. * __get_user_pages() - pin user pages in memory
  1207. * @mm: mm_struct of target mm
  1208. * @start: starting user address
  1209. * @nr_pages: number of pages from start to pin
  1210. * @gup_flags: flags modifying pin behaviour
  1211. * @pages: array that receives pointers to the pages pinned.
  1212. * Should be at least nr_pages long. Or NULL, if caller
  1213. * only intends to ensure the pages are faulted in.
  1214. * @locked: whether we're still with the mmap_lock held
  1215. *
  1216. * Returns either number of pages pinned (which may be less than the
  1217. * number requested), or an error. Details about the return value:
  1218. *
  1219. * -- If nr_pages is 0, returns 0.
  1220. * -- If nr_pages is >0, but no pages were pinned, returns -errno.
  1221. * -- If nr_pages is >0, and some pages were pinned, returns the number of
  1222. * pages pinned. Again, this may be less than nr_pages.
  1223. * -- 0 return value is possible when the fault would need to be retried.
  1224. *
  1225. * The caller is responsible for releasing returned @pages, via put_page().
  1226. *
  1227. * Must be called with mmap_lock held. It may be released. See below.
  1228. *
  1229. * __get_user_pages walks a process's page tables and takes a reference to
  1230. * each struct page that each user address corresponds to at a given
  1231. * instant. That is, it takes the page that would be accessed if a user
  1232. * thread accesses the given user virtual address at that instant.
  1233. *
  1234. * This does not guarantee that the page exists in the user mappings when
  1235. * __get_user_pages returns, and there may even be a completely different
  1236. * page there in some cases (eg. if mmapped pagecache has been invalidated
  1237. * and subsequently re-faulted). However it does guarantee that the page
  1238. * won't be freed completely. And mostly callers simply care that the page
  1239. * contains data that was valid *at some point in time*. Typically, an IO
  1240. * or similar operation cannot guarantee anything stronger anyway because
  1241. * locks can't be held over the syscall boundary.
  1242. *
  1243. * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
  1244. * the page is written to, set_page_dirty (or set_page_dirty_lock, as
  1245. * appropriate) must be called after the page is finished with, and
  1246. * before put_page is called.
  1247. *
  1248. * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
  1249. * be released. If this happens *@locked will be set to 0 on return.
  1250. *
  1251. * A caller using such a combination of @gup_flags must therefore hold the
  1252. * mmap_lock for reading only, and recognize when it's been released. Otherwise,
  1253. * it must be held for either reading or writing and will not be released.
  1254. *
  1255. * In most cases, get_user_pages or get_user_pages_fast should be used
  1256. * instead of __get_user_pages. __get_user_pages should be used only if
  1257. * you need some special @gup_flags.
  1258. */
  1259. static long __get_user_pages(struct mm_struct *mm,
  1260. unsigned long start, unsigned long nr_pages,
  1261. unsigned int gup_flags, struct page **pages,
  1262. int *locked)
  1263. {
  1264. long ret = 0, i = 0;
  1265. struct vm_area_struct *vma = NULL;
  1266. struct follow_page_context ctx = { NULL };
  1267. if (!nr_pages)
  1268. return 0;
  1269. start = untagged_addr_remote(mm, start);
  1270. VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
  1271. do {
  1272. struct page *page;
  1273. unsigned int page_increm;
  1274. /* first iteration or cross vma bound */
  1275. if (!vma || start >= vma->vm_end) {
  1276. /*
  1277. * MADV_POPULATE_(READ|WRITE) wants to handle VMA
  1278. * lookups+error reporting differently.
  1279. */
  1280. if (gup_flags & FOLL_MADV_POPULATE) {
  1281. vma = vma_lookup(mm, start);
  1282. if (!vma) {
  1283. ret = -ENOMEM;
  1284. goto out;
  1285. }
  1286. if (check_vma_flags(vma, gup_flags)) {
  1287. ret = -EINVAL;
  1288. goto out;
  1289. }
  1290. goto retry;
  1291. }
  1292. vma = gup_vma_lookup(mm, start);
  1293. if (!vma && in_gate_area(mm, start)) {
  1294. ret = get_gate_page(mm, start & PAGE_MASK,
  1295. gup_flags, &vma,
  1296. pages ? &page : NULL);
  1297. if (ret)
  1298. goto out;
  1299. ctx.page_mask = 0;
  1300. goto next_page;
  1301. }
  1302. if (!vma) {
  1303. ret = -EFAULT;
  1304. goto out;
  1305. }
  1306. ret = check_vma_flags(vma, gup_flags);
  1307. if (ret)
  1308. goto out;
  1309. }
  1310. retry:
  1311. /*
  1312. * If we have a pending SIGKILL, don't keep faulting pages and
  1313. * potentially allocating memory.
  1314. */
  1315. if (fatal_signal_pending(current)) {
  1316. ret = -EINTR;
  1317. goto out;
  1318. }
  1319. cond_resched();
  1320. page = follow_page_mask(vma, start, gup_flags, &ctx);
  1321. if (!page || PTR_ERR(page) == -EMLINK) {
  1322. ret = faultin_page(vma, start, gup_flags,
  1323. PTR_ERR(page) == -EMLINK, locked);
  1324. switch (ret) {
  1325. case 0:
  1326. goto retry;
  1327. case -EBUSY:
  1328. case -EAGAIN:
  1329. ret = 0;
  1330. fallthrough;
  1331. case -EFAULT:
  1332. case -ENOMEM:
  1333. case -EHWPOISON:
  1334. goto out;
  1335. }
  1336. BUG();
  1337. } else if (PTR_ERR(page) == -EEXIST) {
  1338. /*
  1339. * Proper page table entry exists, but no corresponding
  1340. * struct page. If the caller expects **pages to be
  1341. * filled in, bail out now, because that can't be done
  1342. * for this page.
  1343. */
  1344. if (pages) {
  1345. ret = PTR_ERR(page);
  1346. goto out;
  1347. }
  1348. } else if (IS_ERR(page)) {
  1349. ret = PTR_ERR(page);
  1350. goto out;
  1351. }
  1352. next_page:
  1353. page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
  1354. if (page_increm > nr_pages)
  1355. page_increm = nr_pages;
  1356. if (pages) {
  1357. struct page *subpage;
  1358. unsigned int j;
  1359. /*
  1360. * This must be a large folio (and doesn't need to
  1361. * be the whole folio; it can be part of it), do
  1362. * the refcount work for all the subpages too.
  1363. *
  1364. * NOTE: here the page may not be the head page
  1365. * e.g. when start addr is not thp-size aligned.
  1366. * try_grab_folio() should have taken care of tail
  1367. * pages.
  1368. */
  1369. if (page_increm > 1) {
  1370. struct folio *folio = page_folio(page);
  1371. /*
  1372. * Since we already hold refcount on the
  1373. * large folio, this should never fail.
  1374. */
  1375. if (try_grab_folio(folio, page_increm - 1,
  1376. gup_flags)) {
  1377. /*
  1378. * Release the 1st page ref if the
  1379. * folio is problematic, fail hard.
  1380. */
  1381. gup_put_folio(folio, 1, gup_flags);
  1382. ret = -EFAULT;
  1383. goto out;
  1384. }
  1385. }
  1386. for (j = 0; j < page_increm; j++) {
  1387. subpage = nth_page(page, j);
  1388. pages[i + j] = subpage;
  1389. flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
  1390. flush_dcache_page(subpage);
  1391. }
  1392. }
  1393. i += page_increm;
  1394. start += page_increm * PAGE_SIZE;
  1395. nr_pages -= page_increm;
  1396. } while (nr_pages);
  1397. out:
  1398. if (ctx.pgmap)
  1399. put_dev_pagemap(ctx.pgmap);
  1400. return i ? i : ret;
  1401. }
  1402. static bool vma_permits_fault(struct vm_area_struct *vma,
  1403. unsigned int fault_flags)
  1404. {
  1405. bool write = !!(fault_flags & FAULT_FLAG_WRITE);
  1406. bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
  1407. vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
  1408. if (!(vm_flags & vma->vm_flags))
  1409. return false;
  1410. /*
  1411. * The architecture might have a hardware protection
  1412. * mechanism other than read/write that can deny access.
  1413. *
  1414. * gup always represents data access, not instruction
  1415. * fetches, so execute=false here:
  1416. */
  1417. if (!arch_vma_access_permitted(vma, write, false, foreign))
  1418. return false;
  1419. return true;
  1420. }
  1421. /**
  1422. * fixup_user_fault() - manually resolve a user page fault
  1423. * @mm: mm_struct of target mm
  1424. * @address: user address
  1425. * @fault_flags:flags to pass down to handle_mm_fault()
1426. * @unlocked: set to true if the mmap_lock was unlocked and re-acquired while
1427. * retrying; may be NULL if the caller does not allow retry. If NULL, the
1428. * caller must guarantee that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
  1429. *
  1430. * This is meant to be called in the specific scenario where for locking reasons
  1431. * we try to access user memory in atomic context (within a pagefault_disable()
  1432. * section), this returns -EFAULT, and we want to resolve the user fault before
  1433. * trying again.
  1434. *
  1435. * Typically this is meant to be used by the futex code.
  1436. *
  1437. * The main difference with get_user_pages() is that this function will
  1438. * unconditionally call handle_mm_fault() which will in turn perform all the
  1439. * necessary SW fixup of the dirty and young bits in the PTE, while
  1440. * get_user_pages() only guarantees to update these in the struct page.
  1441. *
  1442. * This is important for some architectures where those bits also gate the
  1443. * access permission to the page because they are maintained in software. On
  1444. * such architectures, gup() will not be enough to make a subsequent access
  1445. * succeed.
  1446. *
1447. * This function will not return with an unlocked mmap_lock. So it does not
1448. * have the same semantics wrt the @mm->mmap_lock as does filemap_fault().
  1449. */
  1450. int fixup_user_fault(struct mm_struct *mm,
  1451. unsigned long address, unsigned int fault_flags,
  1452. bool *unlocked)
  1453. {
  1454. struct vm_area_struct *vma;
  1455. vm_fault_t ret;
  1456. address = untagged_addr_remote(mm, address);
  1457. if (unlocked)
  1458. fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
  1459. retry:
  1460. vma = gup_vma_lookup(mm, address);
  1461. if (!vma)
  1462. return -EFAULT;
  1463. if (!vma_permits_fault(vma, fault_flags))
  1464. return -EFAULT;
  1465. if ((fault_flags & FAULT_FLAG_KILLABLE) &&
  1466. fatal_signal_pending(current))
  1467. return -EINTR;
  1468. ret = handle_mm_fault(vma, address, fault_flags, NULL);
  1469. if (ret & VM_FAULT_COMPLETED) {
  1470. /*
  1471. * NOTE: it's a pity that we need to retake the lock here
  1472. * to pair with the unlock() in the callers. Ideally we
  1473. * could tell the callers so they do not need to unlock.
  1474. */
  1475. mmap_read_lock(mm);
  1476. *unlocked = true;
  1477. return 0;
  1478. }
  1479. if (ret & VM_FAULT_ERROR) {
  1480. int err = vm_fault_to_errno(ret, 0);
  1481. if (err)
  1482. return err;
  1483. BUG();
  1484. }
  1485. if (ret & VM_FAULT_RETRY) {
  1486. mmap_read_lock(mm);
  1487. *unlocked = true;
  1488. fault_flags |= FAULT_FLAG_TRIED;
  1489. goto retry;
  1490. }
  1491. return 0;
  1492. }
  1493. EXPORT_SYMBOL_GPL(fixup_user_fault);
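/*
 * Typical usage sketch (editor's illustration of the futex-style pattern
 * described above; uaddr and ret are placeholders): attempt the access with
 * page faults disabled, and resolve the fault only on failure:
 *
 *	pagefault_disable();
 *	ret = ...atomically access the user value at uaddr...;
 *	pagefault_enable();
 *	if (ret) {
 *		mmap_read_lock(mm);
 *		ret = fixup_user_fault(mm, (unsigned long)uaddr,
 *				       FAULT_FLAG_WRITE, NULL);
 *		mmap_read_unlock(mm);
 *		if (!ret)
 *			...retry the atomic access...;
 *	}
 *
 * Passing a NULL @unlocked means FAULT_FLAG_ALLOW_RETRY is not used, so the
 * mmap_lock is never dropped behind the caller's back.
 */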
  1494. /*
  1495. * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is
  1496. * specified, it'll also respond to generic signals. The caller of GUP
  1497. * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
  1498. */
  1499. static bool gup_signal_pending(unsigned int flags)
  1500. {
  1501. if (fatal_signal_pending(current))
  1502. return true;
  1503. if (!(flags & FOLL_INTERRUPTIBLE))
  1504. return false;
  1505. return signal_pending(current);
  1506. }
  1507. /*
  1508. * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
  1509. * the caller. This function may drop the mmap_lock. If it does so, then it will
  1510. * set (*locked = 0).
  1511. *
  1512. * (*locked == 0) means that the caller expects this function to acquire and
  1513. * drop the mmap_lock. Therefore, the value of *locked will still be zero when
  1514. * the function returns, even though it may have changed temporarily during
  1515. * function execution.
  1516. *
  1517. * Please note that this function, unlike __get_user_pages(), will not return 0
  1518. * for nr_pages > 0, unless FOLL_NOWAIT is used.
  1519. */
  1520. static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
  1521. unsigned long start,
  1522. unsigned long nr_pages,
  1523. struct page **pages,
  1524. int *locked,
  1525. unsigned int flags)
  1526. {
  1527. long ret, pages_done;
  1528. bool must_unlock = false;
  1529. if (!nr_pages)
  1530. return 0;
  1531. /*
  1532. * The internal caller expects GUP to manage the lock internally and the
  1533. * lock must be released when this returns.
  1534. */
  1535. if (!*locked) {
  1536. if (mmap_read_lock_killable(mm))
  1537. return -EAGAIN;
  1538. must_unlock = true;
  1539. *locked = 1;
  1540. }
  1541. else
  1542. mmap_assert_locked(mm);
  1543. if (flags & FOLL_PIN)
  1544. mm_set_has_pinned_flag(&mm->flags);
  1545. /*
  1546. * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
  1547. * is to set FOLL_GET if the caller wants pages[] filled in (but has
  1548. * carelessly failed to specify FOLL_GET), so keep doing that, but only
  1549. * for FOLL_GET, not for the newer FOLL_PIN.
  1550. *
  1551. * FOLL_PIN always expects pages to be non-null, but no need to assert
  1552. * that here, as any failures will be obvious enough.
  1553. */
  1554. if (pages && !(flags & FOLL_PIN))
  1555. flags |= FOLL_GET;
  1556. pages_done = 0;
  1557. for (;;) {
  1558. ret = __get_user_pages(mm, start, nr_pages, flags, pages,
  1559. locked);
  1560. if (!(flags & FOLL_UNLOCKABLE)) {
  1561. /* VM_FAULT_RETRY couldn't trigger, bypass */
  1562. pages_done = ret;
  1563. break;
  1564. }
  1565. /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
  1566. if (!*locked) {
  1567. BUG_ON(ret < 0);
  1568. BUG_ON(ret >= nr_pages);
  1569. }
  1570. if (ret > 0) {
  1571. nr_pages -= ret;
  1572. pages_done += ret;
  1573. if (!nr_pages)
  1574. break;
  1575. }
  1576. if (*locked) {
  1577. /*
  1578. * VM_FAULT_RETRY didn't trigger or it was a
  1579. * FOLL_NOWAIT.
  1580. */
  1581. if (!pages_done)
  1582. pages_done = ret;
  1583. break;
  1584. }
  1585. /*
  1586. * VM_FAULT_RETRY triggered, so seek to the faulting offset.
  1587. * For the prefault case (!pages) we only update counts.
  1588. */
  1589. if (likely(pages))
  1590. pages += ret;
  1591. start += ret << PAGE_SHIFT;
  1592. /* The lock was temporarily dropped, so we must unlock later */
  1593. must_unlock = true;
  1594. retry:
  1595. /*
  1596. * Repeat on the address that fired VM_FAULT_RETRY
  1597. * with both FAULT_FLAG_ALLOW_RETRY and
1598. * FAULT_FLAG_TRIED. Note that GUP can be interrupted
1599. * by fatal signals or even common signals, depending on
1600. * the caller's request. So we need to check for pending
1601. * signals before retrying, otherwise this can loop forever.
  1602. */
  1603. if (gup_signal_pending(flags)) {
  1604. if (!pages_done)
  1605. pages_done = -EINTR;
  1606. break;
  1607. }
  1608. ret = mmap_read_lock_killable(mm);
  1609. if (ret) {
  1610. BUG_ON(ret > 0);
  1611. if (!pages_done)
  1612. pages_done = ret;
  1613. break;
  1614. }
  1615. *locked = 1;
  1616. ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
  1617. pages, locked);
  1618. if (!*locked) {
1619. /* Continue to retry until we succeed */
  1620. BUG_ON(ret != 0);
  1621. goto retry;
  1622. }
  1623. if (ret != 1) {
  1624. BUG_ON(ret > 1);
  1625. if (!pages_done)
  1626. pages_done = ret;
  1627. break;
  1628. }
  1629. nr_pages--;
  1630. pages_done++;
  1631. if (!nr_pages)
  1632. break;
  1633. if (likely(pages))
  1634. pages++;
  1635. start += PAGE_SIZE;
  1636. }
  1637. if (must_unlock && *locked) {
  1638. /*
  1639. * We either temporarily dropped the lock, or the caller
  1640. * requested that we both acquire and drop the lock. Either way,
  1641. * we must now unlock, and notify the caller of that state.
  1642. */
  1643. mmap_read_unlock(mm);
  1644. *locked = 0;
  1645. }
  1646. /*
  1647. * Failing to pin anything implies something has gone wrong (except when
  1648. * FOLL_NOWAIT is specified).
  1649. */
  1650. if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT)))
  1651. return -EFAULT;
  1652. return pages_done;
  1653. }
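/*
 * Locking-convention sketch for the two cases described above (editor's
 * illustration; mm, start, nr, pages, flags and ret are placeholders). An
 * internal caller that wants GUP to acquire and drop mmap_lock itself:
 *
 *	int locked = 0;
 *	ret = __get_user_pages_locked(mm, start, nr, pages, &locked, flags);
 *
 * versus a caller that already holds mmap_lock for read and tolerates it
 * being dropped across faults:
 *
 *	int locked = 1;
 *	mmap_read_lock(mm);
 *	ret = __get_user_pages_locked(mm, start, nr, pages, &locked,
 *				      flags | FOLL_UNLOCKABLE);
 *	if (locked)
 *		mmap_read_unlock(mm);
 */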
  1654. /**
  1655. * populate_vma_page_range() - populate a range of pages in the vma.
  1656. * @vma: target vma
  1657. * @start: start address
  1658. * @end: end address
  1659. * @locked: whether the mmap_lock is still held
  1660. *
  1661. * This takes care of mlocking the pages too if VM_LOCKED is set.
  1662. *
  1663. * Return either number of pages pinned in the vma, or a negative error
  1664. * code on error.
  1665. *
  1666. * vma->vm_mm->mmap_lock must be held.
  1667. *
  1668. * If @locked is NULL, it may be held for read or write and will
  1669. * be unperturbed.
  1670. *
1671. * If @locked is non-NULL, it must be held for read only and may be
  1672. * released. If it's released, *@locked will be set to 0.
  1673. */
  1674. long populate_vma_page_range(struct vm_area_struct *vma,
  1675. unsigned long start, unsigned long end, int *locked)
  1676. {
  1677. struct mm_struct *mm = vma->vm_mm;
  1678. unsigned long nr_pages = (end - start) / PAGE_SIZE;
  1679. int local_locked = 1;
  1680. int gup_flags;
  1681. long ret;
  1682. VM_BUG_ON(!PAGE_ALIGNED(start));
  1683. VM_BUG_ON(!PAGE_ALIGNED(end));
  1684. VM_BUG_ON_VMA(start < vma->vm_start, vma);
  1685. VM_BUG_ON_VMA(end > vma->vm_end, vma);
  1686. mmap_assert_locked(mm);
  1687. /*
  1688. * Rightly or wrongly, the VM_LOCKONFAULT case has never used
  1689. * faultin_page() to break COW, so it has no work to do here.
  1690. */
  1691. if (vma->vm_flags & VM_LOCKONFAULT)
  1692. return nr_pages;
  1693. /* ... similarly, we've never faulted in PROT_NONE pages */
  1694. if (!vma_is_accessible(vma))
  1695. return -EFAULT;
  1696. gup_flags = FOLL_TOUCH;
  1697. /*
  1698. * We want to touch writable mappings with a write fault in order
  1699. * to break COW, except for shared mappings because these don't COW
  1700. * and we would not want to dirty them for nothing.
  1701. *
  1702. * Otherwise, do a read fault, and use FOLL_FORCE in case it's not
  1703. * readable (ie write-only or executable).
  1704. */
  1705. if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
  1706. gup_flags |= FOLL_WRITE;
  1707. else
  1708. gup_flags |= FOLL_FORCE;
  1709. if (locked)
  1710. gup_flags |= FOLL_UNLOCKABLE;
  1711. /*
  1712. * We made sure addr is within a VMA, so the following will
  1713. * not result in a stack expansion that recurses back here.
  1714. */
  1715. ret = __get_user_pages(mm, start, nr_pages, gup_flags,
  1716. NULL, locked ? locked : &local_locked);
  1717. lru_add_drain();
  1718. return ret;
  1719. }
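/*
 * Usage sketch (editor's illustration; mm, start, end, vma and ret are
 * placeholders): fault in one VMA's page-aligned range while holding
 * mmap_lock for read and noticing whether the lock was dropped, which is
 * essentially what __mm_populate() below does:
 *
 *	int locked = 1;
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, start);
 *	...clamp start/end to the VMA...
 *	ret = populate_vma_page_range(vma, start, end, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 */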
  1720. /*
  1721. * faultin_page_range() - populate (prefault) page tables inside the
  1722. * given range readable/writable
  1723. *
  1724. * This takes care of mlocking the pages, too, if VM_LOCKED is set.
  1725. *
  1726. * @mm: the mm to populate page tables in
  1727. * @start: start address
  1728. * @end: end address
  1729. * @write: whether to prefault readable or writable
  1730. * @locked: whether the mmap_lock is still held
  1731. *
  1732. * Returns either number of processed pages in the MM, or a negative error
  1733. * code on error (see __get_user_pages()). Note that this function reports
  1734. * errors related to VMAs, such as incompatible mappings, as expected by
  1735. * MADV_POPULATE_(READ|WRITE).
  1736. *
  1737. * The range must be page-aligned.
  1738. *
  1739. * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
  1740. */
  1741. long faultin_page_range(struct mm_struct *mm, unsigned long start,
  1742. unsigned long end, bool write, int *locked)
  1743. {
  1744. unsigned long nr_pages = (end - start) / PAGE_SIZE;
  1745. int gup_flags;
  1746. long ret;
  1747. VM_BUG_ON(!PAGE_ALIGNED(start));
  1748. VM_BUG_ON(!PAGE_ALIGNED(end));
  1749. mmap_assert_locked(mm);
  1750. /*
  1751. * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
  1752. * the page dirty with FOLL_WRITE -- which doesn't make a
  1753. * difference with !FOLL_FORCE, because the page is writable
  1754. * in the page table.
  1755. * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
  1756. * a poisoned page.
  1757. * !FOLL_FORCE: Require proper access permissions.
  1758. */
  1759. gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
  1760. FOLL_MADV_POPULATE;
  1761. if (write)
  1762. gup_flags |= FOLL_WRITE;
  1763. ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
  1764. gup_flags);
  1765. lru_add_drain();
  1766. return ret;
  1767. }
  1768. /*
  1769. * __mm_populate - populate and/or mlock pages within a range of address space.
  1770. *
  1771. * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
  1772. * flags. VMAs must be already marked with the desired vm_flags, and
  1773. * mmap_lock must not be held.
  1774. */
  1775. int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
  1776. {
  1777. struct mm_struct *mm = current->mm;
  1778. unsigned long end, nstart, nend;
  1779. struct vm_area_struct *vma = NULL;
  1780. int locked = 0;
  1781. long ret = 0;
  1782. end = start + len;
  1783. for (nstart = start; nstart < end; nstart = nend) {
  1784. /*
  1785. * We want to fault in pages for [nstart; end) address range.
  1786. * Find first corresponding VMA.
  1787. */
  1788. if (!locked) {
  1789. locked = 1;
  1790. mmap_read_lock(mm);
  1791. vma = find_vma_intersection(mm, nstart, end);
  1792. } else if (nstart >= vma->vm_end)
  1793. vma = find_vma_intersection(mm, vma->vm_end, end);
  1794. if (!vma)
  1795. break;
  1796. /*
  1797. * Set [nstart; nend) to intersection of desired address
  1798. * range with the first VMA. Also, skip undesirable VMA types.
  1799. */
  1800. nend = min(end, vma->vm_end);
  1801. if (vma->vm_flags & (VM_IO | VM_PFNMAP))
  1802. continue;
  1803. if (nstart < vma->vm_start)
  1804. nstart = vma->vm_start;
  1805. /*
  1806. * Now fault in a range of pages. populate_vma_page_range()
  1807. * double checks the vma flags, so that it won't mlock pages
  1808. * if the vma was already munlocked.
  1809. */
  1810. ret = populate_vma_page_range(vma, nstart, nend, &locked);
  1811. if (ret < 0) {
  1812. if (ignore_errors) {
  1813. ret = 0;
  1814. continue; /* continue at next VMA */
  1815. }
  1816. break;
  1817. }
  1818. nend = nstart + ret * PAGE_SIZE;
  1819. ret = 0;
  1820. }
  1821. if (locked)
  1822. mmap_read_unlock(mm);
  1823. return ret; /* 0 or negative error code */
  1824. }
  1825. #else /* CONFIG_MMU */
  1826. static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
  1827. unsigned long nr_pages, struct page **pages,
  1828. int *locked, unsigned int foll_flags)
  1829. {
  1830. struct vm_area_struct *vma;
  1831. bool must_unlock = false;
  1832. unsigned long vm_flags;
  1833. long i;
  1834. if (!nr_pages)
  1835. return 0;
  1836. /*
  1837. * The internal caller expects GUP to manage the lock internally and the
  1838. * lock must be released when this returns.
  1839. */
  1840. if (!*locked) {
  1841. if (mmap_read_lock_killable(mm))
  1842. return -EAGAIN;
  1843. must_unlock = true;
  1844. *locked = 1;
  1845. }
  1846. /* calculate required read or write permissions.
  1847. * If FOLL_FORCE is set, we only require the "MAY" flags.
  1848. */
  1849. vm_flags = (foll_flags & FOLL_WRITE) ?
  1850. (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
  1851. vm_flags &= (foll_flags & FOLL_FORCE) ?
  1852. (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
  1853. for (i = 0; i < nr_pages; i++) {
  1854. vma = find_vma(mm, start);
  1855. if (!vma)
  1856. break;
  1857. /* protect what we can, including chardevs */
  1858. if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
  1859. !(vm_flags & vma->vm_flags))
  1860. break;
  1861. if (pages) {
  1862. pages[i] = virt_to_page((void *)start);
  1863. if (pages[i])
  1864. get_page(pages[i]);
  1865. }
  1866. start = (start + PAGE_SIZE) & PAGE_MASK;
  1867. }
  1868. if (must_unlock && *locked) {
  1869. mmap_read_unlock(mm);
  1870. *locked = 0;
  1871. }
  1872. return i ? : -EFAULT;
  1873. }
  1874. #endif /* !CONFIG_MMU */
  1875. /**
  1876. * fault_in_writeable - fault in userspace address range for writing
  1877. * @uaddr: start of address range
  1878. * @size: size of address range
  1879. *
  1880. * Returns the number of bytes not faulted in (like copy_to_user() and
  1881. * copy_from_user()).
  1882. */
  1883. size_t fault_in_writeable(char __user *uaddr, size_t size)
  1884. {
  1885. char __user *start = uaddr, *end;
  1886. if (unlikely(size == 0))
  1887. return 0;
  1888. if (!user_write_access_begin(uaddr, size))
  1889. return size;
  1890. if (!PAGE_ALIGNED(uaddr)) {
  1891. unsafe_put_user(0, uaddr, out);
  1892. uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
  1893. }
  1894. end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
  1895. if (unlikely(end < start))
  1896. end = NULL;
  1897. while (uaddr != end) {
  1898. unsafe_put_user(0, uaddr, out);
  1899. uaddr += PAGE_SIZE;
  1900. }
  1901. out:
  1902. user_write_access_end();
  1903. if (size > uaddr - start)
  1904. return size - (uaddr - start);
  1905. return 0;
  1906. }
  1907. EXPORT_SYMBOL(fault_in_writeable);
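/*
 * Usage sketch (editor's illustration; ubuf and len are placeholders):
 * callers typically fault the destination in up front and then (re)try a
 * copy that may run with page faults disabled:
 *
 *	if (fault_in_writeable(ubuf, len) == len)
 *		return -EFAULT;
 *
 * A zero return means the whole range was faulted in; a short fault-in
 * simply means the subsequent copy_to_user() will be short as well, and the
 * caller loops.
 */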
  1908. /**
  1909. * fault_in_subpage_writeable - fault in an address range for writing
  1910. * @uaddr: start of address range
  1911. * @size: size of address range
  1912. *
  1913. * Fault in a user address range for writing while checking for permissions at
  1914. * sub-page granularity (e.g. arm64 MTE). This function should be used when
  1915. * the caller cannot guarantee forward progress of a copy_to_user() loop.
  1916. *
  1917. * Returns the number of bytes not faulted in (like copy_to_user() and
  1918. * copy_from_user()).
  1919. */
  1920. size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
  1921. {
  1922. size_t faulted_in;
  1923. /*
  1924. * Attempt faulting in at page granularity first for page table
  1925. * permission checking. The arch-specific probe_subpage_writeable()
  1926. * functions may not check for this.
  1927. */
  1928. faulted_in = size - fault_in_writeable(uaddr, size);
  1929. if (faulted_in)
  1930. faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
  1931. return size - faulted_in;
  1932. }
  1933. EXPORT_SYMBOL(fault_in_subpage_writeable);
  1934. /*
  1935. * fault_in_safe_writeable - fault in an address range for writing
  1936. * @uaddr: start of address range
  1937. * @size: length of address range
  1938. *
  1939. * Faults in an address range for writing. This is primarily useful when we
  1940. * already know that some or all of the pages in the address range aren't in
  1941. * memory.
  1942. *
  1943. * Unlike fault_in_writeable(), this function is non-destructive.
  1944. *
  1945. * Note that we don't pin or otherwise hold the pages referenced that we fault
  1946. * in. There's no guarantee that they'll stay in memory for any duration of
  1947. * time.
  1948. *
  1949. * Returns the number of bytes not faulted in, like copy_to_user() and
  1950. * copy_from_user().
  1951. */
  1952. size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
  1953. {
  1954. unsigned long start = (unsigned long)uaddr, end;
  1955. struct mm_struct *mm = current->mm;
  1956. bool unlocked = false;
  1957. if (unlikely(size == 0))
  1958. return 0;
  1959. end = PAGE_ALIGN(start + size);
  1960. if (end < start)
  1961. end = 0;
  1962. mmap_read_lock(mm);
  1963. do {
  1964. if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
  1965. break;
  1966. start = (start + PAGE_SIZE) & PAGE_MASK;
  1967. } while (start != end);
  1968. mmap_read_unlock(mm);
1969. if (size > start - (unsigned long)uaddr)
1970. return size - (start - (unsigned long)uaddr);
  1971. return 0;
  1972. }
  1973. EXPORT_SYMBOL(fault_in_safe_writeable);
  1974. /**
  1975. * fault_in_readable - fault in userspace address range for reading
  1976. * @uaddr: start of user address range
  1977. * @size: size of user address range
  1978. *
  1979. * Returns the number of bytes not faulted in (like copy_to_user() and
  1980. * copy_from_user()).
  1981. */
  1982. size_t fault_in_readable(const char __user *uaddr, size_t size)
  1983. {
  1984. const char __user *start = uaddr, *end;
  1985. volatile char c;
  1986. if (unlikely(size == 0))
  1987. return 0;
  1988. if (!user_read_access_begin(uaddr, size))
  1989. return size;
  1990. if (!PAGE_ALIGNED(uaddr)) {
  1991. unsafe_get_user(c, uaddr, out);
  1992. uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
  1993. }
  1994. end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
  1995. if (unlikely(end < start))
  1996. end = NULL;
  1997. while (uaddr != end) {
  1998. unsafe_get_user(c, uaddr, out);
  1999. uaddr += PAGE_SIZE;
  2000. }
  2001. out:
  2002. user_read_access_end();
  2003. (void)c;
  2004. if (size > uaddr - start)
  2005. return size - (uaddr - start);
  2006. return 0;
  2007. }
  2008. EXPORT_SYMBOL(fault_in_readable);
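/*
 * Usage sketch (editor's illustration; ubuf and len are placeholders),
 * mirroring fault_in_writeable() above but for the read direction, e.g.
 * before copying data in from userspace with page faults disabled:
 *
 *	if (fault_in_readable(ubuf, len) == len)
 *		return -EFAULT;
 *
 * As above, a partial fault-in just leads to a partial copy_from_user() and
 * another trip around the caller's loop.
 */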
  2009. /**
  2010. * get_dump_page() - pin user page in memory while writing it to core dump
  2011. * @addr: user address
  2012. *
  2013. * Returns struct page pointer of user page pinned for dump,
  2014. * to be freed afterwards by put_page().
  2015. *
  2016. * Returns NULL on any kind of failure - a hole must then be inserted into
  2017. * the corefile, to preserve alignment with its headers; and also returns
  2018. * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
  2019. * allowing a hole to be left in the corefile to save disk space.
  2020. *
  2021. * Called without mmap_lock (takes and releases the mmap_lock by itself).
  2022. */
  2023. #ifdef CONFIG_ELF_CORE
  2024. struct page *get_dump_page(unsigned long addr)
  2025. {
  2026. struct page *page;
  2027. int locked = 0;
  2028. int ret;
  2029. ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
  2030. FOLL_FORCE | FOLL_DUMP | FOLL_GET);
  2031. return (ret == 1) ? page : NULL;
  2032. }
  2033. #endif /* CONFIG_ELF_CORE */
  2034. #ifdef CONFIG_MIGRATION
  2035. /*
  2036. * An array of either pages or folios ("pofs"). Although it may seem tempting to
  2037. * avoid this complication, by simply interpreting a list of folios as a list of
  2038. * pages, that approach won't work in the longer term, because eventually the
  2039. * layouts of struct page and struct folio will become completely different.
  2040. * Furthermore, this pof approach avoids excessive page_folio() calls.
  2041. */
  2042. struct pages_or_folios {
  2043. union {
  2044. struct page **pages;
  2045. struct folio **folios;
  2046. void **entries;
  2047. };
  2048. bool has_folios;
  2049. long nr_entries;
  2050. };
  2051. static struct folio *pofs_get_folio(struct pages_or_folios *pofs, long i)
  2052. {
  2053. if (pofs->has_folios)
  2054. return pofs->folios[i];
  2055. return page_folio(pofs->pages[i]);
  2056. }
  2057. static void pofs_clear_entry(struct pages_or_folios *pofs, long i)
  2058. {
  2059. pofs->entries[i] = NULL;
  2060. }
  2061. static void pofs_unpin(struct pages_or_folios *pofs)
  2062. {
  2063. if (pofs->has_folios)
  2064. unpin_folios(pofs->folios, pofs->nr_entries);
  2065. else
  2066. unpin_user_pages(pofs->pages, pofs->nr_entries);
  2067. }
  2068. /*
2069. * Collects folios that cannot be longterm-pinned onto @movable_folio_list; device coherent folios are skipped here and dealt with during migration.
  2070. */
  2071. static void collect_longterm_unpinnable_folios(
  2072. struct list_head *movable_folio_list,
  2073. struct pages_or_folios *pofs)
  2074. {
  2075. struct folio *prev_folio = NULL;
  2076. bool drain_allow = true;
  2077. unsigned long i;
  2078. for (i = 0; i < pofs->nr_entries; i++) {
  2079. struct folio *folio = pofs_get_folio(pofs, i);
  2080. if (folio == prev_folio)
  2081. continue;
  2082. prev_folio = folio;
  2083. if (folio_is_longterm_pinnable(folio))
  2084. continue;
  2085. if (folio_is_device_coherent(folio))
  2086. continue;
  2087. if (folio_test_hugetlb(folio)) {
  2088. isolate_hugetlb(folio, movable_folio_list);
  2089. continue;
  2090. }
  2091. if (!folio_test_lru(folio) && drain_allow) {
  2092. lru_add_drain_all();
  2093. drain_allow = false;
  2094. }
  2095. if (!folio_isolate_lru(folio))
  2096. continue;
  2097. list_add_tail(&folio->lru, movable_folio_list);
  2098. node_stat_mod_folio(folio,
  2099. NR_ISOLATED_ANON + folio_is_file_lru(folio),
  2100. folio_nr_pages(folio));
  2101. }
  2102. }
  2103. /*
2104. * Unpins all folios and migrates device coherent folios and the folios on
2105. * movable_folio_list. Returns -EAGAIN if all folios were migrated successfully,
2106. * or -errno if migration failed (even if it partially succeeded).
  2107. */
  2108. static int
  2109. migrate_longterm_unpinnable_folios(struct list_head *movable_folio_list,
  2110. struct pages_or_folios *pofs)
  2111. {
  2112. int ret;
  2113. unsigned long i;
  2114. for (i = 0; i < pofs->nr_entries; i++) {
  2115. struct folio *folio = pofs_get_folio(pofs, i);
  2116. if (folio_is_device_coherent(folio)) {
  2117. /*
  2118. * Migration will fail if the folio is pinned, so
  2119. * convert the pin on the source folio to a normal
  2120. * reference.
  2121. */
  2122. pofs_clear_entry(pofs, i);
  2123. folio_get(folio);
  2124. gup_put_folio(folio, 1, FOLL_PIN);
  2125. if (migrate_device_coherent_folio(folio)) {
  2126. ret = -EBUSY;
  2127. goto err;
  2128. }
  2129. continue;
  2130. }
  2131. /*
  2132. * We can't migrate folios with unexpected references, so drop
  2133. * the reference obtained by __get_user_pages_locked().
  2134. * Migrating folios have been added to movable_folio_list after
  2135. * calling folio_isolate_lru() which takes a reference so the
  2136. * folio won't be freed if it's migrating.
  2137. */
  2138. unpin_folio(folio);
  2139. pofs_clear_entry(pofs, i);
  2140. }
  2141. if (!list_empty(movable_folio_list)) {
  2142. struct migration_target_control mtc = {
  2143. .nid = NUMA_NO_NODE,
  2144. .gfp_mask = GFP_USER | __GFP_NOWARN,
  2145. .reason = MR_LONGTERM_PIN,
  2146. };
  2147. if (migrate_pages(movable_folio_list, alloc_migration_target,
  2148. NULL, (unsigned long)&mtc, MIGRATE_SYNC,
  2149. MR_LONGTERM_PIN, NULL)) {
  2150. ret = -ENOMEM;
  2151. goto err;
  2152. }
  2153. }
  2154. putback_movable_pages(movable_folio_list);
  2155. return -EAGAIN;
  2156. err:
  2157. pofs_unpin(pofs);
  2158. putback_movable_pages(movable_folio_list);
  2159. return ret;
  2160. }
  2161. static long
  2162. check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
  2163. {
  2164. LIST_HEAD(movable_folio_list);
  2165. collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
  2166. if (list_empty(&movable_folio_list))
  2167. return 0;
  2168. return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
  2169. }
  2170. /*
  2171. * Check whether all folios are *allowed* to be pinned indefinitely (long term).
  2172. * Rather confusingly, all folios in the range are required to be pinned via
  2173. * FOLL_PIN, before calling this routine.
  2174. *
  2175. * Return values:
  2176. *
  2177. * 0: if everything is OK and all folios in the range are allowed to be pinned,
  2178. * then this routine leaves all folios pinned and returns zero for success.
  2179. *
  2180. * -EAGAIN: if any folios in the range are not allowed to be pinned, then this
  2181. * routine will migrate those folios away, unpin all the folios in the range. If
  2182. * migration of the entire set of folios succeeds, then -EAGAIN is returned. The
  2183. * caller should re-pin the entire range with FOLL_PIN and then call this
  2184. * routine again.
  2185. *
  2186. * -ENOMEM, or any other -errno: if an error *other* than -EAGAIN occurs, this
  2187. * indicates a migration failure. The caller should give up, and propagate the
  2188. * error back up the call stack. The caller does not need to unpin any folios in
  2189. * that case, because this routine will do the unpinning.
  2190. */
  2191. static long check_and_migrate_movable_folios(unsigned long nr_folios,
  2192. struct folio **folios)
  2193. {
  2194. struct pages_or_folios pofs = {
  2195. .folios = folios,
  2196. .has_folios = true,
  2197. .nr_entries = nr_folios,
  2198. };
  2199. return check_and_migrate_movable_pages_or_folios(&pofs);
  2200. }
  2201. /*
  2202. * Return values and behavior are the same as those for
  2203. * check_and_migrate_movable_folios().
  2204. */
  2205. static long check_and_migrate_movable_pages(unsigned long nr_pages,
  2206. struct page **pages)
  2207. {
  2208. struct pages_or_folios pofs = {
  2209. .pages = pages,
  2210. .has_folios = false,
  2211. .nr_entries = nr_pages,
  2212. };
  2213. return check_and_migrate_movable_pages_or_folios(&pofs);
  2214. }
  2215. #else
  2216. static long check_and_migrate_movable_pages(unsigned long nr_pages,
  2217. struct page **pages)
  2218. {
  2219. return 0;
  2220. }
  2221. static long check_and_migrate_movable_folios(unsigned long nr_folios,
  2222. struct folio **folios)
  2223. {
  2224. return 0;
  2225. }
  2226. #endif /* CONFIG_MIGRATION */
  2227. /*
  2228. * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
  2229. * allows us to process the FOLL_LONGTERM flag.
  2230. */
  2231. static long __gup_longterm_locked(struct mm_struct *mm,
  2232. unsigned long start,
  2233. unsigned long nr_pages,
  2234. struct page **pages,
  2235. int *locked,
  2236. unsigned int gup_flags)
  2237. {
  2238. unsigned int flags;
  2239. long rc, nr_pinned_pages;
  2240. if (!(gup_flags & FOLL_LONGTERM))
  2241. return __get_user_pages_locked(mm, start, nr_pages, pages,
  2242. locked, gup_flags);
  2243. flags = memalloc_pin_save();
  2244. do {
  2245. nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
  2246. pages, locked,
  2247. gup_flags);
  2248. if (nr_pinned_pages <= 0) {
  2249. rc = nr_pinned_pages;
  2250. break;
  2251. }
  2252. /* FOLL_LONGTERM implies FOLL_PIN */
  2253. rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
  2254. } while (rc == -EAGAIN);
  2255. memalloc_pin_restore(flags);
  2256. return rc ? rc : nr_pinned_pages;
  2257. }
  2258. /*
  2259. * Check that the given flags are valid for the exported gup/pup interface, and
  2260. * update them with the required flags that the caller must have set.
  2261. */
  2262. static bool is_valid_gup_args(struct page **pages, int *locked,
  2263. unsigned int *gup_flags_p, unsigned int to_set)
  2264. {
  2265. unsigned int gup_flags = *gup_flags_p;
  2266. /*
2267. * These flags are not allowed to be specified externally to the gup
  2268. * interfaces:
  2269. * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
  2270. * - FOLL_REMOTE is internal only, set in (get|pin)_user_pages_remote()
  2271. * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
  2272. */
  2273. if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
  2274. return false;
  2275. gup_flags |= to_set;
  2276. if (locked) {
  2277. /* At the external interface locked must be set */
  2278. if (WARN_ON_ONCE(*locked != 1))
  2279. return false;
  2280. gup_flags |= FOLL_UNLOCKABLE;
  2281. }
  2282. /* FOLL_GET and FOLL_PIN are mutually exclusive. */
  2283. if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
  2284. (FOLL_PIN | FOLL_GET)))
  2285. return false;
  2286. /* LONGTERM can only be specified when pinning */
  2287. if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
  2288. return false;
  2289. /* Pages input must be given if using GET/PIN */
  2290. if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
  2291. return false;
  2292. /* We want to allow the pgmap to be hot-unplugged at all times */
  2293. if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
  2294. (gup_flags & FOLL_PCI_P2PDMA)))
  2295. return false;
  2296. *gup_flags_p = gup_flags;
  2297. return true;
  2298. }
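/*
 * Illustrative examples (editor's sketch; addr and page are placeholders)
 * of calls the checks above reject with -EINVAL, each also firing a
 * WARN_ON_ONCE():
 *
 *	get_user_pages(addr, 1, FOLL_TOUCH, &page);
 *	get_user_pages(addr, 1, FOLL_WRITE | FOLL_LONGTERM, &page);
 *
 * The first passes an internal-only flag; the second requests a long-term
 * reference without FOLL_PIN, which is only available through the
 * pin_user_pages*() entry points.
 */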
  2299. #ifdef CONFIG_MMU
  2300. /**
  2301. * get_user_pages_remote() - pin user pages in memory
  2302. * @mm: mm_struct of target mm
  2303. * @start: starting user address
  2304. * @nr_pages: number of pages from start to pin
  2305. * @gup_flags: flags modifying lookup behaviour
  2306. * @pages: array that receives pointers to the pages pinned.
  2307. * Should be at least nr_pages long. Or NULL, if caller
  2308. * only intends to ensure the pages are faulted in.
  2309. * @locked: pointer to lock flag indicating whether lock is held and
  2310. * subsequently whether VM_FAULT_RETRY functionality can be
  2311. * utilised. Lock must initially be held.
  2312. *
  2313. * Returns either number of pages pinned (which may be less than the
  2314. * number requested), or an error. Details about the return value:
  2315. *
  2316. * -- If nr_pages is 0, returns 0.
  2317. * -- If nr_pages is >0, but no pages were pinned, returns -errno.
  2318. * -- If nr_pages is >0, and some pages were pinned, returns the number of
  2319. * pages pinned. Again, this may be less than nr_pages.
  2320. *
  2321. * The caller is responsible for releasing returned @pages, via put_page().
  2322. *
  2323. * Must be called with mmap_lock held for read or write.
  2324. *
  2325. * get_user_pages_remote walks a process's page tables and takes a reference
  2326. * to each struct page that each user address corresponds to at a given
  2327. * instant. That is, it takes the page that would be accessed if a user
  2328. * thread accesses the given user virtual address at that instant.
  2329. *
  2330. * This does not guarantee that the page exists in the user mappings when
  2331. * get_user_pages_remote returns, and there may even be a completely different
  2332. * page there in some cases (eg. if mmapped pagecache has been invalidated
  2333. * and subsequently re-faulted). However it does guarantee that the page
  2334. * won't be freed completely. And mostly callers simply care that the page
  2335. * contains data that was valid *at some point in time*. Typically, an IO
  2336. * or similar operation cannot guarantee anything stronger anyway because
  2337. * locks can't be held over the syscall boundary.
  2338. *
  2339. * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
  2340. * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
  2341. * be called after the page is finished with, and before put_page is called.
  2342. *
  2343. * get_user_pages_remote is typically used for fewer-copy IO operations,
  2344. * to get a handle on the memory by some means other than accesses
  2345. * via the user virtual addresses. The pages may be submitted for
  2346. * DMA to devices or accessed via their kernel linear mapping (via the
  2347. * kmap APIs). Care should be taken to use the correct cache flushing APIs.
  2348. *
  2349. * See also get_user_pages_fast, for performance critical applications.
  2350. *
2351. * get_user_pages_remote is intended for accessing a *remote* mm; callers
2352. * operating on current->mm should normally use get_user_pages() or
2353. * get_user_pages_fast() instead. Retry via FAULT_FLAG_ALLOW_RETRY is only
2354. * available when a non-NULL @locked is passed.
  2355. */
  2356. long get_user_pages_remote(struct mm_struct *mm,
  2357. unsigned long start, unsigned long nr_pages,
  2358. unsigned int gup_flags, struct page **pages,
  2359. int *locked)
  2360. {
  2361. int local_locked = 1;
  2362. if (!is_valid_gup_args(pages, locked, &gup_flags,
  2363. FOLL_TOUCH | FOLL_REMOTE))
  2364. return -EINVAL;
  2365. return __get_user_pages_locked(mm, start, nr_pages, pages,
  2366. locked ? locked : &local_locked,
  2367. gup_flags);
  2368. }
  2369. EXPORT_SYMBOL(get_user_pages_remote);
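/*
 * Usage sketch (editor's illustration; mm, addr, page and ret are
 * placeholders): grab one page of another process's address space while
 * allowing the fault path to drop and retake mmap_lock:
 *
 *	int locked = 1;
 *
 *	mmap_read_lock(mm);
 *	ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *	if (ret == 1) {
 *		...access the page, e.g. via kmap_local_page()...
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 */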
  2370. #else /* CONFIG_MMU */
  2371. long get_user_pages_remote(struct mm_struct *mm,
  2372. unsigned long start, unsigned long nr_pages,
  2373. unsigned int gup_flags, struct page **pages,
  2374. int *locked)
  2375. {
  2376. return 0;
  2377. }
  2378. #endif /* !CONFIG_MMU */
  2379. /**
  2380. * get_user_pages() - pin user pages in memory
  2381. * @start: starting user address
  2382. * @nr_pages: number of pages from start to pin
  2383. * @gup_flags: flags modifying lookup behaviour
  2384. * @pages: array that receives pointers to the pages pinned.
  2385. * Should be at least nr_pages long. Or NULL, if caller
  2386. * only intends to ensure the pages are faulted in.
  2387. *
  2388. * This is the same as get_user_pages_remote(), just with a less-flexible
  2389. * calling convention where we assume that the mm being operated on belongs to
  2390. * the current task, and doesn't allow passing of a locked parameter. We also
  2391. * obviously don't pass FOLL_REMOTE in here.
  2392. */
  2393. long get_user_pages(unsigned long start, unsigned long nr_pages,
  2394. unsigned int gup_flags, struct page **pages)
  2395. {
  2396. int locked = 1;
  2397. if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
  2398. return -EINVAL;
  2399. return __get_user_pages_locked(current->mm, start, nr_pages, pages,
  2400. &locked, gup_flags);
  2401. }
  2402. EXPORT_SYMBOL(get_user_pages);
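/*
 * Usage sketch (editor's illustration; addr, page and ret are placeholders)
 * for the current task, following the dirty/put_page() protocol documented
 * for get_user_pages_remote():
 *
 *	mmap_read_lock(current->mm);
 *	ret = get_user_pages(addr, 1, FOLL_WRITE, &page);
 *	mmap_read_unlock(current->mm);
 *	if (ret == 1) {
 *		...access the page through its kernel mapping...
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 */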
  2403. /*
  2404. * get_user_pages_unlocked() is suitable to replace the form:
  2405. *
  2406. * mmap_read_lock(mm);
  2407. * get_user_pages(mm, ..., pages, NULL);
  2408. * mmap_read_unlock(mm);
  2409. *
  2410. * with:
  2411. *
  2412. * get_user_pages_unlocked(mm, ..., pages);
  2413. *
  2414. * It is functionally equivalent to get_user_pages_fast so
  2415. * get_user_pages_fast should be used instead if specific gup_flags
  2416. * (e.g. FOLL_FORCE) are not required.
  2417. */
  2418. long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
  2419. struct page **pages, unsigned int gup_flags)
  2420. {
  2421. int locked = 0;
  2422. if (!is_valid_gup_args(pages, NULL, &gup_flags,
  2423. FOLL_TOUCH | FOLL_UNLOCKABLE))
  2424. return -EINVAL;
  2425. return __get_user_pages_locked(current->mm, start, nr_pages, pages,
  2426. &locked, gup_flags);
  2427. }
  2428. EXPORT_SYMBOL(get_user_pages_unlocked);
  2429. /*
  2430. * GUP-fast
  2431. *
  2432. * get_user_pages_fast attempts to pin user pages by walking the page
  2433. * tables directly and avoids taking locks. Thus the walker needs to be
  2434. * protected from page table pages being freed from under it, and should
  2435. * block any THP splits.
  2436. *
  2437. * One way to achieve this is to have the walker disable interrupts, and
  2438. * rely on IPIs from the TLB flushing code blocking before the page table
  2439. * pages are freed. This is unsuitable for architectures that do not need
  2440. * to broadcast an IPI when invalidating TLBs.
  2441. *
2442. * Another way to achieve this is to batch up the pages that contain page
2443. * tables belonging to more than one mm_user, then rcu_sched a callback to free those
  2444. * pages. Disabling interrupts will allow the gup_fast() walker to both block
  2445. * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
  2446. * (which is a relatively rare event). The code below adopts this strategy.
  2447. *
  2448. * Before activating this code, please be aware that the following assumptions
  2449. * are currently made:
  2450. *
  2451. * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
  2452. * free pages containing page tables or TLB flushing requires IPI broadcast.
  2453. *
  2454. * *) ptes can be read atomically by the architecture.
  2455. *
  2456. * *) access_ok is sufficient to validate userspace address ranges.
  2457. *
  2458. * The last two assumptions can be relaxed by the addition of helper functions.
  2459. *
  2460. * This code is based heavily on the PowerPC implementation by Nick Piggin.
  2461. */
  2462. #ifdef CONFIG_HAVE_GUP_FAST
  2463. /*
  2464. * Used in the GUP-fast path to determine whether GUP is permitted to work on
  2465. * a specific folio.
  2466. *
  2467. * This call assumes the caller has pinned the folio, that the lowest page table
  2468. * level still points to this folio, and that interrupts have been disabled.
  2469. *
  2470. * GUP-fast must reject all secretmem folios.
  2471. *
  2472. * Writing to pinned file-backed dirty tracked folios is inherently problematic
  2473. * (see comment describing the writable_file_mapping_allowed() function). We
  2474. * therefore try to avoid the most egregious case of a long-term mapping doing
  2475. * so.
  2476. *
  2477. * This function cannot be as thorough as that one as the VMA is not available
  2478. * in the fast path, so instead we whitelist known good cases and if in doubt,
  2479. * fall back to the slow path.
  2480. */
  2481. static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags)
  2482. {
  2483. bool reject_file_backed = false;
  2484. struct address_space *mapping;
  2485. bool check_secretmem = false;
  2486. unsigned long mapping_flags;
  2487. /*
  2488. * If we aren't pinning then no problematic write can occur. A long term
  2489. * pin is the most egregious case so this is the one we disallow.
  2490. */
  2491. if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) ==
  2492. (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
  2493. reject_file_backed = true;
  2494. /* We hold a folio reference, so we can safely access folio fields. */
  2495. /* secretmem folios are always order-0 folios. */
  2496. if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio))
  2497. check_secretmem = true;
  2498. if (!reject_file_backed && !check_secretmem)
  2499. return true;
  2500. if (WARN_ON_ONCE(folio_test_slab(folio)))
  2501. return false;
  2502. /* hugetlb neither requires dirty-tracking nor can be secretmem. */
  2503. if (folio_test_hugetlb(folio))
  2504. return true;
  2505. /*
  2506. * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods
  2507. * cannot proceed, which means no actions performed under RCU can
  2508. * proceed either.
  2509. *
  2510. * inodes and thus their mappings are freed under RCU, which means the
  2511. * mapping cannot be freed beneath us and thus we can safely dereference
  2512. * it.
  2513. */
  2514. lockdep_assert_irqs_disabled();
  2515. /*
  2516. * However, there may be operations which _alter_ the mapping, so ensure
  2517. * we read it once and only once.
  2518. */
  2519. mapping = READ_ONCE(folio->mapping);
  2520. /*
  2521. * The mapping may have been truncated, in any case we cannot determine
  2522. * if this mapping is safe - fall back to slow path to determine how to
  2523. * proceed.
  2524. */
  2525. if (!mapping)
  2526. return false;
  2527. /* Anonymous folios pose no problem. */
  2528. mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
  2529. if (mapping_flags)
  2530. return mapping_flags & PAGE_MAPPING_ANON;
  2531. /*
  2532. * At this point, we know the mapping is non-null and points to an
  2533. * address_space object.
  2534. */
  2535. if (check_secretmem && secretmem_mapping(mapping))
  2536. return false;
  2537. /* The only remaining allowed file system is shmem. */
  2538. return !reject_file_backed || shmem_mapping(mapping);
  2539. }
  2540. static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start,
  2541. unsigned int flags, struct page **pages)
  2542. {
  2543. while ((*nr) - nr_start) {
  2544. struct folio *folio = page_folio(pages[--(*nr)]);
  2545. folio_clear_referenced(folio);
  2546. gup_put_folio(folio, 1, flags);
  2547. }
  2548. }
  2549. #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
  2550. /*
  2551. * GUP-fast relies on pte change detection to avoid concurrent pgtable
  2552. * operations.
  2553. *
  2554. * To pin the page, GUP-fast needs to do below in order:
  2555. * (1) pin the page (by prefetching pte), then (2) check pte not changed.
  2556. *
  2557. * For the rest of pgtable operations where pgtable updates can be racy
  2558. * with GUP-fast, we need to do (1) clear pte, then (2) check whether page
  2559. * is pinned.
  2560. *
  2561. * Above will work for all pte-level operations, including THP split.
  2562. *
  2563. * For THP collapse, it's a bit more complicated because GUP-fast may be
  2564. * walking a pgtable page that is being freed (pte is still valid but pmd
  2565. * can be cleared already). To avoid race in such condition, we need to
  2566. * also check pmd here to make sure pmd doesn't change (corresponds to
  2567. * pmdp_collapse_flush() in the THP collapse code path).
  2568. */
  2569. static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
  2570. unsigned long end, unsigned int flags, struct page **pages,
  2571. int *nr)
  2572. {
  2573. struct dev_pagemap *pgmap = NULL;
  2574. int nr_start = *nr, ret = 0;
  2575. pte_t *ptep, *ptem;
  2576. ptem = ptep = pte_offset_map(&pmd, addr);
  2577. if (!ptep)
  2578. return 0;
  2579. do {
  2580. pte_t pte = ptep_get_lockless(ptep);
  2581. struct page *page;
  2582. struct folio *folio;
  2583. /*
2584. * Always fall back to ordinary GUP on PROT_NONE-mapped pages:
2585. * pte_access_permitted() had better reject these pages
  2586. * either way: otherwise, GUP-fast might succeed in
  2587. * cases where ordinary GUP would fail due to VMA access
  2588. * permissions.
  2589. */
  2590. if (pte_protnone(pte))
  2591. goto pte_unmap;
  2592. if (!pte_access_permitted(pte, flags & FOLL_WRITE))
  2593. goto pte_unmap;
  2594. if (pte_devmap(pte)) {
  2595. if (unlikely(flags & FOLL_LONGTERM))
  2596. goto pte_unmap;
  2597. pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
  2598. if (unlikely(!pgmap)) {
  2599. gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
  2600. goto pte_unmap;
  2601. }
  2602. } else if (pte_special(pte))
  2603. goto pte_unmap;
  2604. VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
  2605. page = pte_page(pte);
  2606. folio = try_grab_folio_fast(page, 1, flags);
  2607. if (!folio)
  2608. goto pte_unmap;
  2609. if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
  2610. unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
  2611. gup_put_folio(folio, 1, flags);
  2612. goto pte_unmap;
  2613. }
  2614. if (!gup_fast_folio_allowed(folio, flags)) {
  2615. gup_put_folio(folio, 1, flags);
  2616. goto pte_unmap;
  2617. }
  2618. if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
  2619. gup_put_folio(folio, 1, flags);
  2620. goto pte_unmap;
  2621. }
  2622. /*
  2623. * We need to make the page accessible if and only if we are
  2624. * going to access its content (the FOLL_PIN case). Please
  2625. * see Documentation/core-api/pin_user_pages.rst for
  2626. * details.
  2627. */
  2628. if (flags & FOLL_PIN) {
  2629. ret = arch_make_folio_accessible(folio);
  2630. if (ret) {
  2631. gup_put_folio(folio, 1, flags);
  2632. goto pte_unmap;
  2633. }
  2634. }
  2635. folio_set_referenced(folio);
  2636. pages[*nr] = page;
  2637. (*nr)++;
  2638. } while (ptep++, addr += PAGE_SIZE, addr != end);
  2639. ret = 1;
  2640. pte_unmap:
  2641. if (pgmap)
  2642. put_dev_pagemap(pgmap);
  2643. pte_unmap(ptem);
  2644. return ret;
  2645. }
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
 * useful to have gup_fast_pmd_leaf even if we can't operate on ptes.
 */
static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
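
/*
 * Pin the ZONE_DEVICE pages backing [addr, end) one PAGE_SIZE step at a time,
 * looking up and holding the owning dev_pagemap as we go. On any failure the
 * pages pinned since @nr_start are released again; the return value is
 * non-zero only if the entire range was pinned.
 */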
static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr,
	unsigned long end, unsigned int flags, struct page **pages, int *nr)
{
	int nr_start = *nr;
	struct dev_pagemap *pgmap = NULL;

	do {
		struct folio *folio;
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
			break;
		}

		if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
			gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
			break;
		}

		folio = try_grab_folio_fast(page, 1, flags);
		if (!folio) {
			gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
			break;
		}
		folio_set_referenced(folio);
		pages[*nr] = page;
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);

	put_dev_pagemap(pgmap);
	return addr == end;
}
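
/*
 * Pin a PMD-sized devmap leaf via gup_fast_devmap_leaf(), then re-check that
 * the pmd did not change underneath us; if it did, undo the pins and fail.
 */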
static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
		return 0;

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
		return 0;
	}
	return 1;
}
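
/* As above, but for a PUD-sized devmap leaf. */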
static int gup_fast_devmap_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
		return 0;

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
		return 0;
	}
	return 1;
}
#else
static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	BUILD_BUG();
	return 0;
}

static int gup_fast_devmap_pud_leaf(pud_t pud, pud_t *pudp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	BUILD_BUG();
	return 0;
}
#endif
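
/*
 * Try to pin a present PMD-sized leaf mapping (e.g. a PMD-mapped THP);
 * devmap leaves are delegated to gup_fast_devmap_pmd_leaf(). The covered
 * subpages are recorded in @pages and the folio is grabbed once per recorded
 * subpage; the pins are dropped again if the pmd changed in the meantime or
 * the folio turns out not to be eligible for GUP-fast.
 */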
static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	struct page *page;
	struct folio *folio;
	int refs;

	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	if (pmd_special(orig))
		return 0;

	if (pmd_devmap(orig)) {
		if (unlikely(flags & FOLL_LONGTERM))
			return 0;
		return gup_fast_devmap_pmd_leaf(orig, pmdp, addr, end, flags,
						pages, nr);
	}

	page = pmd_page(orig);
	refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);

	folio = try_grab_folio_fast(page, refs, flags);
	if (!folio)
		return 0;

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!gup_fast_folio_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	*nr += refs;
	folio_set_referenced(folio);
	return 1;
}
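
/* Same as gup_fast_pmd_leaf(), one level up: a PUD-sized leaf mapping. */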
static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	struct page *page;
	struct folio *folio;
	int refs;

	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	if (pud_special(orig))
		return 0;

	if (pud_devmap(orig)) {
		if (unlikely(flags & FOLL_LONGTERM))
			return 0;
		return gup_fast_devmap_pud_leaf(orig, pudp, addr, end, flags,
						pages, nr);
	}

	page = pud_page(orig);
	refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr);

	folio = try_grab_folio_fast(page, refs, flags);
	if (!folio)
		return 0;

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!gup_fast_folio_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	*nr += refs;
	folio_set_referenced(folio);
	return 1;
}
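
/*
 * And the same again for the rare configurations that create PGD-level
 * leaves; the BUILD_BUG_ON() requires pgd_devmap() to be compile-time false.
 */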
static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	int refs;
	struct page *page;
	struct folio *folio;

	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	BUILD_BUG_ON(pgd_devmap(orig));

	page = pgd_page(orig);
	refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);

	folio = try_grab_folio_fast(page, refs, flags);
	if (!folio)
		return 0;

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!gup_fast_folio_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	*nr += refs;
	folio_set_referenced(folio);
	return 1;
}
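
/*
 * Walk the PMD entries covering [addr, end): leaf entries are pinned via
 * gup_fast_pmd_leaf(), anything else descends into gup_fast_pte_range().
 * Returning 0 simply stops the fast walk; whatever was already pinned is
 * still reported to the caller.
 */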
static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset_lockless(pudp, pud, addr);
	do {
		pmd_t pmd = pmdp_get_lockless(pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_present(pmd))
			return 0;
		if (unlikely(pmd_leaf(pmd))) {
			/* See gup_fast_pte_range() */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags,
					       pages, nr))
				return 0;
		} else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
					       pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
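
/* The same walk one level up; PUD entries may themselves be leaves. */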
static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset_lockless(p4dp, p4d, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (unlikely(!pud_present(pud)))
			return 0;
		if (unlikely(pud_leaf(pud))) {
			if (!gup_fast_pud_leaf(pud, pudp, addr, next, flags,
					       pages, nr))
				return 0;
		} else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags,
					       pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
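
/*
 * P4D level of the walk. The BUILD_BUG_ON() documents that no supported
 * configuration creates P4D-level leaf entries.
 */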
static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages,
		int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
	do {
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (!p4d_present(p4d))
			return 0;
		BUILD_BUG_ON(p4d_leaf(p4d));
		if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
					pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}
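
/*
 * Top of the lockless walk: iterate the PGD entries of current->mm covering
 * [addr, end) and accumulate pinned pages in pages[] and *nr. The walk stops
 * quietly at the first entry it cannot handle.
 */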
static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset(current->mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			return;
		if (unlikely(pgd_leaf(pgd))) {
			if (!gup_fast_pgd_leaf(pgd, pgdp, addr, next, flags,
					       pages, nr))
				return;
		} else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
					       pages, nr))
			return;
	} while (pgdp++, addr = next, addr != end);
}
#else
static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
}
#endif /* CONFIG_HAVE_GUP_FAST */

#ifndef gup_fast_permitted
/*
 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
 * we need to fall back to the slow version:
 */
static bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return true;
}
#endif
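
/*
 * The lockless fast path proper: returns how many pages starting at @start
 * could be pinned, which may be anything from 0 up to the requested range.
 * For FOLL_PIN requests, mm->write_protect_seq is sampled around the walk so
 * that racing with fork()'s write protection discards the whole attempt.
 */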
static unsigned long gup_fast(unsigned long start, unsigned long end,
		unsigned int gup_flags, struct page **pages)
{
	unsigned long flags;
	int nr_pinned = 0;
	unsigned seq;

	if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
	    !gup_fast_permitted(start, end))
		return 0;

	if (gup_flags & FOLL_PIN) {
		seq = raw_read_seqcount(&current->mm->write_protect_seq);
		if (seq & 1)
			return 0;
	}

	/*
	 * Disable interrupts. The nested form is used, in order to allow full,
	 * general purpose use of this routine.
	 *
	 * With interrupts disabled, we block page table pages from being freed
	 * from under us. See struct mmu_table_batch comments in
	 * include/asm-generic/tlb.h for more details.
	 *
	 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
	 * that come from THPs splitting.
	 */
	local_irq_save(flags);
	gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned);
	local_irq_restore(flags);

	/*
	 * When pinning pages for DMA there could be a concurrent write protect
	 * from fork() via copy_page_range(); in this case always fail GUP-fast.
	 */
	if (gup_flags & FOLL_PIN) {
		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
			gup_fast_unpin_user_pages(pages, nr_pinned);
			return 0;
		} else {
			sanity_check_pinned_pages(pages, nr_pinned);
		}
	}
	return nr_pinned;
}
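
/*
 * Common entry point for the *_fast() variants below: validate the request,
 * try gup_fast() first and, unless FOLL_FAST_ONLY was given, hand whatever is
 * still missing to the slow __gup_longterm_locked() path. Returns the total
 * number of pages pinned, or -errno if nothing could be pinned at all.
 */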
static int gup_fast_fallback(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages)
{
	unsigned long len, end;
	unsigned long nr_pinned;
	int locked = 0;
	int ret;

	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
				       FOLL_FAST_ONLY | FOLL_NOFAULT |
				       FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
		return -EINVAL;

	if (gup_flags & FOLL_PIN)
		mm_set_has_pinned_flag(&current->mm->flags);

	if (!(gup_flags & FOLL_FAST_ONLY))
		might_lock_read(&current->mm->mmap_lock);

	start = untagged_addr(start) & PAGE_MASK;
	len = nr_pages << PAGE_SHIFT;
	if (check_add_overflow(start, len, &end))
		return -EOVERFLOW;
	if (end > TASK_SIZE_MAX)
		return -EFAULT;
	if (unlikely(!access_ok((void __user *)start, len)))
		return -EFAULT;

	nr_pinned = gup_fast(start, end, gup_flags, pages);
	if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
		return nr_pinned;

	/* Slow path: try to get the remaining pages with get_user_pages */
	start += nr_pinned << PAGE_SHIFT;
	pages += nr_pinned;
	ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
				    pages, &locked,
				    gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
	if (ret < 0) {
		/*
		 * The caller has to unpin the pages we already pinned, so
		 * returning -errno is not an option.
		 */
		if (nr_pinned)
			return nr_pinned;
		return ret;
	}
	return ret + nr_pinned;
}

/**
 * get_user_pages_fast_only() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP.
 *
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 *
 * Careful, careful! COW breaking can go either way, so a non-write
 * access can get ambiguous page results. If you call this function without
 * 'write' set, you'd better be sure that you're ok with that ambiguity.
 */
int get_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages)
{
	/*
	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
	 * because gup fast is always a "pin with a +1 page refcount" request.
	 *
	 * FOLL_FAST_ONLY is required in order to match the API description of
	 * this routine: no fall back to regular ("slow") GUP.
	 */
	if (!is_valid_gup_args(pages, NULL, &gup_flags,
			       FOLL_GET | FOLL_FAST_ONLY))
		return -EINVAL;

	return gup_fast_fallback(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast_only);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_lock.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number requested.
 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
 * -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages)
{
	/*
	 * The caller may or may not have explicitly set FOLL_GET; either way is
	 * OK. However, internally (within mm/gup.c), gup fast variants must set
	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
	 * request.
	 */
	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
		return -EINVAL;
	return gup_fast_fallback(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

/**
 * pin_user_pages_fast() - pin user pages in memory without taking locks
 *
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
 * get_user_pages_fast() for documentation on the function arguments, because
 * the arguments here are identical.
 *
 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
 * see Documentation/core-api/pin_user_pages.rst for further details.
 *
 * Note that if a zero_page is amongst the returned pages, it will not have
 * pins in it and unpin_user_page() will not remove pins from it.
 */
int pin_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages)
{
	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
		return -EINVAL;
	return gup_fast_fallback(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(pin_user_pages_fast);
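
/*
 * Illustrative sketch of a typical caller (not part of this file; "uaddr" and
 * "npages" are placeholders): pin a user buffer, use it, then unpin it.
 *
 *	struct page **pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
 *	int pinned;
 *
 *	if (!pages)
 *		return -ENOMEM;
 *	pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
 *	if (pinned > 0) {
 *		... access or DMA into the pinned pages ...
 *		unpin_user_pages(pages, pinned);
 *	}
 *	kvfree(pages);
 */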

/**
 * pin_user_pages_remote() - pin pages of a remote process
 *
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
 * get_user_pages_remote() for documentation on the function arguments, because
 * the arguments here are identical.
 *
 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
 * see Documentation/core-api/pin_user_pages.rst for details.
 *
 * Note that if a zero_page is amongst the returned pages, it will not have
 * pins in it and unpin_user_page*() will not remove pins from it.
 */
long pin_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	int local_locked = 1;

	if (!is_valid_gup_args(pages, locked, &gup_flags,
			       FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
		return 0;
	return __gup_longterm_locked(mm, start, nr_pages, pages,
				     locked ? locked : &local_locked,
				     gup_flags);
}
EXPORT_SYMBOL(pin_user_pages_remote);

/**
 * pin_user_pages() - pin user pages in memory for use by other devices
 *
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
 * FOLL_PIN is set.
 *
 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
 * see Documentation/core-api/pin_user_pages.rst for details.
 *
 * Note that if a zero_page is amongst the returned pages, it will not have
 * pins in it and unpin_user_page*() will not remove pins from it.
 */
long pin_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages)
{
	int locked = 1;

	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
		return 0;
	return __gup_longterm_locked(current->mm, start, nr_pages,
				     pages, &locked, gup_flags);
}
EXPORT_SYMBOL(pin_user_pages);

/*
 * pin_user_pages_unlocked() is the FOLL_PIN variant of
 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
 * FOLL_PIN and rejects FOLL_GET.
 *
 * Note that if a zero_page is amongst the returned pages, it will not have
 * pins in it and unpin_user_page*() will not remove pins from it.
 */
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	int locked = 0;

	if (!is_valid_gup_args(pages, NULL, &gup_flags,
			       FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
		return 0;

	return __gup_longterm_locked(current->mm, start, nr_pages, pages,
				     &locked, gup_flags);
}
EXPORT_SYMBOL(pin_user_pages_unlocked);

/**
 * memfd_pin_folios() - pin folios associated with a memfd
 * @memfd:	the memfd whose folios are to be pinned
 * @start:	the first memfd offset
 * @end:	the last memfd offset (inclusive)
 * @folios:	array that receives pointers to the folios pinned
 * @max_folios:	maximum number of entries in @folios
 * @offset:	the offset into the first folio
 *
 * Attempt to pin folios associated with a memfd in the contiguous range
 * [start, end]. Given that a memfd is either backed by shmem or hugetlb,
 * the folios can either be found in the page cache or need to be allocated
 * if necessary. Once the folios are located, they are all pinned via
 * FOLL_PIN and @offset is populated with the offset into the first folio.
 * Eventually, these pinned folios must be released either using
 * unpin_folios() or unpin_folio().
 *
 * It must be noted that the folios may be pinned for an indefinite amount
 * of time; in most cases, the duration of the pin is controlled by userspace.
 * This behavior is effectively the same as using FOLL_LONGTERM with other
 * GUP APIs.
 *
 * Returns number of folios pinned, which could be less than @max_folios
 * as it depends on the folio sizes that cover the range [start, end].
 * If no folios were pinned, it returns -errno.
 */
long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
		      struct folio **folios, unsigned int max_folios,
		      pgoff_t *offset)
{
	unsigned int flags, nr_folios, nr_found;
	unsigned int i, pgshift = PAGE_SHIFT;
	pgoff_t start_idx, end_idx, next_idx;
	struct folio *folio = NULL;
	struct folio_batch fbatch;
	struct hstate *h;
	long ret = -EINVAL;

	if (start < 0 || start > end || !max_folios)
		return -EINVAL;

	if (!memfd)
		return -EINVAL;

	if (!shmem_file(memfd) && !is_file_hugepages(memfd))
		return -EINVAL;

	if (end >= i_size_read(file_inode(memfd)))
		return -EINVAL;

	if (is_file_hugepages(memfd)) {
		h = hstate_file(memfd);
		pgshift = huge_page_shift(h);
	}

	flags = memalloc_pin_save();
	do {
		nr_folios = 0;
		start_idx = start >> pgshift;
		end_idx = end >> pgshift;
		if (is_file_hugepages(memfd)) {
			start_idx <<= huge_page_order(h);
			end_idx <<= huge_page_order(h);
		}

		folio_batch_init(&fbatch);
		while (start_idx <= end_idx && nr_folios < max_folios) {
			/*
			 * In most cases, we should be able to find the folios
			 * in the page cache. If we cannot find them for some
			 * reason, we try to allocate them and add them to the
			 * page cache.
			 */
			nr_found = filemap_get_folios_contig(memfd->f_mapping,
							     &start_idx,
							     end_idx,
							     &fbatch);
			if (folio) {
				folio_put(folio);
				folio = NULL;
			}

			next_idx = 0;
			for (i = 0; i < nr_found; i++) {
				/*
				 * As there can be multiple entries for a
				 * given folio in the batch returned by
				 * filemap_get_folios_contig(), the below
				 * check is to ensure that we pin and return a
				 * unique set of folios between start and end.
				 */
				if (next_idx &&
				    next_idx != folio_index(fbatch.folios[i]))
					continue;

				folio = page_folio(&fbatch.folios[i]->page);

				if (try_grab_folio(folio, 1, FOLL_PIN)) {
					folio_batch_release(&fbatch);
					ret = -EINVAL;
					goto err;
				}

				if (nr_folios == 0)
					*offset = offset_in_folio(folio, start);

				folios[nr_folios] = folio;
				next_idx = folio_next_index(folio);

				if (++nr_folios == max_folios)
					break;
			}

			folio = NULL;
			folio_batch_release(&fbatch);
			if (!nr_found) {
				folio = memfd_alloc_folio(memfd, start_idx);
				if (IS_ERR(folio)) {
					ret = PTR_ERR(folio);
					if (ret != -EEXIST)
						goto err;
					folio = NULL;
				}
			}
		}

		ret = check_and_migrate_movable_folios(nr_folios, folios);
	} while (ret == -EAGAIN);

	memalloc_pin_restore(flags);
	return ret ? ret : nr_folios;
err:
	memalloc_pin_restore(flags);
	unpin_folios(folios, nr_folios);
	return ret;
}
EXPORT_SYMBOL_GPL(memfd_pin_folios);
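
/*
 * Illustrative sketch of a memfd_pin_folios() caller (placeholder names, not
 * part of this file): pin the folios backing the first 2MB of a memfd.
 *
 *	struct folio *folios[16];
 *	pgoff_t offset;
 *	long nr;
 *
 *	nr = memfd_pin_folios(memfd, 0, SZ_2M - 1, folios,
 *			      ARRAY_SIZE(folios), &offset);
 *	if (nr > 0) {
 *		... access folios[0..nr-1], starting at offset in folios[0] ...
 *		unpin_folios(folios, nr);
 *	}
 */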