task_mmu.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/pagewalk.h>
  3. #include <linux/mm_inline.h>
  4. #include <linux/hugetlb.h>
  5. #include <linux/huge_mm.h>
  6. #include <linux/mount.h>
  7. #include <linux/ksm.h>
  8. #include <linux/seq_file.h>
  9. #include <linux/highmem.h>
  10. #include <linux/ptrace.h>
  11. #include <linux/slab.h>
  12. #include <linux/pagemap.h>
  13. #include <linux/mempolicy.h>
  14. #include <linux/rmap.h>
  15. #include <linux/swap.h>
  16. #include <linux/sched/mm.h>
  17. #include <linux/swapops.h>
  18. #include <linux/mmu_notifier.h>
  19. #include <linux/page_idle.h>
  20. #include <linux/shmem_fs.h>
  21. #include <linux/uaccess.h>
  22. #include <linux/pkeys.h>
  23. #include <linux/minmax.h>
  24. #include <linux/overflow.h>
  25. #include <linux/buildid.h>
  26. #include <asm/elf.h>
  27. #include <asm/tlb.h>
  28. #include <asm/tlbflush.h>
  29. #include "internal.h"
  30. #define SEQ_PUT_DEC(str, val) \
  31. seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
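/*
 * Worked example (assuming the common 4 KiB page size, PAGE_SHIFT = 12):
 * SEQ_PUT_DEC() takes a value in pages and prints it in kB, because
 * shifting by (PAGE_SHIFT - 10) converts pages to KiB. Here that is a
 * left shift by 2, so e.g. 100 pages are printed as "400".
 */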
  32. void task_mem(struct seq_file *m, struct mm_struct *mm)
  33. {
  34. unsigned long text, lib, swap, anon, file, shmem;
  35. unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  36. anon = get_mm_counter_sum(mm, MM_ANONPAGES);
  37. file = get_mm_counter_sum(mm, MM_FILEPAGES);
  38. shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES);
  39. /*
  40. * Note: to minimize their overhead, mm maintains hiwater_vm and
  41. * hiwater_rss only when about to *lower* total_vm or rss. Any
  42. * collector of these hiwater stats must therefore get total_vm
  43. * and rss too, which will usually be the higher. Barriers? not
  44. * worth the effort, such snapshots can always be inconsistent.
  45. */
  46. hiwater_vm = total_vm = mm->total_vm;
  47. if (hiwater_vm < mm->hiwater_vm)
  48. hiwater_vm = mm->hiwater_vm;
  49. hiwater_rss = total_rss = anon + file + shmem;
  50. if (hiwater_rss < mm->hiwater_rss)
  51. hiwater_rss = mm->hiwater_rss;
  52. /* split executable areas between text and lib */
  53. text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
  54. text = min(text, mm->exec_vm << PAGE_SHIFT);
  55. lib = (mm->exec_vm << PAGE_SHIFT) - text;
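/*
 * Worked example (hypothetical values, 4 KiB pages): with
 * start_code = 0x400000, end_code = 0x401800 and exec_vm = 10 pages,
 * the page-aligned text span is 0x2000 bytes (8 kB), which is below the
 * 40 kB of executable mappings, so VmExe = 8 kB and VmLib = 32 kB.
 */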
  56. swap = get_mm_counter_sum(mm, MM_SWAPENTS);
  57. SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
  58. SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
  59. SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
  60. SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
  61. SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
  62. SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
  63. SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
  64. SEQ_PUT_DEC(" kB\nRssFile:\t", file);
  65. SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
  66. SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
  67. SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
  68. seq_put_decimal_ull_width(m,
  69. " kB\nVmExe:\t", text >> 10, 8);
  70. seq_put_decimal_ull_width(m,
  71. " kB\nVmLib:\t", lib >> 10, 8);
  72. seq_put_decimal_ull_width(m,
  73. " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
  74. SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
  75. seq_puts(m, " kB\n");
  76. hugetlb_report_usage(m, mm);
  77. }
  78. #undef SEQ_PUT_DEC
  79. unsigned long task_vsize(struct mm_struct *mm)
  80. {
  81. return PAGE_SIZE * mm->total_vm;
  82. }
  83. unsigned long task_statm(struct mm_struct *mm,
  84. unsigned long *shared, unsigned long *text,
  85. unsigned long *data, unsigned long *resident)
  86. {
  87. *shared = get_mm_counter_sum(mm, MM_FILEPAGES) +
  88. get_mm_counter_sum(mm, MM_SHMEMPAGES);
  89. *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  90. >> PAGE_SHIFT;
  91. *data = mm->data_vm + mm->stack_vm;
  92. *resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES);
  93. return mm->total_vm;
  94. }
  95. #ifdef CONFIG_NUMA
  96. /*
  97. * Save get_task_policy() for show_numa_map().
  98. */
  99. static void hold_task_mempolicy(struct proc_maps_private *priv)
  100. {
  101. struct task_struct *task = priv->task;
  102. task_lock(task);
  103. priv->task_mempolicy = get_task_policy(task);
  104. mpol_get(priv->task_mempolicy);
  105. task_unlock(task);
  106. }
  107. static void release_task_mempolicy(struct proc_maps_private *priv)
  108. {
  109. mpol_put(priv->task_mempolicy);
  110. }
  111. #else
  112. static void hold_task_mempolicy(struct proc_maps_private *priv)
  113. {
  114. }
  115. static void release_task_mempolicy(struct proc_maps_private *priv)
  116. {
  117. }
  118. #endif
  119. static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
  120. loff_t *ppos)
  121. {
  122. struct vm_area_struct *vma = vma_next(&priv->iter);
  123. if (vma) {
  124. *ppos = vma->vm_start;
  125. } else {
  126. *ppos = -2UL;
  127. vma = get_gate_vma(priv->mm);
  128. }
  129. return vma;
  130. }
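/*
 * Sentinel offsets used by the maps seq_file iterator: once the VMA
 * iterator is exhausted, *ppos is set to -2 and the gate VMA (if the
 * architecture has one) is emitted one last time; m_next() then advances
 * -2 to -1, which m_start() treats as end of iteration.
 */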
  131. static void *m_start(struct seq_file *m, loff_t *ppos)
  132. {
  133. struct proc_maps_private *priv = m->private;
  134. unsigned long last_addr = *ppos;
  135. struct mm_struct *mm;
  136. /* See m_next(). Zero at the start or after lseek. */
  137. if (last_addr == -1UL)
  138. return NULL;
  139. priv->task = get_proc_task(priv->inode);
  140. if (!priv->task)
  141. return ERR_PTR(-ESRCH);
  142. mm = priv->mm;
  143. if (!mm || !mmget_not_zero(mm)) {
  144. put_task_struct(priv->task);
  145. priv->task = NULL;
  146. return NULL;
  147. }
  148. if (mmap_read_lock_killable(mm)) {
  149. mmput(mm);
  150. put_task_struct(priv->task);
  151. priv->task = NULL;
  152. return ERR_PTR(-EINTR);
  153. }
  154. vma_iter_init(&priv->iter, mm, last_addr);
  155. hold_task_mempolicy(priv);
  156. if (last_addr == -2UL)
  157. return get_gate_vma(mm);
  158. return proc_get_vma(priv, ppos);
  159. }
  160. static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
  161. {
  162. if (*ppos == -2UL) {
  163. *ppos = -1UL;
  164. return NULL;
  165. }
  166. return proc_get_vma(m->private, ppos);
  167. }
  168. static void m_stop(struct seq_file *m, void *v)
  169. {
  170. struct proc_maps_private *priv = m->private;
  171. struct mm_struct *mm = priv->mm;
  172. if (!priv->task)
  173. return;
  174. release_task_mempolicy(priv);
  175. mmap_read_unlock(mm);
  176. mmput(mm);
  177. put_task_struct(priv->task);
  178. priv->task = NULL;
  179. }
  180. static int proc_maps_open(struct inode *inode, struct file *file,
  181. const struct seq_operations *ops, int psize)
  182. {
  183. struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
  184. if (!priv)
  185. return -ENOMEM;
  186. priv->inode = inode;
  187. priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
  188. if (IS_ERR(priv->mm)) {
  189. int err = PTR_ERR(priv->mm);
  190. seq_release_private(inode, file);
  191. return err;
  192. }
  193. return 0;
  194. }
  195. static int proc_map_release(struct inode *inode, struct file *file)
  196. {
  197. struct seq_file *seq = file->private_data;
  198. struct proc_maps_private *priv = seq->private;
  199. if (priv->mm)
  200. mmdrop(priv->mm);
  201. return seq_release_private(inode, file);
  202. }
  203. static int do_maps_open(struct inode *inode, struct file *file,
  204. const struct seq_operations *ops)
  205. {
  206. return proc_maps_open(inode, file, ops,
  207. sizeof(struct proc_maps_private));
  208. }
  209. static void get_vma_name(struct vm_area_struct *vma,
  210. const struct path **path,
  211. const char **name,
  212. const char **name_fmt)
  213. {
  214. struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL;
  215. *name = NULL;
  216. *path = NULL;
  217. *name_fmt = NULL;
  218. /*
  219. * Print the dentry name for named mappings, and a
  220. * special [heap] marker for the heap:
  221. */
  222. if (vma->vm_file) {
  223. /*
  224. * If user named this anon shared memory via
  225. * prctl(PR_SET_VMA ..., use the provided name.
  226. */
  227. if (anon_name) {
  228. *name_fmt = "[anon_shmem:%s]";
  229. *name = anon_name->name;
  230. } else {
  231. *path = file_user_path(vma->vm_file);
  232. }
  233. return;
  234. }
  235. if (vma->vm_ops && vma->vm_ops->name) {
  236. *name = vma->vm_ops->name(vma);
  237. if (*name)
  238. return;
  239. }
  240. *name = arch_vma_name(vma);
  241. if (*name)
  242. return;
  243. if (!vma->vm_mm) {
  244. *name = "[vdso]";
  245. return;
  246. }
  247. if (vma_is_initial_heap(vma)) {
  248. *name = "[heap]";
  249. return;
  250. }
  251. if (vma_is_initial_stack(vma)) {
  252. *name = "[stack]";
  253. return;
  254. }
  255. if (anon_name) {
  256. *name_fmt = "[anon:%s]";
  257. *name = anon_name->name;
  258. return;
  259. }
  260. }
  261. static void show_vma_header_prefix(struct seq_file *m,
  262. unsigned long start, unsigned long end,
  263. vm_flags_t flags, unsigned long long pgoff,
  264. dev_t dev, unsigned long ino)
  265. {
  266. seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
  267. seq_put_hex_ll(m, NULL, start, 8);
  268. seq_put_hex_ll(m, "-", end, 8);
  269. seq_putc(m, ' ');
  270. seq_putc(m, flags & VM_READ ? 'r' : '-');
  271. seq_putc(m, flags & VM_WRITE ? 'w' : '-');
  272. seq_putc(m, flags & VM_EXEC ? 'x' : '-');
  273. seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
  274. seq_put_hex_ll(m, " ", pgoff, 8);
  275. seq_put_hex_ll(m, " ", MAJOR(dev), 2);
  276. seq_put_hex_ll(m, ":", MINOR(dev), 2);
  277. seq_put_decimal_ull(m, " ", ino);
  278. seq_putc(m, ' ');
  279. }
  280. static void
  281. show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
  282. {
  283. const struct path *path;
  284. const char *name_fmt, *name;
  285. vm_flags_t flags = vma->vm_flags;
  286. unsigned long ino = 0;
  287. unsigned long long pgoff = 0;
  288. unsigned long start, end;
  289. dev_t dev = 0;
  290. if (vma->vm_file) {
  291. const struct inode *inode = file_user_inode(vma->vm_file);
  292. dev = inode->i_sb->s_dev;
  293. ino = inode->i_ino;
  294. pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
  295. }
  296. start = vma->vm_start;
  297. end = vma->vm_end;
  298. show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
  299. get_vma_name(vma, &path, &name, &name_fmt);
  300. if (path) {
  301. seq_pad(m, ' ');
  302. seq_path(m, path, "\n");
  303. } else if (name_fmt) {
  304. seq_pad(m, ' ');
  305. seq_printf(m, name_fmt, name);
  306. } else if (name) {
  307. seq_pad(m, ' ');
  308. seq_puts(m, name);
  309. }
  310. seq_putc(m, '\n');
  311. }
  312. static int show_map(struct seq_file *m, void *v)
  313. {
  314. show_map_vma(m, v);
  315. return 0;
  316. }
  317. static const struct seq_operations proc_pid_maps_op = {
  318. .start = m_start,
  319. .next = m_next,
  320. .stop = m_stop,
  321. .show = show_map
  322. };
  323. static int pid_maps_open(struct inode *inode, struct file *file)
  324. {
  325. return do_maps_open(inode, file, &proc_pid_maps_op);
  326. }
  327. #define PROCMAP_QUERY_VMA_FLAGS ( \
  328. PROCMAP_QUERY_VMA_READABLE | \
  329. PROCMAP_QUERY_VMA_WRITABLE | \
  330. PROCMAP_QUERY_VMA_EXECUTABLE | \
  331. PROCMAP_QUERY_VMA_SHARED \
  332. )
  333. #define PROCMAP_QUERY_VALID_FLAGS_MASK ( \
  334. PROCMAP_QUERY_COVERING_OR_NEXT_VMA | \
  335. PROCMAP_QUERY_FILE_BACKED_VMA | \
  336. PROCMAP_QUERY_VMA_FLAGS \
  337. )
  338. static int query_vma_setup(struct mm_struct *mm)
  339. {
  340. return mmap_read_lock_killable(mm);
  341. }
  342. static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
  343. {
  344. mmap_read_unlock(mm);
  345. }
  346. static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
  347. {
  348. return find_vma(mm, addr);
  349. }
  350. static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
  351. unsigned long addr, u32 flags)
  352. {
  353. struct vm_area_struct *vma;
  354. next_vma:
  355. vma = query_vma_find_by_addr(mm, addr);
  356. if (!vma)
  357. goto no_vma;
  358. /* user requested only file-backed VMA, keep iterating */
  359. if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file)
  360. goto skip_vma;
  361. /* VMA permissions should satisfy query flags */
  362. if (flags & PROCMAP_QUERY_VMA_FLAGS) {
  363. u32 perm = 0;
  364. if (flags & PROCMAP_QUERY_VMA_READABLE)
  365. perm |= VM_READ;
  366. if (flags & PROCMAP_QUERY_VMA_WRITABLE)
  367. perm |= VM_WRITE;
  368. if (flags & PROCMAP_QUERY_VMA_EXECUTABLE)
  369. perm |= VM_EXEC;
  370. if (flags & PROCMAP_QUERY_VMA_SHARED)
  371. perm |= VM_MAYSHARE;
  372. if ((vma->vm_flags & perm) != perm)
  373. goto skip_vma;
  374. }
  375. /* found covering VMA or user is OK with the matching next VMA */
  376. if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr)
  377. return vma;
  378. skip_vma:
  379. /*
  380. * If the user needs closest matching VMA, keep iterating.
  381. */
  382. addr = vma->vm_end;
  383. if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA)
  384. goto next_vma;
  385. no_vma:
  386. return ERR_PTR(-ENOENT);
  387. }
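/*
 * Example of the matching semantics above: querying an address that lies
 * inside a VMA returns that VMA directly; querying an address in an
 * unmapped hole fails with -ENOENT unless
 * PROCMAP_QUERY_COVERING_OR_NEXT_VMA is set, in which case the walk
 * continues to the next VMA that also satisfies any permission and
 * file-backed filters requested in the flags.
 */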
  388. static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
  389. {
  390. struct procmap_query karg;
  391. struct vm_area_struct *vma;
  392. struct mm_struct *mm;
  393. const char *name = NULL;
  394. char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
  395. __u64 usize;
  396. int err;
  397. if (copy_from_user(&usize, (void __user *)uarg, sizeof(usize)))
  398. return -EFAULT;
  399. /* argument struct can never be that large, reject abuse */
  400. if (usize > PAGE_SIZE)
  401. return -E2BIG;
  402. /* argument struct should have at least query_flags and query_addr fields */
  403. if (usize < offsetofend(struct procmap_query, query_addr))
  404. return -EINVAL;
  405. err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
  406. if (err)
  407. return err;
  408. /* reject unknown flags */
  409. if (karg.query_flags & ~PROCMAP_QUERY_VALID_FLAGS_MASK)
  410. return -EINVAL;
  411. /* either both buffer address and size are set, or both should be zero */
  412. if (!!karg.vma_name_size != !!karg.vma_name_addr)
  413. return -EINVAL;
  414. if (!!karg.build_id_size != !!karg.build_id_addr)
  415. return -EINVAL;
  416. mm = priv->mm;
  417. if (!mm || !mmget_not_zero(mm))
  418. return -ESRCH;
  419. err = query_vma_setup(mm);
  420. if (err) {
  421. mmput(mm);
  422. return err;
  423. }
  424. vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
  425. if (IS_ERR(vma)) {
  426. err = PTR_ERR(vma);
  427. vma = NULL;
  428. goto out;
  429. }
  430. karg.vma_start = vma->vm_start;
  431. karg.vma_end = vma->vm_end;
  432. karg.vma_flags = 0;
  433. if (vma->vm_flags & VM_READ)
  434. karg.vma_flags |= PROCMAP_QUERY_VMA_READABLE;
  435. if (vma->vm_flags & VM_WRITE)
  436. karg.vma_flags |= PROCMAP_QUERY_VMA_WRITABLE;
  437. if (vma->vm_flags & VM_EXEC)
  438. karg.vma_flags |= PROCMAP_QUERY_VMA_EXECUTABLE;
  439. if (vma->vm_flags & VM_MAYSHARE)
  440. karg.vma_flags |= PROCMAP_QUERY_VMA_SHARED;
  441. karg.vma_page_size = vma_kernel_pagesize(vma);
  442. if (vma->vm_file) {
  443. const struct inode *inode = file_user_inode(vma->vm_file);
  444. karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT;
  445. karg.dev_major = MAJOR(inode->i_sb->s_dev);
  446. karg.dev_minor = MINOR(inode->i_sb->s_dev);
  447. karg.inode = inode->i_ino;
  448. } else {
  449. karg.vma_offset = 0;
  450. karg.dev_major = 0;
  451. karg.dev_minor = 0;
  452. karg.inode = 0;
  453. }
  454. if (karg.build_id_size) {
  455. __u32 build_id_sz;
  456. err = build_id_parse(vma, build_id_buf, &build_id_sz);
  457. if (err) {
  458. karg.build_id_size = 0;
  459. } else {
  460. if (karg.build_id_size < build_id_sz) {
  461. err = -ENAMETOOLONG;
  462. goto out;
  463. }
  464. karg.build_id_size = build_id_sz;
  465. }
  466. }
  467. if (karg.vma_name_size) {
  468. size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
  469. const struct path *path;
  470. const char *name_fmt;
  471. size_t name_sz = 0;
  472. get_vma_name(vma, &path, &name, &name_fmt);
  473. if (path || name_fmt || name) {
  474. name_buf = kmalloc(name_buf_sz, GFP_KERNEL);
  475. if (!name_buf) {
  476. err = -ENOMEM;
  477. goto out;
  478. }
  479. }
  480. if (path) {
  481. name = d_path(path, name_buf, name_buf_sz);
  482. if (IS_ERR(name)) {
  483. err = PTR_ERR(name);
  484. goto out;
  485. }
  486. name_sz = name_buf + name_buf_sz - name;
  487. } else if (name || name_fmt) {
  488. name_sz = 1 + snprintf(name_buf, name_buf_sz, name_fmt ?: "%s", name);
  489. name = name_buf;
  490. }
  491. if (name_sz > name_buf_sz) {
  492. err = -ENAMETOOLONG;
  493. goto out;
  494. }
  495. karg.vma_name_size = name_sz;
  496. }
  497. /* unlock vma or mmap_lock, and put mm_struct before copying data to user */
  498. query_vma_teardown(mm, vma);
  499. mmput(mm);
  500. if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
  501. name, karg.vma_name_size)) {
  502. kfree(name_buf);
  503. return -EFAULT;
  504. }
  505. kfree(name_buf);
  506. if (karg.build_id_size && copy_to_user(u64_to_user_ptr(karg.build_id_addr),
  507. build_id_buf, karg.build_id_size))
  508. return -EFAULT;
  509. if (copy_to_user(uarg, &karg, min_t(size_t, sizeof(karg), usize)))
  510. return -EFAULT;
  511. return 0;
  512. out:
  513. query_vma_teardown(mm, vma);
  514. mmput(mm);
  515. kfree(name_buf);
  516. return err;
  517. }
  518. static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  519. {
  520. struct seq_file *seq = file->private_data;
  521. struct proc_maps_private *priv = seq->private;
  522. switch (cmd) {
  523. case PROCMAP_QUERY:
  524. return do_procmap_query(priv, (void __user *)arg);
  525. default:
  526. return -ENOIOCTLCMD;
  527. }
  528. }
  529. const struct file_operations proc_pid_maps_operations = {
  530. .open = pid_maps_open,
  531. .read = seq_read,
  532. .llseek = seq_lseek,
  533. .release = proc_map_release,
  534. .unlocked_ioctl = procfs_procmap_ioctl,
  535. .compat_ioctl = compat_ptr_ioctl,
  536. };
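/*
 * Minimal userspace usage sketch for the PROCMAP_QUERY ioctl handled
 * above (illustrative only; struct procmap_query comes from the UAPI
 * header <linux/fs.h>, and the leading size field name is assumed here):
 *
 *	struct procmap_query q = {
 *		.size = sizeof(q),
 *		.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA,
 *		.query_addr = (__u64)(uintptr_t)addr_of_interest,
 *	};
 *	int fd = open("/proc/self/maps", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, PROCMAP_QUERY, &q) == 0)
 *		printf("vma: %llx-%llx flags %llx\n",
 *		       q.vma_start, q.vma_end, q.vma_flags);
 */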
  537. /*
  538. * Proportional Set Size (PSS): my share of RSS.
  539. *
  540. * PSS of a process is the count of pages it has in memory, where each
  541. * page is divided by the number of processes sharing it. So if a
  542. * process has 1000 pages all to itself, and 1000 shared with one other
  543. * process, its PSS will be 1500.
  544. *
  545. * To keep accumulated division errors low, we adopt a 64-bit
  546. * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real
  547. * byte count.
  548. *
  549. * A shift of 12 before division means (assuming 4K page size):
  550. * - 1M 3-user-pages add up to 8KB errors;
  551. * - supports mapcount up to 2^24, or 16M;
  552. * - supports PSS up to 2^52 bytes, or 4PB.
  553. */
  554. #define PSS_SHIFT 12
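/*
 * Worked example of the fixed-point accounting (4 KiB pages): a page
 * mapped by three processes adds (4096 << PSS_SHIFT) / 3 = 5592405
 * fixed-point units to each of them, i.e. 5592405 >> PSS_SHIFT = 1365
 * bytes apiece, so the three shares sum back to ~4 KiB with only a few
 * bytes of rounding error per page.
 */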
  555. #ifdef CONFIG_PROC_PAGE_MONITOR
  556. struct mem_size_stats {
  557. unsigned long resident;
  558. unsigned long shared_clean;
  559. unsigned long shared_dirty;
  560. unsigned long private_clean;
  561. unsigned long private_dirty;
  562. unsigned long referenced;
  563. unsigned long anonymous;
  564. unsigned long lazyfree;
  565. unsigned long anonymous_thp;
  566. unsigned long shmem_thp;
  567. unsigned long file_thp;
  568. unsigned long swap;
  569. unsigned long shared_hugetlb;
  570. unsigned long private_hugetlb;
  571. unsigned long ksm;
  572. u64 pss;
  573. u64 pss_anon;
  574. u64 pss_file;
  575. u64 pss_shmem;
  576. u64 pss_dirty;
  577. u64 pss_locked;
  578. u64 swap_pss;
  579. };
  580. static void smaps_page_accumulate(struct mem_size_stats *mss,
  581. struct folio *folio, unsigned long size, unsigned long pss,
  582. bool dirty, bool locked, bool private)
  583. {
  584. mss->pss += pss;
  585. if (folio_test_anon(folio))
  586. mss->pss_anon += pss;
  587. else if (folio_test_swapbacked(folio))
  588. mss->pss_shmem += pss;
  589. else
  590. mss->pss_file += pss;
  591. if (locked)
  592. mss->pss_locked += pss;
  593. if (dirty || folio_test_dirty(folio)) {
  594. mss->pss_dirty += pss;
  595. if (private)
  596. mss->private_dirty += size;
  597. else
  598. mss->shared_dirty += size;
  599. } else {
  600. if (private)
  601. mss->private_clean += size;
  602. else
  603. mss->shared_clean += size;
  604. }
  605. }
  606. static void smaps_account(struct mem_size_stats *mss, struct page *page,
  607. bool compound, bool young, bool dirty, bool locked,
  608. bool present)
  609. {
  610. struct folio *folio = page_folio(page);
  611. int i, nr = compound ? compound_nr(page) : 1;
  612. unsigned long size = nr * PAGE_SIZE;
  613. /*
  614. * First accumulate quantities that depend only on |size| and the type
  615. * of the compound page.
  616. */
  617. if (folio_test_anon(folio)) {
  618. mss->anonymous += size;
  619. if (!folio_test_swapbacked(folio) && !dirty &&
  620. !folio_test_dirty(folio))
  621. mss->lazyfree += size;
  622. }
  623. if (folio_test_ksm(folio))
  624. mss->ksm += size;
  625. mss->resident += size;
  626. /* Accumulate the size in pages that have been accessed. */
  627. if (young || folio_test_young(folio) || folio_test_referenced(folio))
  628. mss->referenced += size;
  629. /*
  630. * Then accumulate quantities that may depend on sharing, or that may
  631. * differ page-by-page.
  632. *
  633. * refcount == 1 for present entries guarantees that the folio is mapped
  634. * exactly once. For large folios this implies that exactly one
  635. * PTE/PMD/... maps (a part of) this folio.
  636. *
  637. * Treat all non-present entries (where relying on the mapcount and
  638. * refcount doesn't make sense) as "maybe shared, but not sure how
  639. * often". We treat device private entries as being fake-present.
  640. *
  641. * Note that it would not be safe to read the mapcount especially for
  642. * pages referenced by migration entries, even with the PTL held.
  643. */
  644. if (folio_ref_count(folio) == 1 || !present) {
  645. smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
  646. dirty, locked, present);
  647. return;
  648. }
  649. /*
  650. * We obtain a snapshot of the mapcount. Without holding the folio lock
  651. * this snapshot can be slightly wrong as we cannot always read the
  652. * mapcount atomically.
  653. */
  654. for (i = 0; i < nr; i++, page++) {
  655. int mapcount = folio_precise_page_mapcount(folio, page);
  656. unsigned long pss = PAGE_SIZE << PSS_SHIFT;
  657. if (mapcount >= 2)
  658. pss /= mapcount;
  659. smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
  660. dirty, locked, mapcount < 2);
  661. }
  662. }
  663. #ifdef CONFIG_SHMEM
  664. static int smaps_pte_hole(unsigned long addr, unsigned long end,
  665. __always_unused int depth, struct mm_walk *walk)
  666. {
  667. struct mem_size_stats *mss = walk->private;
  668. struct vm_area_struct *vma = walk->vma;
  669. mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
  670. linear_page_index(vma, addr),
  671. linear_page_index(vma, end));
  672. return 0;
  673. }
  674. #else
  675. #define smaps_pte_hole NULL
  676. #endif /* CONFIG_SHMEM */
  677. static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
  678. {
  679. #ifdef CONFIG_SHMEM
  680. if (walk->ops->pte_hole) {
  681. /* depth is not used */
  682. smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
  683. }
  684. #endif
  685. }
  686. static void smaps_pte_entry(pte_t *pte, unsigned long addr,
  687. struct mm_walk *walk)
  688. {
  689. struct mem_size_stats *mss = walk->private;
  690. struct vm_area_struct *vma = walk->vma;
  691. bool locked = !!(vma->vm_flags & VM_LOCKED);
  692. struct page *page = NULL;
  693. bool present = false, young = false, dirty = false;
  694. pte_t ptent = ptep_get(pte);
  695. if (pte_present(ptent)) {
  696. page = vm_normal_page(vma, addr, ptent);
  697. young = pte_young(ptent);
  698. dirty = pte_dirty(ptent);
  699. present = true;
  700. } else if (is_swap_pte(ptent)) {
  701. swp_entry_t swpent = pte_to_swp_entry(ptent);
  702. if (!non_swap_entry(swpent)) {
  703. int mapcount;
  704. mss->swap += PAGE_SIZE;
  705. mapcount = swp_swapcount(swpent);
  706. if (mapcount >= 2) {
  707. u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
  708. do_div(pss_delta, mapcount);
  709. mss->swap_pss += pss_delta;
  710. } else {
  711. mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
  712. }
  713. } else if (is_pfn_swap_entry(swpent)) {
  714. if (is_device_private_entry(swpent))
  715. present = true;
  716. page = pfn_swap_entry_to_page(swpent);
  717. }
  718. } else {
  719. smaps_pte_hole_lookup(addr, walk);
  720. return;
  721. }
  722. if (!page)
  723. return;
  724. smaps_account(mss, page, false, young, dirty, locked, present);
  725. }
  726. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  727. static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
  728. struct mm_walk *walk)
  729. {
  730. struct mem_size_stats *mss = walk->private;
  731. struct vm_area_struct *vma = walk->vma;
  732. bool locked = !!(vma->vm_flags & VM_LOCKED);
  733. struct page *page = NULL;
  734. bool present = false;
  735. struct folio *folio;
  736. if (pmd_present(*pmd)) {
  737. page = vm_normal_page_pmd(vma, addr, *pmd);
  738. present = true;
  739. } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
  740. swp_entry_t entry = pmd_to_swp_entry(*pmd);
  741. if (is_pfn_swap_entry(entry))
  742. page = pfn_swap_entry_to_page(entry);
  743. }
  744. if (IS_ERR_OR_NULL(page))
  745. return;
  746. folio = page_folio(page);
  747. if (folio_test_anon(folio))
  748. mss->anonymous_thp += HPAGE_PMD_SIZE;
  749. else if (folio_test_swapbacked(folio))
  750. mss->shmem_thp += HPAGE_PMD_SIZE;
  751. else if (folio_is_zone_device(folio))
  752. /* pass */;
  753. else
  754. mss->file_thp += HPAGE_PMD_SIZE;
  755. smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
  756. locked, present);
  757. }
  758. #else
  759. static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
  760. struct mm_walk *walk)
  761. {
  762. }
  763. #endif
  764. static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
  765. struct mm_walk *walk)
  766. {
  767. struct vm_area_struct *vma = walk->vma;
  768. pte_t *pte;
  769. spinlock_t *ptl;
  770. ptl = pmd_trans_huge_lock(pmd, vma);
  771. if (ptl) {
  772. smaps_pmd_entry(pmd, addr, walk);
  773. spin_unlock(ptl);
  774. goto out;
  775. }
  776. pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  777. if (!pte) {
  778. walk->action = ACTION_AGAIN;
  779. return 0;
  780. }
  781. for (; addr != end; pte++, addr += PAGE_SIZE)
  782. smaps_pte_entry(pte, addr, walk);
  783. pte_unmap_unlock(pte - 1, ptl);
  784. out:
  785. cond_resched();
  786. return 0;
  787. }
  788. static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
  789. {
  790. /*
  791. * Don't forget to update Documentation/ on changes.
  792. *
  793. * The length of the second dimension of mnemonics[]
  794. * needs to be 3 instead of previously set 2
  795. * (i.e. from [BITS_PER_LONG][2] to [BITS_PER_LONG][3])
  796. * to avoid spurious
  797. * -Werror=unterminated-string-initialization warning
  798. * with GCC 15
  799. */
  800. static const char mnemonics[BITS_PER_LONG][3] = {
  801. /*
  802. * In case we meet a flag we don't know about.
  803. */
  804. [0 ... (BITS_PER_LONG-1)] = "??",
  805. [ilog2(VM_READ)] = "rd",
  806. [ilog2(VM_WRITE)] = "wr",
  807. [ilog2(VM_EXEC)] = "ex",
  808. [ilog2(VM_SHARED)] = "sh",
  809. [ilog2(VM_MAYREAD)] = "mr",
  810. [ilog2(VM_MAYWRITE)] = "mw",
  811. [ilog2(VM_MAYEXEC)] = "me",
  812. [ilog2(VM_MAYSHARE)] = "ms",
  813. [ilog2(VM_GROWSDOWN)] = "gd",
  814. [ilog2(VM_PFNMAP)] = "pf",
  815. [ilog2(VM_LOCKED)] = "lo",
  816. [ilog2(VM_IO)] = "io",
  817. [ilog2(VM_SEQ_READ)] = "sr",
  818. [ilog2(VM_RAND_READ)] = "rr",
  819. [ilog2(VM_DONTCOPY)] = "dc",
  820. [ilog2(VM_DONTEXPAND)] = "de",
  821. [ilog2(VM_LOCKONFAULT)] = "lf",
  822. [ilog2(VM_ACCOUNT)] = "ac",
  823. [ilog2(VM_NORESERVE)] = "nr",
  824. [ilog2(VM_HUGETLB)] = "ht",
  825. [ilog2(VM_SYNC)] = "sf",
  826. [ilog2(VM_ARCH_1)] = "ar",
  827. [ilog2(VM_WIPEONFORK)] = "wf",
  828. [ilog2(VM_DONTDUMP)] = "dd",
  829. #ifdef CONFIG_ARM64_BTI
  830. [ilog2(VM_ARM64_BTI)] = "bt",
  831. #endif
  832. #ifdef CONFIG_MEM_SOFT_DIRTY
  833. [ilog2(VM_SOFTDIRTY)] = "sd",
  834. #endif
  835. [ilog2(VM_MIXEDMAP)] = "mm",
  836. [ilog2(VM_HUGEPAGE)] = "hg",
  837. [ilog2(VM_NOHUGEPAGE)] = "nh",
  838. [ilog2(VM_MERGEABLE)] = "mg",
  839. [ilog2(VM_UFFD_MISSING)]= "um",
  840. [ilog2(VM_UFFD_WP)] = "uw",
  841. #ifdef CONFIG_ARM64_MTE
  842. [ilog2(VM_MTE)] = "mt",
  843. [ilog2(VM_MTE_ALLOWED)] = "",
  844. #endif
  845. #ifdef CONFIG_ARCH_HAS_PKEYS
  846. /* These come out via ProtectionKey: */
  847. [ilog2(VM_PKEY_BIT0)] = "",
  848. [ilog2(VM_PKEY_BIT1)] = "",
  849. [ilog2(VM_PKEY_BIT2)] = "",
  850. #if VM_PKEY_BIT3
  851. [ilog2(VM_PKEY_BIT3)] = "",
  852. #endif
  853. #if VM_PKEY_BIT4
  854. [ilog2(VM_PKEY_BIT4)] = "",
  855. #endif
  856. #endif /* CONFIG_ARCH_HAS_PKEYS */
  857. #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
  858. [ilog2(VM_UFFD_MINOR)] = "ui",
  859. #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
  860. #ifdef CONFIG_X86_USER_SHADOW_STACK
  861. [ilog2(VM_SHADOW_STACK)] = "ss",
  862. #endif
  863. #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
  864. [ilog2(VM_DROPPABLE)] = "dp",
  865. #endif
  866. #ifdef CONFIG_64BIT
  867. [ilog2(VM_SEALED)] = "sl",
  868. #endif
  869. };
  870. size_t i;
  871. seq_puts(m, "VmFlags: ");
  872. for (i = 0; i < BITS_PER_LONG; i++) {
  873. if (!mnemonics[i][0])
  874. continue;
  875. if (vma->vm_flags & (1UL << i))
  876. seq_printf(m, "%s ", mnemonics[i]);
  877. }
  878. seq_putc(m, '\n');
  879. }
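/*
 * Example of the resulting line for a typical private, writable anonymous
 * mapping (illustrative; the exact set depends on the VMA):
 *
 *	VmFlags: rd wr mr mw me ac
 */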
  880. #ifdef CONFIG_HUGETLB_PAGE
  881. static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
  882. unsigned long addr, unsigned long end,
  883. struct mm_walk *walk)
  884. {
  885. struct mem_size_stats *mss = walk->private;
  886. struct vm_area_struct *vma = walk->vma;
  887. struct folio *folio = NULL;
  888. bool present = false;
  889. spinlock_t *ptl;
  890. pte_t ptent;
  891. ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
  892. ptent = huge_ptep_get(walk->mm, addr, pte);
  893. if (pte_present(ptent)) {
  894. folio = page_folio(pte_page(ptent));
  895. present = true;
  896. } else if (is_swap_pte(ptent)) {
  897. swp_entry_t swpent = pte_to_swp_entry(ptent);
  898. if (is_pfn_swap_entry(swpent))
  899. folio = pfn_swap_entry_folio(swpent);
  900. }
  901. if (folio) {
  902. /* We treat non-present entries as "maybe shared". */
  903. if (!present || folio_likely_mapped_shared(folio) ||
  904. hugetlb_pmd_shared(pte))
  905. mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
  906. else
  907. mss->private_hugetlb += huge_page_size(hstate_vma(vma));
  908. }
  909. spin_unlock(ptl);
  910. return 0;
  911. }
  912. #else
  913. #define smaps_hugetlb_range NULL
  914. #endif /* CONFIG_HUGETLB_PAGE */
  915. static const struct mm_walk_ops smaps_walk_ops = {
  916. .pmd_entry = smaps_pte_range,
  917. .hugetlb_entry = smaps_hugetlb_range,
  918. .walk_lock = PGWALK_RDLOCK,
  919. };
  920. static const struct mm_walk_ops smaps_shmem_walk_ops = {
  921. .pmd_entry = smaps_pte_range,
  922. .hugetlb_entry = smaps_hugetlb_range,
  923. .pte_hole = smaps_pte_hole,
  924. .walk_lock = PGWALK_RDLOCK,
  925. };
  926. /*
  927. * Gather mem stats from @vma with the indicated beginning
  928. * address @start, and keep them in @mss.
  929. *
  930. * Use vm_start of @vma as the beginning address if @start is 0.
  931. */
  932. static void smap_gather_stats(struct vm_area_struct *vma,
  933. struct mem_size_stats *mss, unsigned long start)
  934. {
  935. const struct mm_walk_ops *ops = &smaps_walk_ops;
  936. /* Invalid start */
  937. if (start >= vma->vm_end)
  938. return;
  939. if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
  940. /*
  941. * For shared or readonly shmem mappings we know that all
  942. * swapped out pages belong to the shmem object, and we can
  943. * obtain the swap value much more efficiently. For private
  944. * writable mappings, we might have COW pages that are
  945. * not affected by the parent swapped out pages of the shmem
  946. * object, so we have to distinguish them during the page walk.
  947. * Unless we know that the shmem object (or the part mapped by
  948. * our VMA) has no swapped out pages at all.
  949. */
  950. unsigned long shmem_swapped = shmem_swap_usage(vma);
  951. if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
  952. !(vma->vm_flags & VM_WRITE))) {
  953. mss->swap += shmem_swapped;
  954. } else {
  955. ops = &smaps_shmem_walk_ops;
  956. }
  957. }
  958. /* mmap_lock is held in m_start */
  959. if (!start)
  960. walk_page_vma(vma, ops, mss);
  961. else
  962. walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
  963. }
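/*
 * Example of the decision above: when the whole VMA is gathered
 * (start == 0) and the mapping is shared or read-only, the shmem object's
 * swap usage is added to mss->swap in one step via shmem_swap_usage();
 * a writable private mapping may contain COW pages, so it is walked with
 * smaps_shmem_walk_ops and swapped-out holes are charged page by page.
 */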
  964. #define SEQ_PUT_DEC(str, val) \
  965. seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
  966. /* Show the contents common for smaps and smaps_rollup */
  967. static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
  968. bool rollup_mode)
  969. {
  970. SEQ_PUT_DEC("Rss: ", mss->resident);
  971. SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT);
  972. SEQ_PUT_DEC(" kB\nPss_Dirty: ", mss->pss_dirty >> PSS_SHIFT);
  973. if (rollup_mode) {
  974. /*
  975. * These are meaningful only for smaps_rollup, otherwise two of
  976. * them are zero, and the other one is the same as Pss.
  977. */
  978. SEQ_PUT_DEC(" kB\nPss_Anon: ",
  979. mss->pss_anon >> PSS_SHIFT);
  980. SEQ_PUT_DEC(" kB\nPss_File: ",
  981. mss->pss_file >> PSS_SHIFT);
  982. SEQ_PUT_DEC(" kB\nPss_Shmem: ",
  983. mss->pss_shmem >> PSS_SHIFT);
  984. }
  985. SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean);
  986. SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty);
  987. SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean);
  988. SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty);
  989. SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced);
  990. SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous);
  991. SEQ_PUT_DEC(" kB\nKSM: ", mss->ksm);
  992. SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree);
  993. SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp);
  994. SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
  995. SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp);
  996. SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
  997. seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
  998. mss->private_hugetlb >> 10, 7);
  999. SEQ_PUT_DEC(" kB\nSwap: ", mss->swap);
  1000. SEQ_PUT_DEC(" kB\nSwapPss: ",
  1001. mss->swap_pss >> PSS_SHIFT);
  1002. SEQ_PUT_DEC(" kB\nLocked: ",
  1003. mss->pss_locked >> PSS_SHIFT);
  1004. seq_puts(m, " kB\n");
  1005. }
  1006. static int show_smap(struct seq_file *m, void *v)
  1007. {
  1008. struct vm_area_struct *vma = v;
  1009. struct mem_size_stats mss = {};
  1010. smap_gather_stats(vma, &mss, 0);
  1011. show_map_vma(m, vma);
  1012. SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start);
  1013. SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
  1014. SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma));
  1015. seq_puts(m, " kB\n");
  1016. __show_smap(m, &mss, false);
  1017. seq_printf(m, "THPeligible: %8u\n",
  1018. !!thp_vma_allowable_orders(vma, vma->vm_flags,
  1019. TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
  1020. if (arch_pkeys_enabled())
  1021. seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
  1022. show_smap_vma_flags(m, vma);
  1023. return 0;
  1024. }
  1025. static int show_smaps_rollup(struct seq_file *m, void *v)
  1026. {
  1027. struct proc_maps_private *priv = m->private;
  1028. struct mem_size_stats mss = {};
  1029. struct mm_struct *mm = priv->mm;
  1030. struct vm_area_struct *vma;
  1031. unsigned long vma_start = 0, last_vma_end = 0;
  1032. int ret = 0;
  1033. VMA_ITERATOR(vmi, mm, 0);
  1034. priv->task = get_proc_task(priv->inode);
  1035. if (!priv->task)
  1036. return -ESRCH;
  1037. if (!mm || !mmget_not_zero(mm)) {
  1038. ret = -ESRCH;
  1039. goto out_put_task;
  1040. }
  1041. ret = mmap_read_lock_killable(mm);
  1042. if (ret)
  1043. goto out_put_mm;
  1044. hold_task_mempolicy(priv);
  1045. vma = vma_next(&vmi);
  1046. if (unlikely(!vma))
  1047. goto empty_set;
  1048. vma_start = vma->vm_start;
  1049. do {
  1050. smap_gather_stats(vma, &mss, 0);
  1051. last_vma_end = vma->vm_end;
  1052. /*
  1053. * Release mmap_lock temporarily if someone wants to
  1054. * access it for a write request.
  1055. */
  1056. if (mmap_lock_is_contended(mm)) {
  1057. vma_iter_invalidate(&vmi);
  1058. mmap_read_unlock(mm);
  1059. ret = mmap_read_lock_killable(mm);
  1060. if (ret) {
  1061. release_task_mempolicy(priv);
  1062. goto out_put_mm;
  1063. }
  1064. /*
  1065. * After dropping the lock, there are four cases to
  1066. * consider. See the following example for explanation.
  1067. *
  1068. * +------+------+-----------+
  1069. * | VMA1 | VMA2 | VMA3 |
  1070. * +------+------+-----------+
  1071. * | | | |
  1072. * 4k 8k 16k 400k
  1073. *
  1074. * Suppose we drop the lock after reading VMA2 due to
  1075. * contention, then we get:
  1076. *
  1077. * last_vma_end = 16k
  1078. *
  1079. * 1) VMA2 is freed, but VMA3 exists:
  1080. *
  1081. * vma_next(vmi) will return VMA3.
  1082. * In this case, just continue from VMA3.
  1083. *
  1084. * 2) VMA2 still exists:
  1085. *
  1086. * vma_next(vmi) will return VMA3.
  1087. * In this case, just continue from VMA3.
  1088. *
  1089. * 3) No more VMAs can be found:
  1090. *
  1091. * vma_next(vmi) will return NULL.
  1092. * No more things to do, just break.
  1093. *
  1094. * 4) (last_vma_end - 1) is in the middle of a vma (VMA'):
  1095. *
  1096. * vma_next(vmi) will return VMA' whose range
  1097. * contains last_vma_end.
  1098. * Iterate VMA' from last_vma_end.
  1099. */
  1100. vma = vma_next(&vmi);
  1101. /* Case 3 above */
  1102. if (!vma)
  1103. break;
  1104. /* Case 1 and 2 above */
  1105. if (vma->vm_start >= last_vma_end) {
  1106. smap_gather_stats(vma, &mss, 0);
  1107. last_vma_end = vma->vm_end;
  1108. continue;
  1109. }
  1110. /* Case 4 above */
  1111. if (vma->vm_end > last_vma_end) {
  1112. smap_gather_stats(vma, &mss, last_vma_end);
  1113. last_vma_end = vma->vm_end;
  1114. }
  1115. }
  1116. } for_each_vma(vmi, vma);
  1117. empty_set:
  1118. show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
  1119. seq_pad(m, ' ');
  1120. seq_puts(m, "[rollup]\n");
  1121. __show_smap(m, &mss, true);
  1122. release_task_mempolicy(priv);
  1123. mmap_read_unlock(mm);
  1124. out_put_mm:
  1125. mmput(mm);
  1126. out_put_task:
  1127. put_task_struct(priv->task);
  1128. priv->task = NULL;
  1129. return ret;
  1130. }
  1131. #undef SEQ_PUT_DEC
  1132. static const struct seq_operations proc_pid_smaps_op = {
  1133. .start = m_start,
  1134. .next = m_next,
  1135. .stop = m_stop,
  1136. .show = show_smap
  1137. };
  1138. static int pid_smaps_open(struct inode *inode, struct file *file)
  1139. {
  1140. return do_maps_open(inode, file, &proc_pid_smaps_op);
  1141. }
  1142. static int smaps_rollup_open(struct inode *inode, struct file *file)
  1143. {
  1144. int ret;
  1145. struct proc_maps_private *priv;
  1146. priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
  1147. if (!priv)
  1148. return -ENOMEM;
  1149. ret = single_open(file, show_smaps_rollup, priv);
  1150. if (ret)
  1151. goto out_free;
  1152. priv->inode = inode;
  1153. priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
  1154. if (IS_ERR(priv->mm)) {
  1155. ret = PTR_ERR(priv->mm);
  1156. single_release(inode, file);
  1157. goto out_free;
  1158. }
  1159. return 0;
  1160. out_free:
  1161. kfree(priv);
  1162. return ret;
  1163. }
  1164. static int smaps_rollup_release(struct inode *inode, struct file *file)
  1165. {
  1166. struct seq_file *seq = file->private_data;
  1167. struct proc_maps_private *priv = seq->private;
  1168. if (priv->mm)
  1169. mmdrop(priv->mm);
  1170. kfree(priv);
  1171. return single_release(inode, file);
  1172. }
  1173. const struct file_operations proc_pid_smaps_operations = {
  1174. .open = pid_smaps_open,
  1175. .read = seq_read,
  1176. .llseek = seq_lseek,
  1177. .release = proc_map_release,
  1178. };
  1179. const struct file_operations proc_pid_smaps_rollup_operations = {
  1180. .open = smaps_rollup_open,
  1181. .read = seq_read,
  1182. .llseek = seq_lseek,
  1183. .release = smaps_rollup_release,
  1184. };
  1185. enum clear_refs_types {
  1186. CLEAR_REFS_ALL = 1,
  1187. CLEAR_REFS_ANON,
  1188. CLEAR_REFS_MAPPED,
  1189. CLEAR_REFS_SOFT_DIRTY,
  1190. CLEAR_REFS_MM_HIWATER_RSS,
  1191. CLEAR_REFS_LAST,
  1192. };
  1193. struct clear_refs_private {
  1194. enum clear_refs_types type;
  1195. };
  1196. #ifdef CONFIG_MEM_SOFT_DIRTY
  1197. static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
  1198. {
  1199. struct folio *folio;
  1200. if (!pte_write(pte))
  1201. return false;
  1202. if (!is_cow_mapping(vma->vm_flags))
  1203. return false;
  1204. if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
  1205. return false;
  1206. folio = vm_normal_folio(vma, addr, pte);
  1207. if (!folio)
  1208. return false;
  1209. return folio_maybe_dma_pinned(folio);
  1210. }
  1211. static inline void clear_soft_dirty(struct vm_area_struct *vma,
  1212. unsigned long addr, pte_t *pte)
  1213. {
  1214. /*
  1215. * The soft-dirty tracker uses #PF-s to catch writes
  1216. * to pages, so write-protect the pte as well. See the
  1217. * Documentation/admin-guide/mm/soft-dirty.rst for full description
  1218. * of how soft-dirty works.
  1219. */
  1220. pte_t ptent = ptep_get(pte);
  1221. if (pte_present(ptent)) {
  1222. pte_t old_pte;
  1223. if (pte_is_pinned(vma, addr, ptent))
  1224. return;
  1225. old_pte = ptep_modify_prot_start(vma, addr, pte);
  1226. ptent = pte_wrprotect(old_pte);
  1227. ptent = pte_clear_soft_dirty(ptent);
  1228. ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
  1229. } else if (is_swap_pte(ptent)) {
  1230. ptent = pte_swp_clear_soft_dirty(ptent);
  1231. set_pte_at(vma->vm_mm, addr, pte, ptent);
  1232. }
  1233. }
  1234. #else
  1235. static inline void clear_soft_dirty(struct vm_area_struct *vma,
  1236. unsigned long addr, pte_t *pte)
  1237. {
  1238. }
  1239. #endif
  1240. #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
  1241. static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
  1242. unsigned long addr, pmd_t *pmdp)
  1243. {
  1244. pmd_t old, pmd = *pmdp;
  1245. if (pmd_present(pmd)) {
  1246. /* See comment in change_huge_pmd() */
  1247. old = pmdp_invalidate(vma, addr, pmdp);
  1248. if (pmd_dirty(old))
  1249. pmd = pmd_mkdirty(pmd);
  1250. if (pmd_young(old))
  1251. pmd = pmd_mkyoung(pmd);
  1252. pmd = pmd_wrprotect(pmd);
  1253. pmd = pmd_clear_soft_dirty(pmd);
  1254. set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
  1255. } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
  1256. pmd = pmd_swp_clear_soft_dirty(pmd);
  1257. set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
  1258. }
  1259. }
  1260. #else
  1261. static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
  1262. unsigned long addr, pmd_t *pmdp)
  1263. {
  1264. }
  1265. #endif
  1266. static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
  1267. unsigned long end, struct mm_walk *walk)
  1268. {
  1269. struct clear_refs_private *cp = walk->private;
  1270. struct vm_area_struct *vma = walk->vma;
  1271. pte_t *pte, ptent;
  1272. spinlock_t *ptl;
  1273. struct folio *folio;
  1274. ptl = pmd_trans_huge_lock(pmd, vma);
  1275. if (ptl) {
  1276. if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
  1277. clear_soft_dirty_pmd(vma, addr, pmd);
  1278. goto out;
  1279. }
  1280. if (!pmd_present(*pmd))
  1281. goto out;
  1282. folio = pmd_folio(*pmd);
  1283. /* Clear accessed and referenced bits. */
  1284. pmdp_test_and_clear_young(vma, addr, pmd);
  1285. folio_test_clear_young(folio);
  1286. folio_clear_referenced(folio);
  1287. out:
  1288. spin_unlock(ptl);
  1289. return 0;
  1290. }
  1291. pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  1292. if (!pte) {
  1293. walk->action = ACTION_AGAIN;
  1294. return 0;
  1295. }
  1296. for (; addr != end; pte++, addr += PAGE_SIZE) {
  1297. ptent = ptep_get(pte);
  1298. if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
  1299. clear_soft_dirty(vma, addr, pte);
  1300. continue;
  1301. }
  1302. if (!pte_present(ptent))
  1303. continue;
  1304. folio = vm_normal_folio(vma, addr, ptent);
  1305. if (!folio)
  1306. continue;
  1307. /* Clear accessed and referenced bits. */
  1308. ptep_test_and_clear_young(vma, addr, pte);
  1309. folio_test_clear_young(folio);
  1310. folio_clear_referenced(folio);
  1311. }
  1312. pte_unmap_unlock(pte - 1, ptl);
  1313. cond_resched();
  1314. return 0;
  1315. }
  1316. static int clear_refs_test_walk(unsigned long start, unsigned long end,
  1317. struct mm_walk *walk)
  1318. {
  1319. struct clear_refs_private *cp = walk->private;
  1320. struct vm_area_struct *vma = walk->vma;
  1321. if (vma->vm_flags & VM_PFNMAP)
  1322. return 1;
  1323. /*
  1324. * Writing 1 to /proc/pid/clear_refs affects all pages.
  1325. * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
  1326. * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
  1327. * Writing 4 to /proc/pid/clear_refs affects all pages.
  1328. */
  1329. if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
  1330. return 1;
  1331. if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
  1332. return 1;
  1333. return 0;
  1334. }
  1335. static const struct mm_walk_ops clear_refs_walk_ops = {
  1336. .pmd_entry = clear_refs_pte_range,
  1337. .test_walk = clear_refs_test_walk,
  1338. .walk_lock = PGWALK_WRLOCK,
  1339. };
  1340. static ssize_t clear_refs_write(struct file *file, const char __user *buf,
  1341. size_t count, loff_t *ppos)
  1342. {
  1343. struct task_struct *task;
  1344. char buffer[PROC_NUMBUF] = {};
  1345. struct mm_struct *mm;
  1346. struct vm_area_struct *vma;
  1347. enum clear_refs_types type;
  1348. int itype;
  1349. int rv;
  1350. if (count > sizeof(buffer) - 1)
  1351. count = sizeof(buffer) - 1;
  1352. if (copy_from_user(buffer, buf, count))
  1353. return -EFAULT;
  1354. rv = kstrtoint(strstrip(buffer), 10, &itype);
  1355. if (rv < 0)
  1356. return rv;
  1357. type = (enum clear_refs_types)itype;
  1358. if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
  1359. return -EINVAL;
  1360. task = get_proc_task(file_inode(file));
  1361. if (!task)
  1362. return -ESRCH;
  1363. mm = get_task_mm(task);
  1364. if (mm) {
  1365. VMA_ITERATOR(vmi, mm, 0);
  1366. struct mmu_notifier_range range;
  1367. struct clear_refs_private cp = {
  1368. .type = type,
  1369. };
  1370. if (mmap_write_lock_killable(mm)) {
  1371. count = -EINTR;
  1372. goto out_mm;
  1373. }
  1374. if (type == CLEAR_REFS_MM_HIWATER_RSS) {
  1375. /*
  1376. * Writing 5 to /proc/pid/clear_refs resets the peak
  1377. * resident set size to this mm's current rss value.
  1378. */
  1379. reset_mm_hiwater_rss(mm);
  1380. goto out_unlock;
  1381. }
  1382. if (type == CLEAR_REFS_SOFT_DIRTY) {
  1383. for_each_vma(vmi, vma) {
  1384. if (!(vma->vm_flags & VM_SOFTDIRTY))
  1385. continue;
  1386. vm_flags_clear(vma, VM_SOFTDIRTY);
  1387. vma_set_page_prot(vma);
  1388. }
  1389. inc_tlb_flush_pending(mm);
  1390. mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
  1391. 0, mm, 0, -1UL);
  1392. mmu_notifier_invalidate_range_start(&range);
  1393. }
  1394. walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
  1395. if (type == CLEAR_REFS_SOFT_DIRTY) {
  1396. mmu_notifier_invalidate_range_end(&range);
  1397. flush_tlb_mm(mm);
  1398. dec_tlb_flush_pending(mm);
  1399. }
  1400. out_unlock:
  1401. mmap_write_unlock(mm);
  1402. out_mm:
  1403. mmput(mm);
  1404. }
  1405. put_task_struct(task);
  1406. return count;
  1407. }
  1408. const struct file_operations proc_clear_refs_operations = {
  1409. .write = clear_refs_write,
  1410. .llseek = noop_llseek,
  1411. };
  1412. typedef struct {
  1413. u64 pme;
  1414. } pagemap_entry_t;
  1415. struct pagemapread {
  1416. int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
  1417. pagemap_entry_t *buffer;
  1418. bool show_pfn;
  1419. };
  1420. #define PAGEMAP_WALK_SIZE (PMD_SIZE)
  1421. #define PAGEMAP_WALK_MASK (PMD_MASK)
  1422. #define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
  1423. #define PM_PFRAME_BITS 55
  1424. #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
  1425. #define PM_SOFT_DIRTY BIT_ULL(55)
  1426. #define PM_MMAP_EXCLUSIVE BIT_ULL(56)
  1427. #define PM_UFFD_WP BIT_ULL(57)
  1428. #define PM_FILE BIT_ULL(61)
  1429. #define PM_SWAP BIT_ULL(62)
  1430. #define PM_PRESENT BIT_ULL(63)
  1431. #define PM_END_OF_BUFFER 1
  1432. static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
  1433. {
  1434. return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
  1435. }
  1436. static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
  1437. {
  1438. pm->buffer[pm->pos++] = *pme;
  1439. if (pm->pos >= pm->len)
  1440. return PM_END_OF_BUFFER;
  1441. return 0;
  1442. }
  1443. static int pagemap_pte_hole(unsigned long start, unsigned long end,
  1444. __always_unused int depth, struct mm_walk *walk)
  1445. {
  1446. struct pagemapread *pm = walk->private;
  1447. unsigned long addr = start;
  1448. int err = 0;
  1449. while (addr < end) {
  1450. struct vm_area_struct *vma = find_vma(walk->mm, addr);
  1451. pagemap_entry_t pme = make_pme(0, 0);
  1452. /* End of address space hole, which we mark as non-present. */
  1453. unsigned long hole_end;
  1454. if (vma)
  1455. hole_end = min(end, vma->vm_start);
  1456. else
  1457. hole_end = end;
  1458. for (; addr < hole_end; addr += PAGE_SIZE) {
  1459. err = add_to_pagemap(&pme, pm);
  1460. if (err)
  1461. goto out;
  1462. }
  1463. if (!vma)
  1464. break;
  1465. /* Addresses in the VMA. */
  1466. if (vma->vm_flags & VM_SOFTDIRTY)
  1467. pme = make_pme(0, PM_SOFT_DIRTY);
  1468. for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
  1469. err = add_to_pagemap(&pme, pm);
  1470. if (err)
  1471. goto out;
  1472. }
  1473. }
  1474. out:
  1475. return err;
  1476. }
  1477. static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
  1478. struct vm_area_struct *vma, unsigned long addr, pte_t pte)
  1479. {
  1480. u64 frame = 0, flags = 0;
  1481. struct page *page = NULL;
  1482. struct folio *folio;
  1483. if (pte_present(pte)) {
  1484. if (pm->show_pfn)
  1485. frame = pte_pfn(pte);
  1486. flags |= PM_PRESENT;
  1487. page = vm_normal_page(vma, addr, pte);
  1488. if (pte_soft_dirty(pte))
  1489. flags |= PM_SOFT_DIRTY;
  1490. if (pte_uffd_wp(pte))
  1491. flags |= PM_UFFD_WP;
  1492. } else if (is_swap_pte(pte)) {
  1493. swp_entry_t entry;
  1494. if (pte_swp_soft_dirty(pte))
  1495. flags |= PM_SOFT_DIRTY;
  1496. if (pte_swp_uffd_wp(pte))
  1497. flags |= PM_UFFD_WP;
  1498. entry = pte_to_swp_entry(pte);
  1499. if (pm->show_pfn) {
  1500. pgoff_t offset;
  1501. /*
  1502. * For PFN swap offsets, keeping the offset field
  1503. * to be PFN only to be compatible with old smaps.
  1504. */
  1505. if (is_pfn_swap_entry(entry))
  1506. offset = swp_offset_pfn(entry);
  1507. else
  1508. offset = swp_offset(entry);
  1509. frame = swp_type(entry) |
  1510. (offset << MAX_SWAPFILES_SHIFT);
  1511. }
  1512. flags |= PM_SWAP;
  1513. if (is_pfn_swap_entry(entry))
  1514. page = pfn_swap_entry_to_page(entry);
  1515. if (pte_marker_entry_uffd_wp(entry))
  1516. flags |= PM_UFFD_WP;
  1517. }
  1518. if (page) {
  1519. folio = page_folio(page);
  1520. if (!folio_test_anon(folio))
  1521. flags |= PM_FILE;
  1522. if ((flags & PM_PRESENT) &&
  1523. folio_precise_page_mapcount(folio, page) == 1)
  1524. flags |= PM_MMAP_EXCLUSIVE;
  1525. }
  1526. if (vma->vm_flags & VM_SOFTDIRTY)
  1527. flags |= PM_SOFT_DIRTY;
  1528. return make_pme(frame, flags);
  1529. }
  1530. static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
  1531. struct mm_walk *walk)
  1532. {
  1533. struct vm_area_struct *vma = walk->vma;
  1534. struct pagemapread *pm = walk->private;
  1535. spinlock_t *ptl;
  1536. pte_t *pte, *orig_pte;
  1537. int err = 0;
  1538. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  1539. ptl = pmd_trans_huge_lock(pmdp, vma);
  1540. if (ptl) {
  1541. unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
  1542. u64 flags = 0, frame = 0;
  1543. pmd_t pmd = *pmdp;
  1544. struct page *page = NULL;
  1545. struct folio *folio = NULL;
  1546. if (vma->vm_flags & VM_SOFTDIRTY)
  1547. flags |= PM_SOFT_DIRTY;
  1548. if (pmd_present(pmd)) {
  1549. page = pmd_page(pmd);
  1550. flags |= PM_PRESENT;
  1551. if (pmd_soft_dirty(pmd))
  1552. flags |= PM_SOFT_DIRTY;
  1553. if (pmd_uffd_wp(pmd))
  1554. flags |= PM_UFFD_WP;
  1555. if (pm->show_pfn)
  1556. frame = pmd_pfn(pmd) + idx;
  1557. }
  1558. #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
  1559. else if (is_swap_pmd(pmd)) {
  1560. swp_entry_t entry = pmd_to_swp_entry(pmd);
  1561. unsigned long offset;
  1562. if (pm->show_pfn) {
  1563. if (is_pfn_swap_entry(entry))
  1564. offset = swp_offset_pfn(entry) + idx;
  1565. else
  1566. offset = swp_offset(entry) + idx;
  1567. frame = swp_type(entry) |
  1568. (offset << MAX_SWAPFILES_SHIFT);
  1569. }
  1570. flags |= PM_SWAP;
  1571. if (pmd_swp_soft_dirty(pmd))
  1572. flags |= PM_SOFT_DIRTY;
  1573. if (pmd_swp_uffd_wp(pmd))
  1574. flags |= PM_UFFD_WP;
  1575. VM_BUG_ON(!is_pmd_migration_entry(pmd));
  1576. page = pfn_swap_entry_to_page(entry);
  1577. }
  1578. #endif
  1579. if (page) {
  1580. folio = page_folio(page);
  1581. if (!folio_test_anon(folio))
  1582. flags |= PM_FILE;
  1583. }
  1584. for (; addr != end; addr += PAGE_SIZE, idx++) {
  1585. u64 cur_flags = flags;
  1586. pagemap_entry_t pme;
  1587. if (folio && (flags & PM_PRESENT) &&
  1588. folio_precise_page_mapcount(folio, page + idx) == 1)
  1589. cur_flags |= PM_MMAP_EXCLUSIVE;
  1590. pme = make_pme(frame, cur_flags);
  1591. err = add_to_pagemap(&pme, pm);
  1592. if (err)
  1593. break;
  1594. if (pm->show_pfn) {
  1595. if (flags & PM_PRESENT)
  1596. frame++;
  1597. else if (flags & PM_SWAP)
  1598. frame += (1 << MAX_SWAPFILES_SHIFT);
  1599. }
  1600. }
  1601. spin_unlock(ptl);
  1602. return err;
  1603. }
  1604. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  1605. /*
  1606. * We can assume that @vma always points to a valid one and @end never
  1607. * goes beyond vma->vm_end.
  1608. */
  1609. orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
  1610. if (!pte) {
  1611. walk->action = ACTION_AGAIN;
  1612. return err;
  1613. }
  1614. for (; addr < end; pte++, addr += PAGE_SIZE) {
  1615. pagemap_entry_t pme;
  1616. pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
  1617. err = add_to_pagemap(&pme, pm);
  1618. if (err)
  1619. break;
  1620. }
  1621. pte_unmap_unlock(orig_pte, ptl);
  1622. cond_resched();
  1623. return err;
  1624. }
  1625. #ifdef CONFIG_HUGETLB_PAGE
  1626. /* This function walks within one hugetlb entry in the single call */
  1627. static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
  1628. unsigned long addr, unsigned long end,
  1629. struct mm_walk *walk)
  1630. {
  1631. struct pagemapread *pm = walk->private;
  1632. struct vm_area_struct *vma = walk->vma;
  1633. u64 flags = 0, frame = 0;
  1634. int err = 0;
  1635. pte_t pte;
  1636. if (vma->vm_flags & VM_SOFTDIRTY)
  1637. flags |= PM_SOFT_DIRTY;
  1638. pte = huge_ptep_get(walk->mm, addr, ptep);
  1639. if (pte_present(pte)) {
  1640. struct folio *folio = page_folio(pte_page(pte));
  1641. if (!folio_test_anon(folio))
  1642. flags |= PM_FILE;
  1643. if (!folio_likely_mapped_shared(folio) &&
  1644. !hugetlb_pmd_shared(ptep))
  1645. flags |= PM_MMAP_EXCLUSIVE;
  1646. if (huge_pte_uffd_wp(pte))
  1647. flags |= PM_UFFD_WP;
  1648. flags |= PM_PRESENT;
  1649. if (pm->show_pfn)
  1650. frame = pte_pfn(pte) +
  1651. ((addr & ~hmask) >> PAGE_SHIFT);
  1652. } else if (pte_swp_uffd_wp_any(pte)) {
  1653. flags |= PM_UFFD_WP;
  1654. }
  1655. for (; addr != end; addr += PAGE_SIZE) {
  1656. pagemap_entry_t pme = make_pme(frame, flags);
  1657. err = add_to_pagemap(&pme, pm);
  1658. if (err)
  1659. return err;
  1660. if (pm->show_pfn && (flags & PM_PRESENT))
  1661. frame++;
  1662. }
  1663. cond_resched();
  1664. return err;
  1665. }
  1666. #else
  1667. #define pagemap_hugetlb_range NULL
  1668. #endif /* HUGETLB_PAGE */
  1669. static const struct mm_walk_ops pagemap_ops = {
  1670. .pmd_entry = pagemap_pmd_range,
  1671. .pte_hole = pagemap_pte_hole,
  1672. .hugetlb_entry = pagemap_hugetlb_range,
  1673. .walk_lock = PGWALK_RDLOCK,
  1674. };
  1675. /*
  1676. * /proc/pid/pagemap - an array mapping virtual pages to pfns
  1677. *
  1678. * For each page in the address space, this file contains one 64-bit entry
  1679. * consisting of the following:
  1680. *
  1681. * Bits 0-54 page frame number (PFN) if present
  1682. * Bits 0-4 swap type if swapped
  1683. * Bits 5-54 swap offset if swapped
  1684. * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
  1685. * Bit 56 page exclusively mapped
  1686. * Bit 57 pte is uffd-wp write-protected
  1687. * Bits 58-60 zero
  1688. * Bit 61 page is file-page or shared-anon
  1689. * Bit 62 page swapped
  1690. * Bit 63 page present
  1691. *
  1692. * If the page is not present but in swap, then the PFN contains an
  1693. * encoding of the swap file number and the page's offset into the
  1694. * swap. Unmapped pages return a null PFN. This allows determining
  1695. * precisely which pages are mapped (or in swap) and comparing mapped
  1696. * pages between processes.
  1697. *
  1698. * Efficient users of this interface will use /proc/pid/maps to
  1699. * determine which areas of memory are actually mapped and llseek to
  1700. * skip over unmapped regions.
  1701. */
  1702. static ssize_t pagemap_read(struct file *file, char __user *buf,
  1703. size_t count, loff_t *ppos)
  1704. {
  1705. struct mm_struct *mm = file->private_data;
  1706. struct pagemapread pm;
  1707. unsigned long src;
  1708. unsigned long svpfn;
  1709. unsigned long start_vaddr;
  1710. unsigned long end_vaddr;
  1711. int ret = 0, copied = 0;
  1712. if (!mm || !mmget_not_zero(mm))
  1713. goto out;
  1714. ret = -EINVAL;
  1715. /* file position must be aligned */
  1716. if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
  1717. goto out_mm;
  1718. ret = 0;
  1719. if (!count)
  1720. goto out_mm;
  1721. /* do not disclose physical addresses: attack vector */
  1722. pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
  1723. pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
  1724. pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
  1725. ret = -ENOMEM;
  1726. if (!pm.buffer)
  1727. goto out_mm;
  1728. src = *ppos;
  1729. svpfn = src / PM_ENTRY_BYTES;
  1730. end_vaddr = mm->task_size;
  1731. /* watch out for wraparound */
  1732. start_vaddr = end_vaddr;
  1733. if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
  1734. unsigned long end;
  1735. ret = mmap_read_lock_killable(mm);
  1736. if (ret)
  1737. goto out_free;
  1738. start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
  1739. mmap_read_unlock(mm);
  1740. end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT);
  1741. if (end >= start_vaddr && end < mm->task_size)
  1742. end_vaddr = end;
  1743. }
  1744. /* Ensure the address is inside the task */
  1745. if (start_vaddr > mm->task_size)
  1746. start_vaddr = end_vaddr;
  1747. ret = 0;
  1748. while (count && (start_vaddr < end_vaddr)) {
  1749. int len;
  1750. unsigned long end;
  1751. pm.pos = 0;
  1752. end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
  1753. /* overflow ? */
  1754. if (end < start_vaddr || end > end_vaddr)
  1755. end = end_vaddr;
  1756. ret = mmap_read_lock_killable(mm);
  1757. if (ret)
  1758. goto out_free;
  1759. ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
  1760. mmap_read_unlock(mm);
  1761. start_vaddr = end;
  1762. len = min(count, PM_ENTRY_BYTES * pm.pos);
  1763. if (copy_to_user(buf, pm.buffer, len)) {
  1764. ret = -EFAULT;
  1765. goto out_free;
  1766. }
  1767. copied += len;
  1768. buf += len;
  1769. count -= len;
  1770. }
  1771. *ppos += copied;
  1772. if (!ret || ret == PM_END_OF_BUFFER)
  1773. ret = copied;
  1774. out_free:
  1775. kfree(pm.buffer);
  1776. out_mm:
  1777. mmput(mm);
  1778. out:
  1779. return ret;
  1780. }
  1781. static int pagemap_open(struct inode *inode, struct file *file)
  1782. {
  1783. struct mm_struct *mm;
  1784. mm = proc_mem_open(inode, PTRACE_MODE_READ);
  1785. if (IS_ERR(mm))
  1786. return PTR_ERR(mm);
  1787. file->private_data = mm;
  1788. return 0;
  1789. }
  1790. static int pagemap_release(struct inode *inode, struct file *file)
  1791. {
  1792. struct mm_struct *mm = file->private_data;
  1793. if (mm)
  1794. mmdrop(mm);
  1795. return 0;
  1796. }
  1797. #define PM_SCAN_CATEGORIES (PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN | \
  1798. PAGE_IS_FILE | PAGE_IS_PRESENT | \
  1799. PAGE_IS_SWAPPED | PAGE_IS_PFNZERO | \
  1800. PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY)
  1801. #define PM_SCAN_FLAGS (PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
  1802. struct pagemap_scan_private {
  1803. struct pm_scan_arg arg;
  1804. unsigned long masks_of_interest, cur_vma_category;
  1805. struct page_region *vec_buf;
  1806. unsigned long vec_buf_len, vec_buf_index, found_pages;
  1807. struct page_region __user *vec_out;
  1808. };
  1809. static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
  1810. struct vm_area_struct *vma,
  1811. unsigned long addr, pte_t pte)
  1812. {
  1813. unsigned long categories = 0;
  1814. if (pte_present(pte)) {
  1815. struct page *page;
  1816. categories |= PAGE_IS_PRESENT;
  1817. if (!pte_uffd_wp(pte))
  1818. categories |= PAGE_IS_WRITTEN;
  1819. if (p->masks_of_interest & PAGE_IS_FILE) {
  1820. page = vm_normal_page(vma, addr, pte);
  1821. if (page && !PageAnon(page))
  1822. categories |= PAGE_IS_FILE;
  1823. }
  1824. if (is_zero_pfn(pte_pfn(pte)))
  1825. categories |= PAGE_IS_PFNZERO;
  1826. if (pte_soft_dirty(pte))
  1827. categories |= PAGE_IS_SOFT_DIRTY;
  1828. } else if (is_swap_pte(pte)) {
  1829. swp_entry_t swp;
  1830. categories |= PAGE_IS_SWAPPED;
  1831. if (!pte_swp_uffd_wp_any(pte))
  1832. categories |= PAGE_IS_WRITTEN;
  1833. if (p->masks_of_interest & PAGE_IS_FILE) {
  1834. swp = pte_to_swp_entry(pte);
  1835. if (is_pfn_swap_entry(swp) &&
  1836. !folio_test_anon(pfn_swap_entry_folio(swp)))
  1837. categories |= PAGE_IS_FILE;
  1838. }
  1839. if (pte_swp_soft_dirty(pte))
  1840. categories |= PAGE_IS_SOFT_DIRTY;
  1841. }
  1842. return categories;
  1843. }
  1844. static void make_uffd_wp_pte(struct vm_area_struct *vma,
  1845. unsigned long addr, pte_t *pte, pte_t ptent)
  1846. {
  1847. if (pte_present(ptent)) {
  1848. pte_t old_pte;
  1849. old_pte = ptep_modify_prot_start(vma, addr, pte);
  1850. ptent = pte_mkuffd_wp(old_pte);
  1851. ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
  1852. } else if (is_swap_pte(ptent)) {
  1853. ptent = pte_swp_mkuffd_wp(ptent);
  1854. set_pte_at(vma->vm_mm, addr, pte, ptent);
  1855. } else {
  1856. set_pte_at(vma->vm_mm, addr, pte,
  1857. make_pte_marker(PTE_MARKER_UFFD_WP));
  1858. }
  1859. }
  1860. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  1861. static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
  1862. struct vm_area_struct *vma,
  1863. unsigned long addr, pmd_t pmd)
  1864. {
  1865. unsigned long categories = PAGE_IS_HUGE;
  1866. if (pmd_present(pmd)) {
  1867. struct page *page;
  1868. categories |= PAGE_IS_PRESENT;
  1869. if (!pmd_uffd_wp(pmd))
  1870. categories |= PAGE_IS_WRITTEN;
  1871. if (p->masks_of_interest & PAGE_IS_FILE) {
  1872. page = vm_normal_page_pmd(vma, addr, pmd);
  1873. if (page && !PageAnon(page))
  1874. categories |= PAGE_IS_FILE;
  1875. }
  1876. if (is_huge_zero_pmd(pmd))
  1877. categories |= PAGE_IS_PFNZERO;
  1878. if (pmd_soft_dirty(pmd))
  1879. categories |= PAGE_IS_SOFT_DIRTY;
  1880. } else if (is_swap_pmd(pmd)) {
  1881. swp_entry_t swp;
  1882. categories |= PAGE_IS_SWAPPED;
  1883. if (!pmd_swp_uffd_wp(pmd))
  1884. categories |= PAGE_IS_WRITTEN;
  1885. if (pmd_swp_soft_dirty(pmd))
  1886. categories |= PAGE_IS_SOFT_DIRTY;
  1887. if (p->masks_of_interest & PAGE_IS_FILE) {
  1888. swp = pmd_to_swp_entry(pmd);
  1889. if (is_pfn_swap_entry(swp) &&
  1890. !folio_test_anon(pfn_swap_entry_folio(swp)))
  1891. categories |= PAGE_IS_FILE;
  1892. }
  1893. }
  1894. return categories;
  1895. }
  1896. static void make_uffd_wp_pmd(struct vm_area_struct *vma,
  1897. unsigned long addr, pmd_t *pmdp)
  1898. {
  1899. pmd_t old, pmd = *pmdp;
  1900. if (pmd_present(pmd)) {
  1901. old = pmdp_invalidate_ad(vma, addr, pmdp);
  1902. pmd = pmd_mkuffd_wp(old);
  1903. set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
  1904. } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
  1905. pmd = pmd_swp_mkuffd_wp(pmd);
  1906. set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
  1907. }
  1908. }
  1909. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  1910. #ifdef CONFIG_HUGETLB_PAGE
  1911. static unsigned long pagemap_hugetlb_category(pte_t pte)
  1912. {
  1913. unsigned long categories = PAGE_IS_HUGE;
  1914. /*
  1915. * According to pagemap_hugetlb_range(), file-backed HugeTLB
  1916. * page cannot be swapped. So PAGE_IS_FILE is not checked for
  1917. * swapped pages.
  1918. */
  1919. if (pte_present(pte)) {
  1920. categories |= PAGE_IS_PRESENT;
  1921. if (!huge_pte_uffd_wp(pte))
  1922. categories |= PAGE_IS_WRITTEN;
  1923. if (!PageAnon(pte_page(pte)))
  1924. categories |= PAGE_IS_FILE;
  1925. if (is_zero_pfn(pte_pfn(pte)))
  1926. categories |= PAGE_IS_PFNZERO;
  1927. if (pte_soft_dirty(pte))
  1928. categories |= PAGE_IS_SOFT_DIRTY;
  1929. } else if (is_swap_pte(pte)) {
  1930. categories |= PAGE_IS_SWAPPED;
  1931. if (!pte_swp_uffd_wp_any(pte))
  1932. categories |= PAGE_IS_WRITTEN;
  1933. if (pte_swp_soft_dirty(pte))
  1934. categories |= PAGE_IS_SOFT_DIRTY;
  1935. }
  1936. return categories;
  1937. }
  1938. static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
  1939. unsigned long addr, pte_t *ptep,
  1940. pte_t ptent)
  1941. {
  1942. unsigned long psize;
  1943. if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent))
  1944. return;
  1945. psize = huge_page_size(hstate_vma(vma));
  1946. if (is_hugetlb_entry_migration(ptent))
  1947. set_huge_pte_at(vma->vm_mm, addr, ptep,
  1948. pte_swp_mkuffd_wp(ptent), psize);
  1949. else if (!huge_pte_none(ptent))
  1950. huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
  1951. huge_pte_mkuffd_wp(ptent));
  1952. else
  1953. set_huge_pte_at(vma->vm_mm, addr, ptep,
  1954. make_pte_marker(PTE_MARKER_UFFD_WP), psize);
  1955. }
  1956. #endif /* CONFIG_HUGETLB_PAGE */
  1957. #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
  1958. static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
  1959. unsigned long addr, unsigned long end)
  1960. {
  1961. struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
  1962. if (!p->vec_buf)
  1963. return;
  1964. if (cur_buf->start != addr)
  1965. cur_buf->end = addr;
  1966. else
  1967. cur_buf->start = cur_buf->end = 0;
  1968. p->found_pages -= (end - addr) / PAGE_SIZE;
  1969. }
  1970. #endif
  1971. static bool pagemap_scan_is_interesting_page(unsigned long categories,
  1972. const struct pagemap_scan_private *p)
  1973. {
  1974. categories ^= p->arg.category_inverted;
  1975. if ((categories & p->arg.category_mask) != p->arg.category_mask)
  1976. return false;
  1977. if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask))
  1978. return false;
  1979. return true;
  1980. }
  1981. static bool pagemap_scan_is_interesting_vma(unsigned long categories,
  1982. const struct pagemap_scan_private *p)
  1983. {
  1984. unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED;
  1985. categories ^= p->arg.category_inverted;
  1986. if ((categories & required) != required)
  1987. return false;
  1988. return true;
  1989. }
  1990. static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
  1991. struct mm_walk *walk)
  1992. {
  1993. struct pagemap_scan_private *p = walk->private;
  1994. struct vm_area_struct *vma = walk->vma;
  1995. unsigned long vma_category = 0;
  1996. bool wp_allowed = userfaultfd_wp_async(vma) &&
  1997. userfaultfd_wp_use_markers(vma);
  1998. if (!wp_allowed) {
  1999. /* User requested explicit failure over wp-async capability */
  2000. if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
  2001. return -EPERM;
  2002. /*
  2003. * User requires wr-protect, and allows silently skipping
  2004. * unsupported vmas.
  2005. */
  2006. if (p->arg.flags & PM_SCAN_WP_MATCHING)
  2007. return 1;
  2008. /*
  2009. * Then the request doesn't involve wr-protects at all,
  2010. * fall through to the rest checks, and allow vma walk.
  2011. */
  2012. }
  2013. if (vma->vm_flags & VM_PFNMAP)
  2014. return 1;
  2015. if (wp_allowed)
  2016. vma_category |= PAGE_IS_WPALLOWED;
  2017. if (vma->vm_flags & VM_SOFTDIRTY)
  2018. vma_category |= PAGE_IS_SOFT_DIRTY;
  2019. if (!pagemap_scan_is_interesting_vma(vma_category, p))
  2020. return 1;
  2021. p->cur_vma_category = vma_category;
  2022. return 0;
  2023. }
  2024. static bool pagemap_scan_push_range(unsigned long categories,
  2025. struct pagemap_scan_private *p,
  2026. unsigned long addr, unsigned long end)
  2027. {
  2028. struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
  2029. /*
  2030. * When there is no output buffer provided at all, the sentinel values
  2031. * won't match here. There is no other way for `cur_buf->end` to be
  2032. * non-zero other than it being non-empty.
  2033. */
  2034. if (addr == cur_buf->end && categories == cur_buf->categories) {
  2035. cur_buf->end = end;
  2036. return true;
  2037. }
  2038. if (cur_buf->end) {
  2039. if (p->vec_buf_index >= p->vec_buf_len - 1)
  2040. return false;
  2041. cur_buf = &p->vec_buf[++p->vec_buf_index];
  2042. }
  2043. cur_buf->start = addr;
  2044. cur_buf->end = end;
  2045. cur_buf->categories = categories;
  2046. return true;
  2047. }
  2048. static int pagemap_scan_output(unsigned long categories,
  2049. struct pagemap_scan_private *p,
  2050. unsigned long addr, unsigned long *end)
  2051. {
  2052. unsigned long n_pages, total_pages;
  2053. int ret = 0;
  2054. if (!p->vec_buf)
  2055. return 0;
  2056. categories &= p->arg.return_mask;
  2057. n_pages = (*end - addr) / PAGE_SIZE;
  2058. if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
  2059. total_pages > p->arg.max_pages) {
  2060. size_t n_too_much = total_pages - p->arg.max_pages;
  2061. *end -= n_too_much * PAGE_SIZE;
  2062. n_pages -= n_too_much;
  2063. ret = -ENOSPC;
  2064. }
  2065. if (!pagemap_scan_push_range(categories, p, addr, *end)) {
  2066. *end = addr;
  2067. n_pages = 0;
  2068. ret = -ENOSPC;
  2069. }
  2070. p->found_pages += n_pages;
  2071. if (ret)
  2072. p->arg.walk_end = *end;
  2073. return ret;
  2074. }
  2075. static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
  2076. unsigned long end, struct mm_walk *walk)
  2077. {
  2078. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  2079. struct pagemap_scan_private *p = walk->private;
  2080. struct vm_area_struct *vma = walk->vma;
  2081. unsigned long categories;
  2082. spinlock_t *ptl;
  2083. int ret = 0;
  2084. ptl = pmd_trans_huge_lock(pmd, vma);
  2085. if (!ptl)
  2086. return -ENOENT;
  2087. categories = p->cur_vma_category |
  2088. pagemap_thp_category(p, vma, start, *pmd);
  2089. if (!pagemap_scan_is_interesting_page(categories, p))
  2090. goto out_unlock;
  2091. ret = pagemap_scan_output(categories, p, start, &end);
  2092. if (start == end)
  2093. goto out_unlock;
  2094. if (~p->arg.flags & PM_SCAN_WP_MATCHING)
  2095. goto out_unlock;
  2096. if (~categories & PAGE_IS_WRITTEN)
  2097. goto out_unlock;
  2098. /*
  2099. * Break huge page into small pages if the WP operation
  2100. * needs to be performed on a portion of the huge page.
  2101. */
  2102. if (end != start + HPAGE_SIZE) {
  2103. spin_unlock(ptl);
  2104. split_huge_pmd(vma, pmd, start);
  2105. pagemap_scan_backout_range(p, start, end);
  2106. /* Report as if there was no THP */
  2107. return -ENOENT;
  2108. }
  2109. make_uffd_wp_pmd(vma, start, pmd);
  2110. flush_tlb_range(vma, start, end);
  2111. out_unlock:
  2112. spin_unlock(ptl);
  2113. return ret;
  2114. #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
  2115. return -ENOENT;
  2116. #endif
  2117. }
  2118. static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
  2119. unsigned long end, struct mm_walk *walk)
  2120. {
  2121. struct pagemap_scan_private *p = walk->private;
  2122. struct vm_area_struct *vma = walk->vma;
  2123. unsigned long addr, flush_end = 0;
  2124. pte_t *pte, *start_pte;
  2125. spinlock_t *ptl;
  2126. int ret;
  2127. arch_enter_lazy_mmu_mode();
  2128. ret = pagemap_scan_thp_entry(pmd, start, end, walk);
  2129. if (ret != -ENOENT) {
  2130. arch_leave_lazy_mmu_mode();
  2131. return ret;
  2132. }
  2133. ret = 0;
  2134. start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
  2135. if (!pte) {
  2136. arch_leave_lazy_mmu_mode();
  2137. walk->action = ACTION_AGAIN;
  2138. return 0;
  2139. }
  2140. if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
  2141. /* Fast path for performing exclusive WP */
  2142. for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
  2143. pte_t ptent = ptep_get(pte);
  2144. if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
  2145. pte_swp_uffd_wp_any(ptent))
  2146. continue;
  2147. make_uffd_wp_pte(vma, addr, pte, ptent);
  2148. if (!flush_end)
  2149. start = addr;
  2150. flush_end = addr + PAGE_SIZE;
  2151. }
  2152. goto flush_and_return;
  2153. }
  2154. if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
  2155. p->arg.category_mask == PAGE_IS_WRITTEN &&
  2156. p->arg.return_mask == PAGE_IS_WRITTEN) {
  2157. for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
  2158. unsigned long next = addr + PAGE_SIZE;
  2159. pte_t ptent = ptep_get(pte);
  2160. if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
  2161. pte_swp_uffd_wp_any(ptent))
  2162. continue;
  2163. ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
  2164. p, addr, &next);
  2165. if (next == addr)
  2166. break;
  2167. if (~p->arg.flags & PM_SCAN_WP_MATCHING)
  2168. continue;
  2169. make_uffd_wp_pte(vma, addr, pte, ptent);
  2170. if (!flush_end)
  2171. start = addr;
  2172. flush_end = next;
  2173. }
  2174. goto flush_and_return;
  2175. }
  2176. for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
  2177. pte_t ptent = ptep_get(pte);
  2178. unsigned long categories = p->cur_vma_category |
  2179. pagemap_page_category(p, vma, addr, ptent);
  2180. unsigned long next = addr + PAGE_SIZE;
  2181. if (!pagemap_scan_is_interesting_page(categories, p))
  2182. continue;
  2183. ret = pagemap_scan_output(categories, p, addr, &next);
  2184. if (next == addr)
  2185. break;
  2186. if (~p->arg.flags & PM_SCAN_WP_MATCHING)
  2187. continue;
  2188. if (~categories & PAGE_IS_WRITTEN)
  2189. continue;
  2190. make_uffd_wp_pte(vma, addr, pte, ptent);
  2191. if (!flush_end)
  2192. start = addr;
  2193. flush_end = next;
  2194. }
  2195. flush_and_return:
  2196. if (flush_end)
  2197. flush_tlb_range(vma, start, addr);
  2198. pte_unmap_unlock(start_pte, ptl);
  2199. arch_leave_lazy_mmu_mode();
  2200. cond_resched();
  2201. return ret;
  2202. }
  2203. #ifdef CONFIG_HUGETLB_PAGE
  2204. static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
  2205. unsigned long start, unsigned long end,
  2206. struct mm_walk *walk)
  2207. {
  2208. struct pagemap_scan_private *p = walk->private;
  2209. struct vm_area_struct *vma = walk->vma;
  2210. unsigned long categories;
  2211. spinlock_t *ptl;
  2212. int ret = 0;
  2213. pte_t pte;
  2214. if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
  2215. /* Go the short route when not write-protecting pages. */
  2216. pte = huge_ptep_get(walk->mm, start, ptep);
  2217. categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
  2218. if (!pagemap_scan_is_interesting_page(categories, p))
  2219. return 0;
  2220. return pagemap_scan_output(categories, p, start, &end);
  2221. }
  2222. i_mmap_lock_write(vma->vm_file->f_mapping);
  2223. ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
  2224. pte = huge_ptep_get(walk->mm, start, ptep);
  2225. categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
  2226. if (!pagemap_scan_is_interesting_page(categories, p))
  2227. goto out_unlock;
  2228. ret = pagemap_scan_output(categories, p, start, &end);
  2229. if (start == end)
  2230. goto out_unlock;
  2231. if (~categories & PAGE_IS_WRITTEN)
  2232. goto out_unlock;
  2233. if (end != start + HPAGE_SIZE) {
  2234. /* Partial HugeTLB page WP isn't possible. */
  2235. pagemap_scan_backout_range(p, start, end);
  2236. p->arg.walk_end = start;
  2237. ret = 0;
  2238. goto out_unlock;
  2239. }
  2240. make_uffd_wp_huge_pte(vma, start, ptep, pte);
  2241. flush_hugetlb_tlb_range(vma, start, end);
  2242. out_unlock:
  2243. spin_unlock(ptl);
  2244. i_mmap_unlock_write(vma->vm_file->f_mapping);
  2245. return ret;
  2246. }
  2247. #else
  2248. #define pagemap_scan_hugetlb_entry NULL
  2249. #endif
  2250. static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
  2251. int depth, struct mm_walk *walk)
  2252. {
  2253. struct pagemap_scan_private *p = walk->private;
  2254. struct vm_area_struct *vma = walk->vma;
  2255. int ret, err;
  2256. if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
  2257. return 0;
  2258. ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
  2259. if (addr == end)
  2260. return ret;
  2261. if (~p->arg.flags & PM_SCAN_WP_MATCHING)
  2262. return ret;
  2263. err = uffd_wp_range(vma, addr, end - addr, true);
  2264. if (err < 0)
  2265. ret = err;
  2266. return ret;
  2267. }
  2268. static const struct mm_walk_ops pagemap_scan_ops = {
  2269. .test_walk = pagemap_scan_test_walk,
  2270. .pmd_entry = pagemap_scan_pmd_entry,
  2271. .pte_hole = pagemap_scan_pte_hole,
  2272. .hugetlb_entry = pagemap_scan_hugetlb_entry,
  2273. };
  2274. static int pagemap_scan_get_args(struct pm_scan_arg *arg,
  2275. unsigned long uarg)
  2276. {
  2277. if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
  2278. return -EFAULT;
  2279. if (arg->size != sizeof(struct pm_scan_arg))
  2280. return -EINVAL;
  2281. /* Validate requested features */
  2282. if (arg->flags & ~PM_SCAN_FLAGS)
  2283. return -EINVAL;
  2284. if ((arg->category_inverted | arg->category_mask |
  2285. arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
  2286. return -EINVAL;
  2287. arg->start = untagged_addr((unsigned long)arg->start);
  2288. arg->end = untagged_addr((unsigned long)arg->end);
  2289. arg->vec = untagged_addr((unsigned long)arg->vec);
  2290. /* Validate memory pointers */
  2291. if (!IS_ALIGNED(arg->start, PAGE_SIZE))
  2292. return -EINVAL;
  2293. if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start))
  2294. return -EFAULT;
  2295. if (!arg->vec && arg->vec_len)
  2296. return -EINVAL;
  2297. if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX)
  2298. return -EINVAL;
  2299. if (arg->vec && !access_ok((void __user *)(long)arg->vec,
  2300. size_mul(arg->vec_len, sizeof(struct page_region))))
  2301. return -EFAULT;
  2302. /* Fixup default values */
  2303. arg->end = ALIGN(arg->end, PAGE_SIZE);
  2304. arg->walk_end = 0;
  2305. if (!arg->max_pages)
  2306. arg->max_pages = ULONG_MAX;
  2307. return 0;
  2308. }
  2309. static int pagemap_scan_writeback_args(struct pm_scan_arg *arg,
  2310. unsigned long uargl)
  2311. {
  2312. struct pm_scan_arg __user *uarg = (void __user *)uargl;
  2313. if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
  2314. return -EFAULT;
  2315. return 0;
  2316. }
  2317. static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
  2318. {
  2319. if (!p->arg.vec_len)
  2320. return 0;
  2321. p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT,
  2322. p->arg.vec_len);
  2323. p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf),
  2324. GFP_KERNEL);
  2325. if (!p->vec_buf)
  2326. return -ENOMEM;
  2327. p->vec_buf->start = p->vec_buf->end = 0;
  2328. p->vec_out = (struct page_region __user *)(long)p->arg.vec;
  2329. return 0;
  2330. }
  2331. static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
  2332. {
  2333. const struct page_region *buf = p->vec_buf;
  2334. long n = p->vec_buf_index;
  2335. if (!p->vec_buf)
  2336. return 0;
  2337. if (buf[n].end != buf[n].start)
  2338. n++;
  2339. if (!n)
  2340. return 0;
  2341. if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
  2342. return -EFAULT;
  2343. p->arg.vec_len -= n;
  2344. p->vec_out += n;
  2345. p->vec_buf_index = 0;
  2346. p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len);
  2347. p->vec_buf->start = p->vec_buf->end = 0;
  2348. return n;
  2349. }
  2350. static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
  2351. {
  2352. struct pagemap_scan_private p = {0};
  2353. unsigned long walk_start;
  2354. size_t n_ranges_out = 0;
  2355. int ret;
  2356. ret = pagemap_scan_get_args(&p.arg, uarg);
  2357. if (ret)
  2358. return ret;
  2359. p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask |
  2360. p.arg.return_mask;
  2361. ret = pagemap_scan_init_bounce_buffer(&p);
  2362. if (ret)
  2363. return ret;
  2364. for (walk_start = p.arg.start; walk_start < p.arg.end;
  2365. walk_start = p.arg.walk_end) {
  2366. struct mmu_notifier_range range;
  2367. long n_out;
  2368. if (fatal_signal_pending(current)) {
  2369. ret = -EINTR;
  2370. break;
  2371. }
  2372. ret = mmap_read_lock_killable(mm);
  2373. if (ret)
  2374. break;
  2375. /* Protection change for the range is going to happen. */
  2376. if (p.arg.flags & PM_SCAN_WP_MATCHING) {
  2377. mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
  2378. mm, walk_start, p.arg.end);
  2379. mmu_notifier_invalidate_range_start(&range);
  2380. }
  2381. ret = walk_page_range(mm, walk_start, p.arg.end,
  2382. &pagemap_scan_ops, &p);
  2383. if (p.arg.flags & PM_SCAN_WP_MATCHING)
  2384. mmu_notifier_invalidate_range_end(&range);
  2385. mmap_read_unlock(mm);
  2386. n_out = pagemap_scan_flush_buffer(&p);
  2387. if (n_out < 0)
  2388. ret = n_out;
  2389. else
  2390. n_ranges_out += n_out;
  2391. if (ret != -ENOSPC)
  2392. break;
  2393. if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
  2394. break;
  2395. }
  2396. /* ENOSPC signifies early stop (buffer full) from the walk. */
  2397. if (!ret || ret == -ENOSPC)
  2398. ret = n_ranges_out;
  2399. /* The walk_end isn't set when ret is zero */
  2400. if (!p.arg.walk_end)
  2401. p.arg.walk_end = p.arg.end;
  2402. if (pagemap_scan_writeback_args(&p.arg, uarg))
  2403. ret = -EFAULT;
  2404. kfree(p.vec_buf);
  2405. return ret;
  2406. }
  2407. static long do_pagemap_cmd(struct file *file, unsigned int cmd,
  2408. unsigned long arg)
  2409. {
  2410. struct mm_struct *mm = file->private_data;
  2411. switch (cmd) {
  2412. case PAGEMAP_SCAN:
  2413. return do_pagemap_scan(mm, arg);
  2414. default:
  2415. return -EINVAL;
  2416. }
  2417. }
  2418. const struct file_operations proc_pagemap_operations = {
  2419. .llseek = mem_lseek, /* borrow this */
  2420. .read = pagemap_read,
  2421. .open = pagemap_open,
  2422. .release = pagemap_release,
  2423. .unlocked_ioctl = do_pagemap_cmd,
  2424. .compat_ioctl = do_pagemap_cmd,
  2425. };
  2426. #endif /* CONFIG_PROC_PAGE_MONITOR */
  2427. #ifdef CONFIG_NUMA
  2428. struct numa_maps {
  2429. unsigned long pages;
  2430. unsigned long anon;
  2431. unsigned long active;
  2432. unsigned long writeback;
  2433. unsigned long mapcount_max;
  2434. unsigned long dirty;
  2435. unsigned long swapcache;
  2436. unsigned long node[MAX_NUMNODES];
  2437. };
  2438. struct numa_maps_private {
  2439. struct proc_maps_private proc_maps;
  2440. struct numa_maps md;
  2441. };
  2442. static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
  2443. unsigned long nr_pages)
  2444. {
  2445. struct folio *folio = page_folio(page);
  2446. int count = folio_precise_page_mapcount(folio, page);
  2447. md->pages += nr_pages;
  2448. if (pte_dirty || folio_test_dirty(folio))
  2449. md->dirty += nr_pages;
  2450. if (folio_test_swapcache(folio))
  2451. md->swapcache += nr_pages;
  2452. if (folio_test_active(folio) || folio_test_unevictable(folio))
  2453. md->active += nr_pages;
  2454. if (folio_test_writeback(folio))
  2455. md->writeback += nr_pages;
  2456. if (folio_test_anon(folio))
  2457. md->anon += nr_pages;
  2458. if (count > md->mapcount_max)
  2459. md->mapcount_max = count;
  2460. md->node[folio_nid(folio)] += nr_pages;
  2461. }
  2462. static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
  2463. unsigned long addr)
  2464. {
  2465. struct page *page;
  2466. int nid;
  2467. if (!pte_present(pte))
  2468. return NULL;
  2469. page = vm_normal_page(vma, addr, pte);
  2470. if (!page || is_zone_device_page(page))
  2471. return NULL;
  2472. if (PageReserved(page))
  2473. return NULL;
  2474. nid = page_to_nid(page);
  2475. if (!node_isset(nid, node_states[N_MEMORY]))
  2476. return NULL;
  2477. return page;
  2478. }
  2479. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  2480. static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
  2481. struct vm_area_struct *vma,
  2482. unsigned long addr)
  2483. {
  2484. struct page *page;
  2485. int nid;
  2486. if (!pmd_present(pmd))
  2487. return NULL;
  2488. page = vm_normal_page_pmd(vma, addr, pmd);
  2489. if (!page)
  2490. return NULL;
  2491. if (PageReserved(page))
  2492. return NULL;
  2493. nid = page_to_nid(page);
  2494. if (!node_isset(nid, node_states[N_MEMORY]))
  2495. return NULL;
  2496. return page;
  2497. }
  2498. #endif
  2499. static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
  2500. unsigned long end, struct mm_walk *walk)
  2501. {
  2502. struct numa_maps *md = walk->private;
  2503. struct vm_area_struct *vma = walk->vma;
  2504. spinlock_t *ptl;
  2505. pte_t *orig_pte;
  2506. pte_t *pte;
  2507. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  2508. ptl = pmd_trans_huge_lock(pmd, vma);
  2509. if (ptl) {
  2510. struct page *page;
  2511. page = can_gather_numa_stats_pmd(*pmd, vma, addr);
  2512. if (page)
  2513. gather_stats(page, md, pmd_dirty(*pmd),
  2514. HPAGE_PMD_SIZE/PAGE_SIZE);
  2515. spin_unlock(ptl);
  2516. return 0;
  2517. }
  2518. #endif
  2519. orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
  2520. if (!pte) {
  2521. walk->action = ACTION_AGAIN;
  2522. return 0;
  2523. }
  2524. do {
  2525. pte_t ptent = ptep_get(pte);
  2526. struct page *page = can_gather_numa_stats(ptent, vma, addr);
  2527. if (!page)
  2528. continue;
  2529. gather_stats(page, md, pte_dirty(ptent), 1);
  2530. } while (pte++, addr += PAGE_SIZE, addr != end);
  2531. pte_unmap_unlock(orig_pte, ptl);
  2532. cond_resched();
  2533. return 0;
  2534. }
  2535. #ifdef CONFIG_HUGETLB_PAGE
  2536. static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
  2537. unsigned long addr, unsigned long end, struct mm_walk *walk)
  2538. {
  2539. pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
  2540. struct numa_maps *md;
  2541. struct page *page;
  2542. if (!pte_present(huge_pte))
  2543. return 0;
  2544. page = pte_page(huge_pte);
  2545. md = walk->private;
  2546. gather_stats(page, md, pte_dirty(huge_pte), 1);
  2547. return 0;
  2548. }
  2549. #else
  2550. static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
  2551. unsigned long addr, unsigned long end, struct mm_walk *walk)
  2552. {
  2553. return 0;
  2554. }
  2555. #endif
  2556. static const struct mm_walk_ops show_numa_ops = {
  2557. .hugetlb_entry = gather_hugetlb_stats,
  2558. .pmd_entry = gather_pte_stats,
  2559. .walk_lock = PGWALK_RDLOCK,
  2560. };
  2561. /*
  2562. * Display pages allocated per node and memory policy via /proc.
  2563. */
  2564. static int show_numa_map(struct seq_file *m, void *v)
  2565. {
  2566. struct numa_maps_private *numa_priv = m->private;
  2567. struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
  2568. struct vm_area_struct *vma = v;
  2569. struct numa_maps *md = &numa_priv->md;
  2570. struct file *file = vma->vm_file;
  2571. struct mm_struct *mm = vma->vm_mm;
  2572. char buffer[64];
  2573. struct mempolicy *pol;
  2574. pgoff_t ilx;
  2575. int nid;
  2576. if (!mm)
  2577. return 0;
  2578. /* Ensure we start with an empty set of numa_maps statistics. */
  2579. memset(md, 0, sizeof(*md));
  2580. pol = __get_vma_policy(vma, vma->vm_start, &ilx);
  2581. if (pol) {
  2582. mpol_to_str(buffer, sizeof(buffer), pol);
  2583. mpol_cond_put(pol);
  2584. } else {
  2585. mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
  2586. }
  2587. seq_printf(m, "%08lx %s", vma->vm_start, buffer);
  2588. if (file) {
  2589. seq_puts(m, " file=");
  2590. seq_path(m, file_user_path(file), "\n\t= ");
  2591. } else if (vma_is_initial_heap(vma)) {
  2592. seq_puts(m, " heap");
  2593. } else if (vma_is_initial_stack(vma)) {
  2594. seq_puts(m, " stack");
  2595. }
  2596. if (is_vm_hugetlb_page(vma))
  2597. seq_puts(m, " huge");
  2598. /* mmap_lock is held by m_start */
  2599. walk_page_vma(vma, &show_numa_ops, md);
  2600. if (!md->pages)
  2601. goto out;
  2602. if (md->anon)
  2603. seq_printf(m, " anon=%lu", md->anon);
  2604. if (md->dirty)
  2605. seq_printf(m, " dirty=%lu", md->dirty);
  2606. if (md->pages != md->anon && md->pages != md->dirty)
  2607. seq_printf(m, " mapped=%lu", md->pages);
  2608. if (md->mapcount_max > 1)
  2609. seq_printf(m, " mapmax=%lu", md->mapcount_max);
  2610. if (md->swapcache)
  2611. seq_printf(m, " swapcache=%lu", md->swapcache);
  2612. if (md->active < md->pages && !is_vm_hugetlb_page(vma))
  2613. seq_printf(m, " active=%lu", md->active);
  2614. if (md->writeback)
  2615. seq_printf(m, " writeback=%lu", md->writeback);
  2616. for_each_node_state(nid, N_MEMORY)
  2617. if (md->node[nid])
  2618. seq_printf(m, " N%d=%lu", nid, md->node[nid]);
  2619. seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
  2620. out:
  2621. seq_putc(m, '\n');
  2622. return 0;
  2623. }
  2624. static const struct seq_operations proc_pid_numa_maps_op = {
  2625. .start = m_start,
  2626. .next = m_next,
  2627. .stop = m_stop,
  2628. .show = show_numa_map,
  2629. };
  2630. static int pid_numa_maps_open(struct inode *inode, struct file *file)
  2631. {
  2632. return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
  2633. sizeof(struct numa_maps_private));
  2634. }
  2635. const struct file_operations proc_pid_numa_maps_operations = {
  2636. .open = pid_numa_maps_open,
  2637. .read = seq_read,
  2638. .llseek = seq_lseek,
  2639. .release = proc_map_release,
  2640. };
  2641. #endif /* CONFIG_NUMA */