
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/vmstat.c
 *
 * Manages VM statistics
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * zoned VM statistics
 * Copyright (C) 2006 Silicon Graphics, Inc.,
 *	Christoph Lameter <christoph@lameter.com>
 * Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_owner.h>
#include <linux/sched/isolation.h>

#include "internal.h"

#ifdef CONFIG_NUMA
int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;

/* zero numa counters within a zone */
static void zero_zone_numa_counters(struct zone *zone)
{
	int item, cpu;

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
		atomic_long_set(&zone->vm_numa_event[item], 0);
		for_each_online_cpu(cpu) {
			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
						= 0;
		}
	}
}

/* zero numa counters of all the populated zones */
static void zero_zones_numa_counters(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zero_zone_numa_counters(zone);
}

/* zero global numa counters */
static void zero_global_numa_counters(void)
{
	int item;

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
		atomic_long_set(&vm_numa_event[item], 0);
}

static void invalid_numa_statistics(void)
{
	zero_zones_numa_counters();
	zero_global_numa_counters();
}

static DEFINE_MUTEX(vm_numa_stat_lock);

int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int ret, oldval;

	mutex_lock(&vm_numa_stat_lock);
	if (write)
		oldval = sysctl_vm_numa_stat;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret || !write)
		goto out;

	if (oldval == sysctl_vm_numa_stat)
		goto out;
	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
		static_branch_enable(&vm_numa_stat_key);
		pr_info("enable numa statistics\n");
	} else {
		static_branch_disable(&vm_numa_stat_key);
		invalid_numa_statistics();
		pr_info("disable numa statistics, and clear numa counters\n");
	}

out:
	mutex_unlock(&vm_numa_stat_lock);
	return ret;
}
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	cpus_read_lock();
	sum_vm_events(ret);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_node_stat);

#ifdef CONFIG_NUMA
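/*
 * Fold this zone's per-CPU NUMA event counts into the zone-wide atomic
 * counters, zeroing each per-CPU value as it is collected.
 */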
static void fold_vm_zone_numa_events(struct zone *zone)
{
	unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
	int cpu;
	enum numa_stat_item item;

	for_each_online_cpu(cpu) {
		struct per_cpu_zonestat *pzstats;

		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
			zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
	}

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
		zone_numa_event_add(zone_numa_events[item], zone, item);
}

void fold_vm_numa_events(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		fold_vm_zone_numa_events(zone);
}
#endif

#ifdef CONFIG_SMP
int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem)+1
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */
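	/*
	 * Worked example (from the table above): with 16 online CPUs
	 * (fls = 5) and a ~900 MB zone (fls(mem) + 1 = 4), the computation
	 * below gives 2 * 5 * 4 = 40.
	 */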
	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int cpu;
	int threshold;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
		}
	}

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
				= max(threshold, pgdat_threshold);
		}

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	/*
	 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
	 * atomicity is provided by IRQs being disabled -- either explicitly
	 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
	 * CPU migrations and preemption potentially corrupts a counter so
	 * disable preemption.
	 */
	preempt_disable_nested();

	x = delta + __this_cpu_read(*p);
	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);

	preempt_enable_nested();
}
EXPORT_SYMBOL(__mod_zone_page_state);

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			   long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	/* See __mod_zone_page_state */
	preempt_disable_nested();

	x = delta + __this_cpu_read(*p);
	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);

	preempt_enable_nested();
}
EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	/* See __mod_node_page_state */
	preempt_disable_nested();

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}

	preempt_enable_nested();
}

void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	/* See __mod_node_page_state */
	preempt_disable_nested();

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v + overstep, pgdat, item);
		__this_cpu_write(*p, -overstep);
	}

	preempt_enable_nested();
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__inc_node_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	/* See __mod_node_page_state */
	preempt_disable_nested();

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < - t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}

	preempt_enable_nested();
}

void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	/* See __mod_node_page_state */
	preempt_disable_nested();

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < - t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v - overstep, pgdat, item);
		__this_cpu_write(*p, overstep);
	}

	preempt_enable_nested();
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *	0	No overstepping
 *	1	Overstepping half of threshold
 *	-1	Overstepping minus half of threshold
 */
static inline void mod_zone_state(struct zone *zone,
	enum zone_stat_item item, long delta, int overstep_mode)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long n, t, z;
	s8 o;

	o = this_cpu_read(*p);
	do {
		z = 0;	/* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		n = delta + (long)o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (!this_cpu_try_cmpxchg(*p, &o, n));

	if (z)
		zone_page_state_add(z, zone, item);
}

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);

static inline void mod_node_state(struct pglist_data *pgdat,
	enum node_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long n, t, z;
	s8 o;

	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	o = this_cpu_read(*p);
	do {
		z = 0;	/* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a node.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		n = delta + (long)o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to node counters */
			z = n + os;
			n = -os;
		}
	} while (!this_cpu_try_cmpxchg(*p, &o, n));

	if (z)
		node_page_state_add(z, pgdat, item);
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			 long delta)
{
	mod_node_state(pgdat, item, delta, 0);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	mod_node_state(pgdat, item, 1, 1);
}

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_node_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_state);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_node_page_state(pgdat, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;
	struct pglist_data *pgdat;

	pgdat = page_pgdat(page);
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_node_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_node_page_state);
#endif

/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
static int fold_diff(int *zone_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
		}
	return changes;
}

/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
		struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(pcp->expire, 3);
#endif
			}
		}

		if (do_pagesets) {
			cond_resched();

			changes += decay_pcp_high(zone, this_cpu_ptr(pcp));
#ifdef CONFIG_NUMA
			/*
			 * Deal with draining the remote pageset of this
			 * processor
			 *
			 * Check if there are pages remaining in this pageset
			 * if not then there is nothing to expire.
			 */
			if (!__this_cpu_read(pcp->expire) ||
			    !__this_cpu_read(pcp->count))
				continue;

			/*
			 * We never drain zones local to this processor.
			 */
			if (zone_to_nid(zone) == numa_node_id()) {
				__this_cpu_write(pcp->expire, 0);
				continue;
			}

			if (__this_cpu_dec_return(pcp->expire)) {
				changes++;
				continue;
			}

			if (__this_cpu_read(pcp->count)) {
				drain_zone_pages(zone, this_cpu_ptr(pcp));
				changes++;
			}
#endif
		}
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
		}
	}

	changes += fold_diff(global_zone_diff, global_node_diff);
	return changes;
}

/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats;

		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			if (pzstats->vm_stat_diff[i]) {
				int v;

				v = pzstats->vm_stat_diff[i];
				pzstats->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
			}
		}
#ifdef CONFIG_NUMA
		for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
			if (pzstats->vm_numa_event[i]) {
				unsigned long v;

				v = pzstats->vm_numa_event[i];
				pzstats->vm_numa_event[i] = 0;
				zone_numa_event_add(v, zone, i);
			}
		}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			if (p->vm_node_stat_diff[i]) {
				int v;

				v = p->vm_node_stat_diff[i];
				p->vm_node_stat_diff[i] = 0;
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
	}

	fold_diff(global_zone_diff, global_node_diff);
}

/*
 * this is only called if !populated_zone(zone), which implies no other users of
 * pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
{
	unsigned long v;
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		if (pzstats->vm_stat_diff[i]) {
			v = pzstats->vm_stat_diff[i];
			pzstats->vm_stat_diff[i] = 0;
			zone_page_state_add(v, zone, i);
		}
	}

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
		if (pzstats->vm_numa_event[i]) {
			v = pzstats->vm_numa_event[i];
			pzstats->vm_numa_event[i] = 0;
			zone_numa_event_add(v, zone, i);
		}
	}
#endif
}
#endif

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
unsigned long sum_zone_node_page_state(int node,
				       enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

	return count;
}

/* Determine the per node value of a numa stat item. */
unsigned long sum_zone_numa_event_state(int node,
					enum numa_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	unsigned long count = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_numa_event_state(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state_pages(struct pglist_data *pgdat,
				    enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long node_page_state(struct pglist_data *pgdat,
			      enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return node_page_state_pages(pgdat, item);
}
#endif

/*
 * Count number of pages "struct page" and "struct page_ext" consume.
 * nr_memmap_boot_pages: # of pages allocated by boot allocator
 * nr_memmap_pages: # of pages that were allocated by buddy allocator
 */
static atomic_long_t nr_memmap_boot_pages = ATOMIC_LONG_INIT(0);
static atomic_long_t nr_memmap_pages = ATOMIC_LONG_INIT(0);

void memmap_boot_pages_add(long delta)
{
	atomic_long_add(delta, &nr_memmap_boot_pages);
}

void memmap_pages_add(long delta)
{
	atomic_long_add(delta, &nr_memmap_pages);
}

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < NR_PAGE_ORDERS; order++) {
		unsigned long blocks;

		/*
		 * Count number of free blocks.
		 *
		 * Access to nr_free is lockless as nr_free is used only for
		 * diagnostic purposes. Use data_race to avoid KCSAN warning.
		 */
		blocks = data_race(zone->free_area[order].nr_free);
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (WARN_ON_ONCE(order > MAX_PAGE_ORDER))
		return 0;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
}

/*
 * Calculates external fragmentation within a zone wrt the given order.
 * It is defined as the percentage of pages found in blocks of size
 * less than 1 << order. It returns values in range [0, 100].
 */
unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	if (info.free_pages == 0)
		return 0;

	return div_u64((info.free_pages -
			(info.free_blocks_suitable << order)) * 100,
			info.free_pages);
}

/* Same as __fragmentation index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#ifdef CONFIG_ZONE_DEVICE
#define TEXT_FOR_DEVICE(xx) xx "_device",
#else
#define TEXT_FOR_DEVICE(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable", \
					TEXT_FOR_DEVICE(xx)

const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_free_pages",
	"nr_zone_inactive_anon",
	"nr_zone_active_anon",
	"nr_zone_inactive_file",
	"nr_zone_active_file",
	"nr_zone_unevictable",
	"nr_zone_write_pending",
	"nr_mlock",
	"nr_bounce",
#if IS_ENABLED(CONFIG_ZSMALLOC)
	"nr_zspages",
#endif
	"nr_free_cma",
#ifdef CONFIG_UNACCEPTED_MEMORY
	"nr_unaccepted",
#endif

	/* enum numa_stat_item counters */
#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

	/* enum node_stat_item counters */
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_isolated_anon",
	"nr_isolated_file",
	"workingset_nodes",
	"workingset_refault_anon",
	"workingset_refault_file",
	"workingset_activate_anon",
	"workingset_activate_file",
	"workingset_restore_anon",
	"workingset_restore_file",
	"workingset_nodereclaim",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_writeback_temp",
	"nr_shmem",
	"nr_shmem_hugepages",
	"nr_shmem_pmdmapped",
	"nr_file_hugepages",
	"nr_file_pmdmapped",
	"nr_anon_transparent_hugepages",
	"nr_vmscan_write",
	"nr_vmscan_immediate_reclaim",
	"nr_dirtied",
	"nr_written",
	"nr_throttled_written",
	"nr_kernel_misc_reclaimable",
	"nr_foll_pin_acquired",
	"nr_foll_pin_released",
	"nr_kernel_stack",
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	"nr_shadow_call_stack",
#endif
	"nr_page_table_pages",
	"nr_sec_page_table_pages",
#ifdef CONFIG_IOMMU_SUPPORT
	"nr_iommu_pages",
#endif
#ifdef CONFIG_SWAP
	"nr_swapcached",
#endif
#ifdef CONFIG_NUMA_BALANCING
	"pgpromote_success",
	"pgpromote_candidate",
#endif
	"pgdemote_kswapd",
	"pgdemote_direct",
	"pgdemote_khugepaged",

	/* system-wide enum vm_stat_item counters */
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",
	"nr_memmap_pages",
	"nr_memmap_boot_pages",

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
	/* enum vm_event_item counters */
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")
	TEXTS_FOR_ZONES("allocstall")
	TEXTS_FOR_ZONES("pgskip")

	"pgfree",
	"pgactivate",
	"pgdeactivate",
	"pglazyfree",
	"pgfault",
	"pgmajfault",
	"pglazyfreed",
	"pgrefill",
	"pgreuse",
	"pgsteal_kswapd",
	"pgsteal_direct",
	"pgsteal_khugepaged",
	"pgscan_kswapd",
	"pgscan_direct",
	"pgscan_khugepaged",
	"pgscan_direct_throttle",
	"pgscan_anon",
	"pgscan_file",
	"pgsteal_anon",
	"pgsteal_file",

#ifdef CONFIG_NUMA
	"zone_reclaim_success",
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"pageoutrun",
	"pgrotated",
	"drop_pagecache",
	"drop_slab",
	"oom_kill",

#ifdef CONFIG_NUMA_BALANCING
	"numa_pte_updates",
	"numa_huge_pte_updates",
	"numa_hint_faults",
	"numa_hint_faults_local",
	"numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
	"pgmigrate_success",
	"pgmigrate_fail",
	"thp_migration_success",
	"thp_migration_fail",
	"thp_migration_split",
#endif
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
	"compact_isolated",
	"compact_stall",
	"compact_fail",
	"compact_success",
	"compact_daemon_wake",
	"compact_daemon_migrate_scanned",
	"compact_daemon_free_scanned",
#endif
#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
#ifdef CONFIG_CMA
	"cma_alloc_success",
	"cma_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_fault_fallback_charge",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_file_alloc",
	"thp_file_fallback",
	"thp_file_fallback_charge",
	"thp_file_mapped",
	"thp_split_page",
	"thp_split_page_failed",
	"thp_deferred_split_page",
	"thp_underused_split_page",
	"thp_split_pmd",
	"thp_scan_exceed_none_pte",
	"thp_scan_exceed_swap_pte",
	"thp_scan_exceed_share_pte",
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	"thp_split_pud",
#endif
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
	"thp_swpout",
	"thp_swpout_fallback",
#endif
#ifdef CONFIG_MEMORY_BALLOON
	"balloon_inflate",
	"balloon_deflate",
#ifdef CONFIG_BALLOON_COMPACTION
	"balloon_migrate",
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
	"nr_tlb_remote_flush",
	"nr_tlb_remote_flush_received",
	"nr_tlb_local_flush_all",
	"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_SWAP
	"swap_ra",
	"swap_ra_hit",
	"swpin_zero",
	"swpout_zero",
#ifdef CONFIG_KSM
	"ksm_swpin_copy",
#endif
#endif
#ifdef CONFIG_KSM
	"cow_ksm",
#endif
#ifdef CONFIG_ZSWAP
	"zswpin",
	"zswpout",
	"zswpwb",
#endif
#ifdef CONFIG_X86
	"direct_map_level2_splits",
	"direct_map_level3_splits",
#endif
#ifdef CONFIG_PER_VMA_LOCK_STATS
	"vma_lock_success",
	"vma_lock_abort",
	"vma_lock_retry",
	"vma_lock_miss",
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
	"kstack_1k",
#if THREAD_SIZE > 1024
	"kstack_2k",
#endif
#if THREAD_SIZE > 2048
	"kstack_4k",
#endif
#if THREAD_SIZE > 4096
	"kstack_8k",
#endif
#if THREAD_SIZE > 8192
	"kstack_16k",
#endif
#if THREAD_SIZE > 16384
	"kstack_32k",
#endif
#if THREAD_SIZE > 32768
	"kstack_64k",
#endif
#if THREAD_SIZE > 65536
	"kstack_rest",
#endif
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
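/*
 * seq_file iteration helpers shared by the interfaces below: walk the
 * online pgdats, treating *pos as the number of nodes to skip.
 */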
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * Walk zones in a node and print using a callback.
 * If @assert_populated is true, only use callback for zones that are populated.
 */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		bool assert_populated, bool nolock,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (assert_populated && !populated_zone(zone))
			continue;

		if (!nolock)
			spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		if (!nolock)
			spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order)
		/*
		 * Access to nr_free is lockless as nr_free is used only for
		 * printing purposes. Use data_race to avoid KCSAN warning.
		 */
		seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
	return 0;
}
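/*
 * Print, for one zone, the current number of free pages of each order,
 * broken down by migratetype.
 */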
static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < NR_PAGE_ORDERS; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;
			bool overflow = false;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype]) {
				/*
				 * Cap the free_list iteration because it might
				 * be really large and we are under a spinlock
				 * so a long time spent here could trigger a
				 * hard lockup detector. Anyway this is a
				 * debugging tool so knowing there is a handful
				 * of pages of this order should be more than
				 * sufficient.
				 */
				if (++freecount >= 100000) {
					overflow = true;
					break;
				}
			}
			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
			spin_unlock_irq(&zone->lock);
			cond_resched();
			spin_lock_irq(&zone->lock);
		}
		seq_putc(m, '\n');
	}
}
/* Print out the free pages at each order for each migratetype */
  1367. static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
  1368. {
  1369. int order;
  1370. pg_data_t *pgdat = (pg_data_t *)arg;
  1371. /* Print header */
  1372. seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
  1373. for (order = 0; order < NR_PAGE_ORDERS; ++order)
  1374. seq_printf(m, "%6d ", order);
  1375. seq_putc(m, '\n');
  1376. walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
  1377. }
static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;

		if (page_zone(page) != zone)
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, true, false,
		pagetypeinfo_showblockcount_print);
}

/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on.
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!static_branch_unlikely(&page_owner_inited))
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, true,
		pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}

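/*
 * The mixed-blocks section above is only emitted when page_owner tracking
 * is active, which typically means a kernel built with CONFIG_PAGE_OWNER
 * and booted with the page_owner=on parameter; otherwise page_owner_inited
 * stays false and the section is skipped entirely.
 */
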
/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}

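/*
 * Sketch of the overall /proc/pagetypeinfo layout assembled here (values
 * hypothetical):
 *
 *   Page block order: 9
 *   Pages per block: 512
 *
 *   Free pages count per migrate type at order       0      1      2 ...
 *   ...
 *   Number of blocks type     Unmovable      Movable  Reclaimable ...
 *   Number of mixed blocks    ...                     (page_owner only)
 */
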
static const struct seq_operations fragmentation_op = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = frag_show,
};

static const struct seq_operations pagetypeinfo_op = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = pagetypeinfo_show,
};

static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *compare = &pgdat->node_zones[zid];

		if (populated_zone(compare))
			return zone == compare;
	}

	return false;
}

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int i;

	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	if (is_zone_first_populated(pgdat, zone)) {
		seq_printf(m, "\n per-node stats");
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			unsigned long pages = node_page_state_pages(pgdat, i);

			if (vmstat_item_print_in_thp(i))
				pages /= HPAGE_PMD_NR;
			seq_printf(m, "\n %-12s %lu", node_stat_name(i),
				   pages);
		}
	}
	seq_printf(m,
		   "\n pages free %lu"
		   "\n boost %lu"
		   "\n min %lu"
		   "\n low %lu"
		   "\n high %lu"
		   "\n promo %lu"
		   "\n spanned %lu"
		   "\n present %lu"
		   "\n managed %lu"
		   "\n cma %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   zone->watermark_boost,
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   promo_wmark_pages(zone),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone_managed_pages(zone),
		   zone_cma_pages(zone));

	seq_printf(m,
		   "\n protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_putc(m, ')');

	/* If unpopulated, no other information is useful */
	if (!populated_zone(zone)) {
		seq_putc(m, '\n');
		return;
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n %-12s %lu", zone_stat_name(i),
			   zone_page_state(zone, i));

#ifdef CONFIG_NUMA
	fold_vm_zone_numa_events(zone);
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		seq_printf(m, "\n %-12s %lu", numa_stat_name(i),
			   zone_numa_event_state(zone, i));
#endif

	seq_printf(m, "\n pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pages *pcp;
		struct per_cpu_zonestat __maybe_unused *pzstats;

		pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
		seq_printf(m,
			   "\n cpu: %i"
			   "\n count: %i"
			   "\n high: %i"
			   "\n batch: %i",
			   i,
			   pcp->count,
			   pcp->high,
			   pcp->batch);
#ifdef CONFIG_SMP
		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
		seq_printf(m, "\n vm stats threshold: %d",
			   pzstats->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n node_unreclaimable: %u"
		   "\n start_pfn: %lu",
		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
		   zone->zone_start_pfn);
	seq_putc(m, '\n');
}

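/*
 * Illustrative fragment of the per-zone block emitted above for
 * /proc/zoneinfo (numbers hypothetical, whitespace approximate):
 *
 *   Node 0, zone   Normal
 *    pages free 145823
 *    boost 0
 *    min 12075
 *    ...
 *    protection: (0, 0, 0, 0)
 *    ...
 *    pagesets
 *    cpu: 0
 *    count: 113
 *    high: 186
 *    batch: 31
 *    ...
 *    start_pfn: 1048576
 */
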
/*
 * Output information about zones in @pgdat. All zones are printed regardless
 * of whether they are populated or not: lowmem_reserve_ratio operates on the
 * set of all zones and userspace would not be aware of such zones if they are
 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start = frag_start, /* iterate over all zones. The same as in
			      * fragmentation. */
	.next = frag_next,
	.stop = frag_stop,
	.show = zoneinfo_show,
};

#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
			 NR_VM_NUMA_EVENT_ITEMS + \
			 NR_VM_NODE_STAT_ITEMS + \
			 NR_VM_STAT_ITEMS + \
			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
			  NR_VM_EVENT_ITEMS : 0))

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i;

	if (*pos >= NR_VMSTAT_ITEMS)
		return NULL;

	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
	fold_vm_numa_events();
	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_zone_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		v[i] = global_numa_event_state(i);
	v += NR_VM_NUMA_EVENT_ITEMS;
#endif

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		v[i] = global_node_page_state_pages(i);
		if (vmstat_item_print_in_thp(i))
			v[i] /= HPAGE_PMD_NR;
	}
	v += NR_VM_NODE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v[NR_MEMMAP_PAGES] = atomic_long_read(&nr_memmap_pages);
	v[NR_MEMMAP_BOOT_PAGES] = atomic_long_read(&nr_memmap_boot_pages);
	v += NR_VM_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= NR_VMSTAT_ITEMS)
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_puts(m, vmstat_text[off]);
	seq_put_decimal_ull(m, " ", *l);
	seq_putc(m, '\n');

	if (off == NR_VMSTAT_ITEMS - 1) {
		/*
		 * We've come to the end - add any deprecated counters to avoid
		 * breaking userspace which might depend on them being present.
		 */
		seq_puts(m, "nr_unstable 0\n");
	}
	return 0;
}

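/*
 * Each /proc/vmstat line pairs a counter name from vmstat_text with its
 * value, e.g. (hypothetical numbers):
 *
 *   nr_free_pages 3261504
 *   nr_dirty 152
 *   ...
 *   nr_unstable 0
 *
 * where the final nr_unstable line is the compatibility stub appended above.
 */
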
static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start = vmstat_start,
	.next = vmstat_next,
	.stop = vmstat_stop,
	.show = vmstat_show,
};

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
{
	refresh_cpu_vm_stats(true);
}

int vmstat_refresh(const struct ctl_table *table, int write,
		   void *buffer, size_t *lenp, loff_t *ppos)
{
	long val;
	int err;
	int i;

	/*
	 * The regular update, every sysctl_stat_interval, may come later
	 * than expected, leaving a significant amount in per_cpu buckets.
	 * This is particularly misleading when checking a quantity of HUGE
	 * pages, immediately after running a test. /proc/sys/vm/stat_refresh,
	 * which can equally be echo'ed to or cat'ted from (by root),
	 * can be used to update the stats just before reading them.
	 *
	 * Oh, and since global_zone_page_state() etc. are so careful to hide
	 * transiently negative values, log a warning here if any of
	 * the stats is negative, so we know to go looking for imbalance.
	 */
	err = schedule_on_each_cpu(refresh_vm_stats);
	if (err)
		return err;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		/*
		 * Skip checking stats known to go negative occasionally.
		 */
		switch (i) {
		case NR_ZONE_WRITE_PENDING:
		case NR_FREE_CMA_PAGES:
			continue;
		}
		val = atomic_long_read(&vm_zone_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, zone_stat_name(i), val);
		}
	}
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		/*
		 * Skip checking stats known to go negative occasionally.
		 */
		switch (i) {
		case NR_WRITEBACK:
			continue;
		}
		val = atomic_long_read(&vm_node_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, node_stat_name(i), val);
		}
	}
	if (write)
		*ppos += *lenp;
	else
		*lenp = 0;
	return 0;
}

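/*
 * Typical use from userspace, as described in the comment above: force a
 * flush of the per-cpu deltas right before sampling, e.g.
 *
 *   # echo 1 > /proc/sys/vm/stat_refresh
 *   # grep nr_free_pages /proc/vmstat
 *
 * Reading the file (cat /proc/sys/vm/stat_refresh) triggers the same flush.
 */
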
#endif /* CONFIG_PROC_FS */

static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats(true)) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
				this_cpu_ptr(&vmstat_work),
				round_jiffies_relative(sysctl_stat_interval));
	}
}

/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	pg_data_t *last_pgdat = NULL;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		struct per_cpu_nodestat *n;

		/*
		 * The fast way of checking if there are any vmstat diffs.
		 */
		if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
			return true;

		if (last_pgdat == zone->zone_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
		if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
			return true;
	}
	return false;
}

/*
 * Switch off vmstat processing and then fold all the remaining differentials
 * until the diffs stay at zero. The function is used by NOHZ and can only be
 * invoked when tick processing is not active.
 */
void quiet_vmstat(void)
{
	if (system_state != SYSTEM_RUNNING)
		return;

	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
		return;

	if (!need_update(smp_processor_id()))
		return;

	/*
	 * Just refresh counters and do not care about the pending delayed
	 * vmstat_update. It doesn't fire often enough to matter, and
	 * cancelling it would be too expensive from this path.
	 * vmstat_shepherd will take care of it for us.
	 */
	refresh_cpu_vm_stats(false);
}

/*
 * Shepherd worker thread that periodically checks the differentials of
 * processors whose vmstat update worker threads have been disabled due to
 * inactivity.
 */
static void vmstat_shepherd(struct work_struct *w);

static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	cpus_read_lock();

	/* Check processors whose vmstat worker threads have been disabled */
	for_each_online_cpu(cpu) {
		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

		/*
		 * In-kernel users of vmstat counters either require the
		 * precise value, in which case they use the
		 * zone_page_state_snapshot interface, or they can live with
		 * an imprecision, as the regular flushing can happen at an
		 * arbitrary time and the cumulative error can grow (see
		 * calculate_normal_threshold).
		 *
		 * From that POV the regular flushing can be postponed for
		 * CPUs that have been isolated from kernel interference
		 * without critical infrastructure ever noticing. Skip
		 * regular flushing from vmstat_shepherd for all isolated
		 * CPUs to avoid interference with the isolated workload.
		 */
		if (cpu_is_isolated(cpu))
			continue;

		if (!delayed_work_pending(dw) && need_update(cpu))
			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);

		cond_resched();
	}

	cpus_read_unlock();

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init start_shepherd_timer(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init init_cpu_node_state(void)
{
	int node;

	for_each_online_node(node) {
		if (!cpumask_empty(cpumask_of_node(node)))
			node_set_state(node, N_CPU);
	}
}

static int vmstat_cpu_online(unsigned int cpu)
{
	refresh_zone_stat_thresholds();

	if (!node_state(cpu_to_node(cpu), N_CPU))
		node_set_state(cpu_to_node(cpu), N_CPU);

	return 0;
}

static int vmstat_cpu_down_prep(unsigned int cpu)
{
	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	return 0;
}

static int vmstat_cpu_dead(unsigned int cpu)
{
	const struct cpumask *node_cpus;
	int node;

	node = cpu_to_node(cpu);

	refresh_zone_stat_thresholds();
	node_cpus = cpumask_of_node(node);
	if (!cpumask_empty(node_cpus))
		return 0;

	node_clear_state(node, N_CPU);

	return 0;
}

#endif

struct workqueue_struct *mm_percpu_wq;

void __init init_mm_internals(void)
{
	int ret __maybe_unused;

	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);

#ifdef CONFIG_SMP
	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
					NULL, vmstat_cpu_dead);
	if (ret < 0)
		pr_err("vmstat: failed to register 'dead' hotplug state\n");

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
					vmstat_cpu_online,
					vmstat_cpu_down_prep);
	if (ret < 0)
		pr_err("vmstat: failed to register 'online' hotplug state\n");

	cpus_read_lock();
	init_cpu_node_state();
	cpus_read_unlock();

	start_shepherd_timer();
#endif
#ifdef CONFIG_PROC_FS
	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
#endif
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
}

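/*
 * Worked example (hypothetical numbers): with info->free_pages == 1000 and
 * info->free_blocks_suitable == 200 for an order-2 request, the suitable
 * pages are 200 << 2 = 800, so the index is (1000 - 800) * 1000 / 1000 = 200,
 * which prints as 0.200 below: 20% of the free memory is unusable for
 * allocations of that size.
 */
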
static void unusable_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of the free memory
 * is unusable and, by implication, the worse the external fragmentation is.
 * This can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_sops = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = unusable_show,
};

DEFINE_SEQ_ATTRIBUTE(unusable);

static void extfrag_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

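/*
 * Illustrative line from /sys/kernel/debug/extfrag/extfrag_index (values
 * hypothetical): orders for which the allocation would currently succeed
 * report -1.000; for higher orders the index indicates how much of the
 * failure is due to fragmentation rather than a lack of free memory:
 *
 *   Node 0, zone   Normal -1.000 -1.000 -1.000 -1.000  0.931  0.966 ...
 */
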
/*
 * Display fragmentation index for orders that allocations would fail for
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_sops = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = extfrag_show,
};

DEFINE_SEQ_ATTRIBUTE(extfrag);

static int __init extfrag_debug_init(void)
{
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);

	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
			    &unusable_fops);

	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
			    &extfrag_fops);

	return 0;
}

module_init(extfrag_debug_init);

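/*
 * Usage sketch (assuming debugfs is mounted at the usual location):
 *
 *   # cat /sys/kernel/debug/extfrag/unusable_index
 *   # cat /sys/kernel/debug/extfrag/extfrag_index
 *
 * Both files reuse the frag_start/frag_next/frag_stop iterators, so they
 * emit one line per populated zone of every online node.
 */
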
#endif