ksm.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Memory merging support.
  4. *
  5. * This code enables dynamic sharing of identical pages found in different
  6. * memory areas, even if they are not shared by fork()
  7. *
  8. * Copyright (C) 2008-2009 Red Hat, Inc.
  9. * Authors:
  10. * Izik Eidus
  11. * Andrea Arcangeli
  12. * Chris Wright
  13. * Hugh Dickins
  14. */
  15. #include <linux/errno.h>
  16. #include <linux/mm.h>
  17. #include <linux/mm_inline.h>
  18. #include <linux/fs.h>
  19. #include <linux/mman.h>
  20. #include <linux/sched.h>
  21. #include <linux/sched/mm.h>
  22. #include <linux/sched/coredump.h>
  23. #include <linux/sched/cputime.h>
  24. #include <linux/rwsem.h>
  25. #include <linux/pagemap.h>
  26. #include <linux/rmap.h>
  27. #include <linux/spinlock.h>
  28. #include <linux/xxhash.h>
  29. #include <linux/delay.h>
  30. #include <linux/kthread.h>
  31. #include <linux/wait.h>
  32. #include <linux/slab.h>
  33. #include <linux/rbtree.h>
  34. #include <linux/memory.h>
  35. #include <linux/mmu_notifier.h>
  36. #include <linux/swap.h>
  37. #include <linux/ksm.h>
  38. #include <linux/hashtable.h>
  39. #include <linux/freezer.h>
  40. #include <linux/oom.h>
  41. #include <linux/numa.h>
  42. #include <linux/pagewalk.h>
  43. #include <asm/tlbflush.h>
  44. #include "internal.h"
  45. #include "mm_slot.h"
  46. #define CREATE_TRACE_POINTS
  47. #include <trace/events/ksm.h>
  48. #ifdef CONFIG_NUMA
  49. #define NUMA(x) (x)
  50. #define DO_NUMA(x) do { (x); } while (0)
  51. #else
  52. #define NUMA(x) (0)
  53. #define DO_NUMA(x) do { } while (0)
  54. #endif
  55. typedef u8 rmap_age_t;
  56. /**
  57. * DOC: Overview
  58. *
  59. * A few notes about the KSM scanning process,
  60. * to make it easier to understand the data structures below:
  61. *
  62. * In order to reduce excessive scanning, KSM sorts the memory pages by their
  63. * contents into a data structure that holds pointers to the pages' locations.
  64. *
  65. * Since the contents of the pages may change at any moment, KSM cannot just
  66. * insert the pages into a normal sorted tree and expect it to find anything.
  67. * Therefore KSM uses two data structures - the stable and the unstable tree.
  68. *
  69. * The stable tree holds pointers to all the merged pages (ksm pages), sorted
  70. * by their contents. Because each such page is write-protected, searching on
  71. * this tree is fully assured to be working (except when pages are unmapped),
  72. * and therefore this tree is called the stable tree.
  73. *
  74. * The stable tree node includes information required for reverse
  75. * mapping from a KSM page to virtual addresses that map this page.
  76. *
  77. * In order to avoid large latencies of the rmap walks on KSM pages,
  78. * KSM maintains two types of nodes in the stable tree:
  79. *
  80. * * the regular nodes that keep the reverse mapping structures in a
  81. * linked list
  82. * * the "chains" that link nodes ("dups") that represent the same
  83. * write protected memory content, but each "dup" corresponds to a
  84. * different KSM page copy of that content
  85. *
  86. * Internally, the regular nodes, "dups" and "chains" are represented
  87. * using the same struct ksm_stable_node structure.
  88. *
  89. * In addition to the stable tree, KSM uses a second data structure called the
  90. * unstable tree: this tree holds pointers to pages which have been found to
  91. * be "unchanged for a period of time". The unstable tree sorts these pages
  92. * by their contents, but since they are not write-protected, KSM cannot rely
  93. * upon the unstable tree to work correctly - the unstable tree is liable to
  94. * be corrupted as its contents are modified, and so it is called unstable.
  95. *
  96. * KSM solves this problem by several techniques:
  97. *
  98. * 1) The unstable tree is flushed every time KSM completes scanning all
  99. * memory areas, and then the tree is rebuilt again from the beginning.
  100. * 2) KSM will insert into the unstable tree only those pages whose hash value
  101. * has not changed since the previous scan of all memory areas.
  102. * 3) The unstable tree is a red-black tree - so its balancing is based on the
  103. * colors of the nodes and not on their contents, assuring that even when
  104. * the tree gets "corrupted" it won't get out of balance, so scanning time
  105. * remains the same (also, searching and inserting nodes in an rbtree uses
  106. * the same algorithm, so we have no overhead when we flush and rebuild).
  107. * 4) KSM never flushes the stable tree, which means that even if it were to
  108. * take 10 attempts to find a page in the unstable tree, once it is found,
  109. * it is secured in the stable tree. (When we scan a new page, we first
  110. * compare it against the stable tree, and then against the unstable tree.)
  111. *
  112. * If the merge_across_nodes tunable is unset, then KSM maintains multiple
  113. * stable trees and multiple unstable trees: one of each for each NUMA node.
  114. */
  115. /**
  116. * struct ksm_mm_slot - ksm information per mm that is being scanned
  117. * @slot: hash lookup from mm to mm_slot
  118. * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
  119. */
  120. struct ksm_mm_slot {
  121. struct mm_slot slot;
  122. struct ksm_rmap_item *rmap_list;
  123. };
  124. /**
  125. * struct ksm_scan - cursor for scanning
  126. * @mm_slot: the current mm_slot we are scanning
  127. * @address: the next address inside that to be scanned
  128. * @rmap_list: link to the next rmap to be scanned in the rmap_list
  129. * @seqnr: count of completed full scans (needed when removing unstable node)
  130. *
  131. * There is only the one ksm_scan instance of this cursor structure.
  132. */
  133. struct ksm_scan {
  134. struct ksm_mm_slot *mm_slot;
  135. unsigned long address;
  136. struct ksm_rmap_item **rmap_list;
  137. unsigned long seqnr;
  138. };
  139. /**
  140. * struct ksm_stable_node - node of the stable rbtree
  141. * @node: rb node of this ksm page in the stable tree
  142. * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
  143. * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
  144. * @list: linked into migrate_nodes, pending placement in the proper node tree
  145. * @hlist: hlist head of rmap_items using this ksm page
  146. * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
  147. * @chain_prune_time: time of the last full garbage collection
  148. * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
  149. * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
  150. */
  151. struct ksm_stable_node {
  152. union {
  153. struct rb_node node; /* when node of stable tree */
  154. struct { /* when listed for migration */
  155. struct list_head *head;
  156. struct {
  157. struct hlist_node hlist_dup;
  158. struct list_head list;
  159. };
  160. };
  161. };
  162. struct hlist_head hlist;
  163. union {
  164. unsigned long kpfn;
  165. unsigned long chain_prune_time;
  166. };
  167. /*
  168. * STABLE_NODE_CHAIN can be any negative number in the
  169. * rmap_hlist_len negative range, but preferably not -1, to be able
  170. * to reliably detect underflows.
  171. */
  172. #define STABLE_NODE_CHAIN -1024
  173. int rmap_hlist_len;
  174. #ifdef CONFIG_NUMA
  175. int nid;
  176. #endif
  177. };
  178. /**
  179. * struct ksm_rmap_item - reverse mapping item for virtual addresses
  180. * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
  181. * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
  182. * @nid: NUMA node id of unstable tree in which linked (may not match page)
  183. * @mm: the memory structure this rmap_item is pointing into
  184. * @address: the virtual address this rmap_item tracks (+ flags in low bits)
  185. * @oldchecksum: previous checksum of the page at that virtual address
  186. * @node: rb node of this rmap_item in the unstable tree
  187. * @head: pointer to stable_node heading this list in the stable tree
  188. * @hlist: link into hlist of rmap_items hanging off that stable_node
  189. * @age: number of scan iterations since creation
  190. * @remaining_skips: how many scans to skip
  191. */
  192. struct ksm_rmap_item {
  193. struct ksm_rmap_item *rmap_list;
  194. union {
  195. struct anon_vma *anon_vma; /* when stable */
  196. #ifdef CONFIG_NUMA
  197. int nid; /* when node of unstable tree */
  198. #endif
  199. };
  200. struct mm_struct *mm;
  201. unsigned long address; /* + low bits used for flags below */
  202. unsigned int oldchecksum; /* when unstable */
  203. rmap_age_t age;
  204. rmap_age_t remaining_skips;
  205. union {
  206. struct rb_node node; /* when node of unstable tree */
  207. struct { /* when listed from stable tree */
  208. struct ksm_stable_node *head;
  209. struct hlist_node hlist;
  210. };
  211. };
  212. };
  213. #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
  214. #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
  215. #define STABLE_FLAG 0x200 /* is listed from the stable tree */
  216. /* The stable and unstable tree heads */
  217. static struct rb_root one_stable_tree[1] = { RB_ROOT };
  218. static struct rb_root one_unstable_tree[1] = { RB_ROOT };
  219. static struct rb_root *root_stable_tree = one_stable_tree;
  220. static struct rb_root *root_unstable_tree = one_unstable_tree;
  221. /* Recently migrated nodes of stable tree, pending proper placement */
  222. static LIST_HEAD(migrate_nodes);
  223. #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
  224. #define MM_SLOTS_HASH_BITS 10
  225. static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  226. static struct ksm_mm_slot ksm_mm_head = {
  227. .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
  228. };
  229. static struct ksm_scan ksm_scan = {
  230. .mm_slot = &ksm_mm_head,
  231. };
  232. static struct kmem_cache *rmap_item_cache;
  233. static struct kmem_cache *stable_node_cache;
  234. static struct kmem_cache *mm_slot_cache;
  235. /* Default number of pages to scan per batch */
  236. #define DEFAULT_PAGES_TO_SCAN 100
  237. /* The number of pages scanned */
  238. static unsigned long ksm_pages_scanned;
  239. /* The number of nodes in the stable tree */
  240. static unsigned long ksm_pages_shared;
  241. /* The number of page slots additionally sharing those nodes */
  242. static unsigned long ksm_pages_sharing;
  243. /* The number of nodes in the unstable tree */
  244. static unsigned long ksm_pages_unshared;
  245. /* The number of rmap_items in use: to calculate pages_volatile */
  246. static unsigned long ksm_rmap_items;
  247. /* The number of stable_node chains */
  248. static unsigned long ksm_stable_node_chains;
  249. /* The number of stable_node dups linked to the stable_node chains */
  250. static unsigned long ksm_stable_node_dups;
  251. /* Delay in pruning stale stable_node_dups in the stable_node_chains */
  252. static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;
  253. /* Maximum number of page slots sharing a stable node */
  254. static int ksm_max_page_sharing = 256;
  255. /* Number of pages ksmd should scan in one batch */
  256. static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
  257. /* Milliseconds ksmd should sleep between batches */
  258. static unsigned int ksm_thread_sleep_millisecs = 20;
  259. /* Checksum of an empty (zeroed) page */
  260. static unsigned int zero_checksum __read_mostly;
  261. /* Whether to merge empty (zeroed) pages with actual zero pages */
  262. static bool ksm_use_zero_pages __read_mostly;
  263. /* Skip pages that couldn't be de-duplicated previously */
  264. /* Default to true at least temporarily, for testing */
  265. static bool ksm_smart_scan = true;
  266. /* The number of zero pages which is placed by KSM */
  267. atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
  268. /* The number of pages that have been skipped due to "smart scanning" */
  269. static unsigned long ksm_pages_skipped;
  270. /* Don't scan more than max pages per batch. */
  271. static unsigned long ksm_advisor_max_pages_to_scan = 30000;
  272. /* Min CPU for scanning pages per scan */
  273. #define KSM_ADVISOR_MIN_CPU 10
  274. /* Max CPU for scanning pages per scan */
  275. static unsigned int ksm_advisor_max_cpu = 70;
  276. /* Target scan time in seconds to analyze all KSM candidate pages. */
  277. static unsigned long ksm_advisor_target_scan_time = 200;
  278. /* Exponentially weighted moving average. */
  279. #define EWMA_WEIGHT 30
  280. /**
  281. * struct advisor_ctx - metadata for KSM advisor
  282. * @start_scan: start time of the current scan
  283. * @scan_time: scan time of previous scan
  284. * @change: change in percent to pages_to_scan parameter
  285. * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
  286. */
  287. struct advisor_ctx {
  288. ktime_t start_scan;
  289. unsigned long scan_time;
  290. unsigned long change;
  291. unsigned long long cpu_time;
  292. };
  293. static struct advisor_ctx advisor_ctx;
  294. /* Define the different advisors */
  295. enum ksm_advisor_type {
  296. KSM_ADVISOR_NONE,
  297. KSM_ADVISOR_SCAN_TIME,
  298. };
  299. static enum ksm_advisor_type ksm_advisor;
  300. #ifdef CONFIG_SYSFS
  301. /*
  302. * Only called through the sysfs control interface:
  303. */
  304. /* At least scan this many pages per batch. */
  305. static unsigned long ksm_advisor_min_pages_to_scan = 500;
  306. static void set_advisor_defaults(void)
  307. {
  308. if (ksm_advisor == KSM_ADVISOR_NONE) {
  309. ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
  310. } else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
  311. advisor_ctx = (const struct advisor_ctx){ 0 };
  312. ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
  313. }
  314. }
  315. #endif /* CONFIG_SYSFS */
  316. static inline void advisor_start_scan(void)
  317. {
  318. if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
  319. advisor_ctx.start_scan = ktime_get();
  320. }
  321. /*
  322. * Use previous scan time if available, otherwise use current scan time as an
  323. * approximation for the previous scan time.
  324. */
  325. static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
  326. unsigned long scan_time)
  327. {
  328. return ctx->scan_time ? ctx->scan_time : scan_time;
  329. }
  330. /* Calculate exponential weighted moving average */
  331. static unsigned long ewma(unsigned long prev, unsigned long curr)
  332. {
  333. return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
  334. }
  335. /*
  336. * The scan time advisor is based on the current scan rate and the target
  337. * scan rate.
  338. *
  339. * new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
  340. *
  341. * To avoid perturbations it calculates a change factor of previous changes.
  342. * A new change factor is calculated for each iteration and it uses an
  343. * exponentially weighted moving average. The new pages_to_scan value is
  344. * multiplied by that change factor:
  345. *
  346. * new_pages_to_scan *= change factor
  347. *
  348. * The new_pages_to_scan value is limited by the cpu min and max values. It
  349. * calculates the cpu percent for the last scan and calculates the new
  350. * estimated cpu percent cost for the next scan. That value is capped by the
  351. * cpu min and max setting.
  352. *
  353. * In addition the new pages_to_scan value is capped by the max and min
  354. * limits.
  355. */
  356. static void scan_time_advisor(void)
  357. {
  358. unsigned int cpu_percent;
  359. unsigned long cpu_time;
  360. unsigned long cpu_time_diff;
  361. unsigned long cpu_time_diff_ms;
  362. unsigned long pages;
  363. unsigned long per_page_cost;
  364. unsigned long factor;
  365. unsigned long change;
  366. unsigned long last_scan_time;
  367. unsigned long scan_time;
  368. /* Convert scan time to seconds */
  369. scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
  370. MSEC_PER_SEC);
  371. scan_time = scan_time ? scan_time : 1;
  372. /* Calculate CPU consumption of ksmd background thread */
  373. cpu_time = task_sched_runtime(current);
  374. cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
  375. cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;
  376. cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
  377. cpu_percent = cpu_percent ? cpu_percent : 1;
  378. last_scan_time = prev_scan_time(&advisor_ctx, scan_time);
  379. /* Calculate scan time as percentage of target scan time */
  380. factor = ksm_advisor_target_scan_time * 100 / scan_time;
  381. factor = factor ? factor : 1;
  382. /*
  383. * Calculate scan time as percentage of last scan time and use
  384. * exponentially weighted average to smooth it
  385. */
  386. change = scan_time * 100 / last_scan_time;
  387. change = change ? change : 1;
  388. change = ewma(advisor_ctx.change, change);
  389. /* Calculate new scan rate based on target scan rate. */
  390. pages = ksm_thread_pages_to_scan * 100 / factor;
  391. /* Update pages_to_scan by weighted change percentage. */
  392. pages = pages * change / 100;
  393. /* Cap new pages_to_scan value */
  394. per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
  395. per_page_cost = per_page_cost ? per_page_cost : 1;
  396. pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
  397. pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
  398. pages = min(pages, ksm_advisor_max_pages_to_scan);
  399. /* Update advisor context */
  400. advisor_ctx.change = change;
  401. advisor_ctx.scan_time = scan_time;
  402. advisor_ctx.cpu_time = cpu_time;
  403. ksm_thread_pages_to_scan = pages;
  404. trace_ksm_advisor(scan_time, pages, cpu_percent);
  405. }
  406. static void advisor_stop_scan(void)
  407. {
  408. if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
  409. scan_time_advisor();
  410. }
  411. #ifdef CONFIG_NUMA
  412. /* Zeroed when merging across nodes is not allowed */
  413. static unsigned int ksm_merge_across_nodes = 1;
  414. static int ksm_nr_node_ids = 1;
  415. #else
  416. #define ksm_merge_across_nodes 1U
  417. #define ksm_nr_node_ids 1
  418. #endif
  419. #define KSM_RUN_STOP 0
  420. #define KSM_RUN_MERGE 1
  421. #define KSM_RUN_UNMERGE 2
  422. #define KSM_RUN_OFFLINE 4
  423. static unsigned long ksm_run = KSM_RUN_STOP;
  424. static void wait_while_offlining(void);
  425. static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
  426. static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
  427. static DEFINE_MUTEX(ksm_thread_mutex);
  428. static DEFINE_SPINLOCK(ksm_mmlist_lock);
  429. static int __init ksm_slab_init(void)
  430. {
  431. rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0);
  432. if (!rmap_item_cache)
  433. goto out;
  434. stable_node_cache = KMEM_CACHE(ksm_stable_node, 0);
  435. if (!stable_node_cache)
  436. goto out_free1;
  437. mm_slot_cache = KMEM_CACHE(ksm_mm_slot, 0);
  438. if (!mm_slot_cache)
  439. goto out_free2;
  440. return 0;
  441. out_free2:
  442. kmem_cache_destroy(stable_node_cache);
  443. out_free1:
  444. kmem_cache_destroy(rmap_item_cache);
  445. out:
  446. return -ENOMEM;
  447. }
  448. static void __init ksm_slab_free(void)
  449. {
  450. kmem_cache_destroy(mm_slot_cache);
  451. kmem_cache_destroy(stable_node_cache);
  452. kmem_cache_destroy(rmap_item_cache);
  453. mm_slot_cache = NULL;
  454. }
  455. static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
  456. {
  457. return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
  458. }
  459. static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
  460. {
  461. return dup->head == STABLE_NODE_DUP_HEAD;
  462. }
  463. static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
  464. struct ksm_stable_node *chain)
  465. {
  466. VM_BUG_ON(is_stable_node_dup(dup));
  467. dup->head = STABLE_NODE_DUP_HEAD;
  468. VM_BUG_ON(!is_stable_node_chain(chain));
  469. hlist_add_head(&dup->hlist_dup, &chain->hlist);
  470. ksm_stable_node_dups++;
  471. }
  472. static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
  473. {
  474. VM_BUG_ON(!is_stable_node_dup(dup));
  475. hlist_del(&dup->hlist_dup);
  476. ksm_stable_node_dups--;
  477. }
  478. static inline void stable_node_dup_del(struct ksm_stable_node *dup)
  479. {
  480. VM_BUG_ON(is_stable_node_chain(dup));
  481. if (is_stable_node_dup(dup))
  482. __stable_node_dup_del(dup);
  483. else
  484. rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
  485. #ifdef CONFIG_DEBUG_VM
  486. dup->head = NULL;
  487. #endif
  488. }
  489. static inline struct ksm_rmap_item *alloc_rmap_item(void)
  490. {
  491. struct ksm_rmap_item *rmap_item;
  492. rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
  493. __GFP_NORETRY | __GFP_NOWARN);
  494. if (rmap_item)
  495. ksm_rmap_items++;
  496. return rmap_item;
  497. }
  498. static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
  499. {
  500. ksm_rmap_items--;
  501. rmap_item->mm->ksm_rmap_items--;
  502. rmap_item->mm = NULL; /* debug safety */
  503. kmem_cache_free(rmap_item_cache, rmap_item);
  504. }
  505. static inline struct ksm_stable_node *alloc_stable_node(void)
  506. {
  507. /*
  508. * The allocation can take too long with GFP_KERNEL when memory is under
  509. * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
  510. * grants access to memory reserves, helping to avoid this problem.
  511. */
  512. return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
  513. }
  514. static inline void free_stable_node(struct ksm_stable_node *stable_node)
  515. {
  516. VM_BUG_ON(stable_node->rmap_hlist_len &&
  517. !is_stable_node_chain(stable_node));
  518. kmem_cache_free(stable_node_cache, stable_node);
  519. }
  520. /*
  521. * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
  522. * page tables after it has passed through ksm_exit() - which, if necessary,
  523. * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
  524. * a special flag: they can just back out as soon as mm_users goes to zero.
  525. * ksm_test_exit() is used throughout to make this test for exit: in some
  526. * places for correctness, in some places just to avoid unnecessary work.
  527. */
  528. static inline bool ksm_test_exit(struct mm_struct *mm)
  529. {
  530. return atomic_read(&mm->mm_users) == 0;
  531. }
  532. /*
  533. * We use break_ksm to break COW on a ksm page by triggering unsharing,
  534. * such that the ksm page will get replaced by an exclusive anonymous page.
  535. *
  536. * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
  537. * in case the application has unmapped and remapped mm,addr meanwhile.
  538. * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
  539. * mmap of /dev/mem, where we would not want to touch it.
  540. *
  541. * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
  542. * of the process that owns 'vma'. We also do not want to enforce
  543. * protection keys here anyway.
  544. */
  545. static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
  546. {
  547. vm_fault_t ret = 0;
  548. if (lock_vma)
  549. vma_start_write(vma);
  550. do {
  551. bool ksm_page = false;
  552. struct folio_walk fw;
  553. struct folio *folio;
  554. cond_resched();
  555. folio = folio_walk_start(&fw, vma, addr,
  556. FW_MIGRATION | FW_ZEROPAGE);
  557. if (folio) {
  558. /* Small folio implies FW_LEVEL_PTE. */
  559. if (!folio_test_large(folio) &&
  560. (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
  561. ksm_page = true;
  562. folio_walk_end(&fw, vma);
  563. }
  564. if (!ksm_page)
  565. return 0;
  566. ret = handle_mm_fault(vma, addr,
  567. FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
  568. NULL);
  569. } while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
  570. /*
  571. * We must loop until we no longer find a KSM page because
  572. * handle_mm_fault() may back out if there's any difficulty e.g. if
  573. * pte accessed bit gets updated concurrently.
  574. *
  575. * VM_FAULT_SIGBUS could occur if we race with truncation of the
  576. * backing file, which also invalidates anonymous pages: that's
  577. * okay, that truncation will have unmapped the PageKsm for us.
  578. *
  579. * VM_FAULT_OOM: at the time of writing (late July 2009), setting
  580. * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
  581. * current task has TIF_MEMDIE set, and will be OOM killed on return
  582. * to user; and ksmd, having no mm, would never be chosen for that.
  583. *
  584. * But if the mm is in a limited mem_cgroup, then the fault may fail
  585. * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
  586. * even ksmd can fail in this way - though it's usually breaking ksm
  587. * just to undo a merge it made a moment before, so unlikely to oom.
  588. *
  589. * That's a pity: we might therefore have more kernel pages allocated
  590. * than we're counting as nodes in the stable tree; but ksm_do_scan
  591. * will retry to break_cow on each pass, so should recover the page
  592. * in due course. The important thing is to not let VM_MERGEABLE
  593. * be cleared while any such pages might remain in the area.
  594. */
  595. return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
  596. }
  597. static bool vma_ksm_compatible(struct vm_area_struct *vma)
  598. {
  599. if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP |
  600. VM_IO | VM_DONTEXPAND | VM_HUGETLB |
  601. VM_MIXEDMAP | VM_DROPPABLE))
  602. return false; /* just ignore the advice */
  603. if (vma_is_dax(vma))
  604. return false;
  605. #ifdef VM_SAO
  606. if (vma->vm_flags & VM_SAO)
  607. return false;
  608. #endif
  609. #ifdef VM_SPARC_ADI
  610. if (vma->vm_flags & VM_SPARC_ADI)
  611. return false;
  612. #endif
  613. return true;
  614. }
  615. static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
  616. unsigned long addr)
  617. {
  618. struct vm_area_struct *vma;
  619. if (ksm_test_exit(mm))
  620. return NULL;
  621. vma = vma_lookup(mm, addr);
  622. if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
  623. return NULL;
  624. return vma;
  625. }
  626. static void break_cow(struct ksm_rmap_item *rmap_item)
  627. {
  628. struct mm_struct *mm = rmap_item->mm;
  629. unsigned long addr = rmap_item->address;
  630. struct vm_area_struct *vma;
  631. /*
  632. * It is not an accident that whenever we want to break COW
  633. * to undo, we also need to drop a reference to the anon_vma.
  634. */
  635. put_anon_vma(rmap_item->anon_vma);
  636. mmap_read_lock(mm);
  637. vma = find_mergeable_vma(mm, addr);
  638. if (vma)
  639. break_ksm(vma, addr, false);
  640. mmap_read_unlock(mm);
  641. }
  642. static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
  643. {
  644. struct mm_struct *mm = rmap_item->mm;
  645. unsigned long addr = rmap_item->address;
  646. struct vm_area_struct *vma;
  647. struct page *page = NULL;
  648. struct folio_walk fw;
  649. struct folio *folio;
  650. mmap_read_lock(mm);
  651. vma = find_mergeable_vma(mm, addr);
  652. if (!vma)
  653. goto out;
  654. folio = folio_walk_start(&fw, vma, addr, 0);
  655. if (folio) {
  656. if (!folio_is_zone_device(folio) &&
  657. folio_test_anon(folio)) {
  658. folio_get(folio);
  659. page = fw.page;
  660. }
  661. folio_walk_end(&fw, vma);
  662. }
  663. out:
  664. if (page) {
  665. flush_anon_page(vma, page, addr);
  666. flush_dcache_page(page);
  667. }
  668. mmap_read_unlock(mm);
  669. return page;
  670. }
  671. /*
  672. * This helper is used for getting the right index into the array of tree roots.
  673. * When merge_across_nodes knob is set to 1, there are only two rb-trees for
  674. * stable and unstable pages from all nodes with roots in index 0. Otherwise,
  675. * every node has its own stable and unstable tree.
  676. */
  677. static inline int get_kpfn_nid(unsigned long kpfn)
  678. {
  679. return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
  680. }
  681. static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
  682. struct rb_root *root)
  683. {
  684. struct ksm_stable_node *chain = alloc_stable_node();
  685. VM_BUG_ON(is_stable_node_chain(dup));
  686. if (likely(chain)) {
  687. INIT_HLIST_HEAD(&chain->hlist);
  688. chain->chain_prune_time = jiffies;
  689. chain->rmap_hlist_len = STABLE_NODE_CHAIN;
  690. #if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
  691. chain->nid = NUMA_NO_NODE; /* debug */
  692. #endif
  693. ksm_stable_node_chains++;
  694. /*
  695. * Put the stable node chain in the first dimension of
  696. * the stable tree and at the same time remove the old
  697. * stable node.
  698. */
  699. rb_replace_node(&dup->node, &chain->node, root);
  700. /*
  701. * Move the old stable node to the second dimension
  702. * queued in the hlist_dup. The invariant is that all
  703. * dup stable_nodes in the chain->hlist point to pages
  704. * that are write protected and have the exact same
  705. * content.
  706. */
  707. stable_node_chain_add_dup(dup, chain);
  708. }
  709. return chain;
  710. }
  711. static inline void free_stable_node_chain(struct ksm_stable_node *chain,
  712. struct rb_root *root)
  713. {
  714. rb_erase(&chain->node, root);
  715. free_stable_node(chain);
  716. ksm_stable_node_chains--;
  717. }
  718. static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
  719. {
  720. struct ksm_rmap_item *rmap_item;
  721. /* check it's not STABLE_NODE_CHAIN or negative */
  722. BUG_ON(stable_node->rmap_hlist_len < 0);
  723. hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
  724. if (rmap_item->hlist.next) {
  725. ksm_pages_sharing--;
  726. trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
  727. } else {
  728. ksm_pages_shared--;
  729. }
  730. rmap_item->mm->ksm_merging_pages--;
  731. VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
  732. stable_node->rmap_hlist_len--;
  733. put_anon_vma(rmap_item->anon_vma);
  734. rmap_item->address &= PAGE_MASK;
  735. cond_resched();
  736. }
  737. /*
  738. * We need the second aligned pointer of the migrate_nodes
  739. * list_head to stay clear from the rb_parent_color union
  740. * (aligned and different than any node) and also different
  741. * from &migrate_nodes. This will verify that future list.h changes
  742. * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
  743. */
  744. BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
  745. BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
  746. trace_ksm_remove_ksm_page(stable_node->kpfn);
  747. if (stable_node->head == &migrate_nodes)
  748. list_del(&stable_node->list);
  749. else
  750. stable_node_dup_del(stable_node);
  751. free_stable_node(stable_node);
  752. }
  753. enum ksm_get_folio_flags {
  754. KSM_GET_FOLIO_NOLOCK,
  755. KSM_GET_FOLIO_LOCK,
  756. KSM_GET_FOLIO_TRYLOCK
  757. };
  758. /*
  759. * ksm_get_folio: checks if the page indicated by the stable node
  760. * is still its ksm page, despite having held no reference to it.
  761. * In which case we can trust the content of the page, and it
  762. * returns the gotten page; but if the page has now been zapped,
  763. * remove the stale node from the stable tree and return NULL.
  764. * But beware, the stable node's page might be being migrated.
  765. *
  766. * You would expect the stable_node to hold a reference to the ksm page.
  767. * But if it increments the page's count, swapping out has to wait for
  768. * ksmd to come around again before it can free the page, which may take
  769. * seconds or even minutes: much too unresponsive. So instead we use a
  770. * "keyhole reference": access to the ksm page from the stable node peeps
  771. * out through its keyhole to see if that page still holds the right key,
  772. * pointing back to this stable node. This relies on freeing a PageAnon
  773. * page to reset its page->mapping to NULL, and relies on no other use of
  774. * a page to put something that might look like our key in page->mapping
  775. * while it is on its way to being freed; but it is an anomaly to bear in mind.
  776. */
  777. static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
  778. enum ksm_get_folio_flags flags)
  779. {
  780. struct folio *folio;
  781. void *expected_mapping;
  782. unsigned long kpfn;
  783. expected_mapping = (void *)((unsigned long)stable_node |
  784. PAGE_MAPPING_KSM);
  785. again:
  786. kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
  787. folio = pfn_folio(kpfn);
  788. if (READ_ONCE(folio->mapping) != expected_mapping)
  789. goto stale;
  790. /*
  791. * We cannot do anything with the page while its refcount is 0.
  792. * Usually 0 means free, or tail of a higher-order page: in which
  793. * case this node is no longer referenced, and should be freed;
  794. * however, it might mean that the page is under page_ref_freeze().
  795. * The __remove_mapping() case is easy, again the node is now stale;
  796. * the same is true in the reuse_ksm_page() case; but if the page is swapcache
  797. * in folio_migrate_mapping(), it might still be our page,
  798. * in which case it's essential to keep the node.
  799. */
  800. while (!folio_try_get(folio)) {
  801. /*
  802. * Another check for folio->mapping != expected_mapping
  803. * would work here too. We have chosen to test the
  804. * swapcache flag to optimize the common case, when the
  805. * folio is or is about to be freed: the swapcache flag
  806. * is cleared (under spin_lock_irq) in the ref_freeze
  807. * section of __remove_mapping(); but anon folio->mapping
  808. * is reset to NULL later, in free_pages_prepare().
  809. */
  810. if (!folio_test_swapcache(folio))
  811. goto stale;
  812. cpu_relax();
  813. }
  814. if (READ_ONCE(folio->mapping) != expected_mapping) {
  815. folio_put(folio);
  816. goto stale;
  817. }
  818. if (flags == KSM_GET_FOLIO_TRYLOCK) {
  819. if (!folio_trylock(folio)) {
  820. folio_put(folio);
  821. return ERR_PTR(-EBUSY);
  822. }
  823. } else if (flags == KSM_GET_FOLIO_LOCK)
  824. folio_lock(folio);
  825. if (flags != KSM_GET_FOLIO_NOLOCK) {
  826. if (READ_ONCE(folio->mapping) != expected_mapping) {
  827. folio_unlock(folio);
  828. folio_put(folio);
  829. goto stale;
  830. }
  831. }
  832. return folio;
  833. stale:
  834. /*
  835. * We come here from above when folio->mapping or the swapcache flag
  836. * suggests that the node is stale; but it might be under migration.
  837. * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
  838. * before checking whether node->kpfn has been changed.
  839. */
  840. smp_rmb();
  841. if (READ_ONCE(stable_node->kpfn) != kpfn)
  842. goto again;
  843. remove_node_from_stable_tree(stable_node);
  844. return NULL;
  845. }
  846. /*
  847. * Removing rmap_item from stable or unstable tree.
  848. * This function will clean the information from the stable/unstable tree.
  849. */
  850. static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
  851. {
  852. if (rmap_item->address & STABLE_FLAG) {
  853. struct ksm_stable_node *stable_node;
  854. struct folio *folio;
  855. stable_node = rmap_item->head;
  856. folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
  857. if (!folio)
  858. goto out;
  859. hlist_del(&rmap_item->hlist);
  860. folio_unlock(folio);
  861. folio_put(folio);
  862. if (!hlist_empty(&stable_node->hlist))
  863. ksm_pages_sharing--;
  864. else
  865. ksm_pages_shared--;
  866. rmap_item->mm->ksm_merging_pages--;
  867. VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
  868. stable_node->rmap_hlist_len--;
  869. put_anon_vma(rmap_item->anon_vma);
  870. rmap_item->head = NULL;
  871. rmap_item->address &= PAGE_MASK;
  872. } else if (rmap_item->address & UNSTABLE_FLAG) {
  873. unsigned char age;
  874. /*
  875. * Usually ksmd can and must skip the rb_erase, because
  876. * root_unstable_tree was already reset to RB_ROOT.
  877. * But be careful when an mm is exiting: do the rb_erase
  878. * if this rmap_item was inserted by this scan, rather
  879. * than left over from before.
  880. */
  881. age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
  882. BUG_ON(age > 1);
  883. if (!age)
  884. rb_erase(&rmap_item->node,
  885. root_unstable_tree + NUMA(rmap_item->nid));
  886. ksm_pages_unshared--;
  887. rmap_item->address &= PAGE_MASK;
  888. }
  889. out:
  890. cond_resched(); /* we're called from many long loops */
  891. }
  892. static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
  893. {
  894. while (*rmap_list) {
  895. struct ksm_rmap_item *rmap_item = *rmap_list;
  896. *rmap_list = rmap_item->rmap_list;
  897. remove_rmap_item_from_tree(rmap_item);
  898. free_rmap_item(rmap_item);
  899. }
  900. }
  901. /*
  902. * Though it's very tempting to unmerge rmap_items from stable tree rather
  903. * than check every pte of a given vma, the locking doesn't quite work for
  904. * that - an rmap_item is assigned to the stable tree after inserting ksm
  905. * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
  906. * rmap_items from parent to child at fork time (so as not to waste time
  907. * if exit comes before the next scan reaches it).
  908. *
  909. * Similarly, although we'd like to remove rmap_items (so updating counts
  910. * and freeing memory) when unmerging an area, it's easier to leave that
  911. * to the next pass of ksmd - consider, for example, how ksmd might be
  912. * in cmp_and_merge_page on one of the rmap_items we would be removing.
  913. */
  914. static int unmerge_ksm_pages(struct vm_area_struct *vma,
  915. unsigned long start, unsigned long end, bool lock_vma)
  916. {
  917. unsigned long addr;
  918. int err = 0;
  919. for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
  920. if (ksm_test_exit(vma->vm_mm))
  921. break;
  922. if (signal_pending(current))
  923. err = -ERESTARTSYS;
  924. else
  925. err = break_ksm(vma, addr, lock_vma);
  926. }
  927. return err;
  928. }
  929. static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
  930. {
  931. return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
  932. }
  933. static inline struct ksm_stable_node *page_stable_node(struct page *page)
  934. {
  935. return folio_stable_node(page_folio(page));
  936. }
  937. static inline void folio_set_stable_node(struct folio *folio,
  938. struct ksm_stable_node *stable_node)
  939. {
  940. VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
  941. folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
  942. }
  943. #ifdef CONFIG_SYSFS
  944. /*
  945. * Only called through the sysfs control interface:
  946. */
  947. static int remove_stable_node(struct ksm_stable_node *stable_node)
  948. {
  949. struct folio *folio;
  950. int err;
  951. folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
  952. if (!folio) {
  953. /*
  954. * ksm_get_folio did remove_node_from_stable_tree itself.
  955. */
  956. return 0;
  957. }
  958. /*
  959. * Page could still be mapped if this races with __mmput() running in
  960. * between ksm_exit() and exit_mmap(). Just refuse to let
  961. * merge_across_nodes/max_page_sharing be switched.
  962. */
  963. err = -EBUSY;
  964. if (!folio_mapped(folio)) {
  965. /*
  966. * The stable node did not yet appear stale to ksm_get_folio(),
  967. * since that allows for an unmapped ksm folio to be recognized
  968. * right up until it is freed; but the node is safe to remove.
  969. * This folio might be in an LRU cache waiting to be freed,
  970. * or it might be in the swapcache (perhaps under writeback),
  971. * or it might have been removed from swapcache a moment ago.
  972. */
  973. folio_set_stable_node(folio, NULL);
  974. remove_node_from_stable_tree(stable_node);
  975. err = 0;
  976. }
  977. folio_unlock(folio);
  978. folio_put(folio);
  979. return err;
  980. }
  981. static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
  982. struct rb_root *root)
  983. {
  984. struct ksm_stable_node *dup;
  985. struct hlist_node *hlist_safe;
  986. if (!is_stable_node_chain(stable_node)) {
  987. VM_BUG_ON(is_stable_node_dup(stable_node));
  988. if (remove_stable_node(stable_node))
  989. return true;
  990. else
  991. return false;
  992. }
  993. hlist_for_each_entry_safe(dup, hlist_safe,
  994. &stable_node->hlist, hlist_dup) {
  995. VM_BUG_ON(!is_stable_node_dup(dup));
  996. if (remove_stable_node(dup))
  997. return true;
  998. }
  999. BUG_ON(!hlist_empty(&stable_node->hlist));
  1000. free_stable_node_chain(stable_node, root);
  1001. return false;
  1002. }
  1003. static int remove_all_stable_nodes(void)
  1004. {
  1005. struct ksm_stable_node *stable_node, *next;
  1006. int nid;
  1007. int err = 0;
  1008. for (nid = 0; nid < ksm_nr_node_ids; nid++) {
  1009. while (root_stable_tree[nid].rb_node) {
  1010. stable_node = rb_entry(root_stable_tree[nid].rb_node,
  1011. struct ksm_stable_node, node);
  1012. if (remove_stable_node_chain(stable_node,
  1013. root_stable_tree + nid)) {
  1014. err = -EBUSY;
  1015. break; /* proceed to next nid */
  1016. }
  1017. cond_resched();
  1018. }
  1019. }
  1020. list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
  1021. if (remove_stable_node(stable_node))
  1022. err = -EBUSY;
  1023. cond_resched();
  1024. }
  1025. return err;
  1026. }
  1027. static int unmerge_and_remove_all_rmap_items(void)
  1028. {
  1029. struct ksm_mm_slot *mm_slot;
  1030. struct mm_slot *slot;
  1031. struct mm_struct *mm;
  1032. struct vm_area_struct *vma;
  1033. int err = 0;
  1034. spin_lock(&ksm_mmlist_lock);
  1035. slot = list_entry(ksm_mm_head.slot.mm_node.next,
  1036. struct mm_slot, mm_node);
  1037. ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
  1038. spin_unlock(&ksm_mmlist_lock);
  1039. for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
  1040. mm_slot = ksm_scan.mm_slot) {
  1041. VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);
  1042. mm = mm_slot->slot.mm;
  1043. mmap_read_lock(mm);
  1044. /*
  1045. * Exit right away if mm is exiting to avoid lockdep issue in
  1046. * the maple tree
  1047. */
  1048. if (ksm_test_exit(mm))
  1049. goto mm_exiting;
  1050. for_each_vma(vmi, vma) {
  1051. if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
  1052. continue;
  1053. err = unmerge_ksm_pages(vma,
  1054. vma->vm_start, vma->vm_end, false);
  1055. if (err)
  1056. goto error;
  1057. }
  1058. mm_exiting:
  1059. remove_trailing_rmap_items(&mm_slot->rmap_list);
  1060. mmap_read_unlock(mm);
  1061. spin_lock(&ksm_mmlist_lock);
  1062. slot = list_entry(mm_slot->slot.mm_node.next,
  1063. struct mm_slot, mm_node);
  1064. ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
  1065. if (ksm_test_exit(mm)) {
  1066. hash_del(&mm_slot->slot.hash);
  1067. list_del(&mm_slot->slot.mm_node);
  1068. spin_unlock(&ksm_mmlist_lock);
  1069. mm_slot_free(mm_slot_cache, mm_slot);
  1070. clear_bit(MMF_VM_MERGEABLE, &mm->flags);
  1071. clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
  1072. mmdrop(mm);
  1073. } else
  1074. spin_unlock(&ksm_mmlist_lock);
  1075. }
  1076. /* Clean up stable nodes, but don't worry if some are still busy */
  1077. remove_all_stable_nodes();
  1078. ksm_scan.seqnr = 0;
  1079. return 0;
  1080. error:
  1081. mmap_read_unlock(mm);
  1082. spin_lock(&ksm_mmlist_lock);
  1083. ksm_scan.mm_slot = &ksm_mm_head;
  1084. spin_unlock(&ksm_mmlist_lock);
  1085. return err;
  1086. }
  1087. #endif /* CONFIG_SYSFS */
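/*
 * Illustrative userspace sketch, not part of ksm.c: the CONFIG_SYSFS-only
 * helpers above are reached through /sys/kernel/mm/ksm/.  Writing "2" to
 * the "run" file selects KSM_RUN_UNMERGE, which is what ends up driving
 * unmerge_and_remove_all_rmap_items() (see
 * Documentation/admin-guide/mm/ksm.rst).  The function name below is an
 * example only; writing the file needs root.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int ksm_request_unmerge_example(void)
{
        int fd = open("/sys/kernel/mm/ksm/run", O_WRONLY);

        if (fd < 0) {
                perror("open /sys/kernel/mm/ksm/run");
                return -1;
        }
        /* "2" == KSM_RUN_UNMERGE: unmerge all KSM pages, then stop ksmd. */
        if (write(fd, "2", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}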
  1088. static u32 calc_checksum(struct page *page)
  1089. {
  1090. u32 checksum;
  1091. void *addr = kmap_local_page(page);
  1092. checksum = xxhash(addr, PAGE_SIZE, 0);
  1093. kunmap_local(addr);
  1094. return checksum;
  1095. }
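/*
 * Illustrative userspace model, not part of ksm.c: calc_checksum() exists
 * so that cmp_and_merge_page() can detect pages whose contents keep
 * changing and keep them out of the unstable tree.  The sketch below uses
 * a hand-rolled FNV-1a hash in place of the kernel's xxhash; all names
 * are examples only.
 */
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096

static uint32_t example_hash_page(const unsigned char *page)
{
        uint32_t h = 2166136261u;       /* FNV-1a 32-bit offset basis */
        size_t i;

        for (i = 0; i < EXAMPLE_PAGE_SIZE; i++) {
                h ^= page[i];
                h *= 16777619u;         /* FNV-1a 32-bit prime */
        }
        return h;
}

/* Returns 1 once the page's hash is stable across two consecutive scans. */
static int example_page_is_stable(const unsigned char *page, uint32_t *old)
{
        uint32_t now = example_hash_page(page);

        if (now != *old) {
                *old = now;     /* contents changed: skip this scan */
                return 0;
        }
        return 1;
}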
  1096. static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
  1097. pte_t *orig_pte)
  1098. {
  1099. struct mm_struct *mm = vma->vm_mm;
  1100. DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
  1101. int swapped;
  1102. int err = -EFAULT;
  1103. struct mmu_notifier_range range;
  1104. bool anon_exclusive;
  1105. pte_t entry;
  1106. if (WARN_ON_ONCE(folio_test_large(folio)))
  1107. return err;
  1108. pvmw.address = page_address_in_vma(&folio->page, vma);
  1109. if (pvmw.address == -EFAULT)
  1110. goto out;
  1111. mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
  1112. pvmw.address + PAGE_SIZE);
  1113. mmu_notifier_invalidate_range_start(&range);
  1114. if (!page_vma_mapped_walk(&pvmw))
  1115. goto out_mn;
  1116. if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
  1117. goto out_unlock;
  1118. anon_exclusive = PageAnonExclusive(&folio->page);
  1119. entry = ptep_get(pvmw.pte);
  1120. if (pte_write(entry) || pte_dirty(entry) ||
  1121. anon_exclusive || mm_tlb_flush_pending(mm)) {
  1122. swapped = folio_test_swapcache(folio);
  1123. flush_cache_page(vma, pvmw.address, folio_pfn(folio));
  1124. /*
1125. * Ok, this is tricky: when get_user_pages_fast() runs it doesn't
1126. * take any lock, therefore the check that we are going to make
1127. * with the page count against the mapcount is racy and
1128. * O_DIRECT can happen right after the check.
1129. * So we clear the pte and flush the tlb before the check;
1130. * this assures us that no O_DIRECT can happen after the check
1131. * or in the middle of the check.
  1132. *
  1133. * No need to notify as we are downgrading page table to read
  1134. * only not changing it to point to a new page.
  1135. *
  1136. * See Documentation/mm/mmu_notifier.rst
  1137. */
  1138. entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
  1139. /*
  1140. * Check that no O_DIRECT or similar I/O is in progress on the
  1141. * page
  1142. */
  1143. if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) {
  1144. set_pte_at(mm, pvmw.address, pvmw.pte, entry);
  1145. goto out_unlock;
  1146. }
  1147. /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
  1148. if (anon_exclusive &&
  1149. folio_try_share_anon_rmap_pte(folio, &folio->page)) {
  1150. set_pte_at(mm, pvmw.address, pvmw.pte, entry);
  1151. goto out_unlock;
  1152. }
  1153. if (pte_dirty(entry))
  1154. folio_mark_dirty(folio);
  1155. entry = pte_mkclean(entry);
  1156. if (pte_write(entry))
  1157. entry = pte_wrprotect(entry);
  1158. set_pte_at(mm, pvmw.address, pvmw.pte, entry);
  1159. }
  1160. *orig_pte = entry;
  1161. err = 0;
  1162. out_unlock:
  1163. page_vma_mapped_walk_done(&pvmw);
  1164. out_mn:
  1165. mmu_notifier_invalidate_range_end(&range);
  1166. out:
  1167. return err;
  1168. }
  1169. /**
  1170. * replace_page - replace page in vma by new ksm page
  1171. * @vma: vma that holds the pte pointing to page
  1172. * @page: the page we are replacing by kpage
  1173. * @kpage: the ksm page we replace page by
  1174. * @orig_pte: the original value of the pte
  1175. *
  1176. * Returns 0 on success, -EFAULT on failure.
  1177. */
  1178. static int replace_page(struct vm_area_struct *vma, struct page *page,
  1179. struct page *kpage, pte_t orig_pte)
  1180. {
  1181. struct folio *kfolio = page_folio(kpage);
  1182. struct mm_struct *mm = vma->vm_mm;
  1183. struct folio *folio;
  1184. pmd_t *pmd;
  1185. pmd_t pmde;
  1186. pte_t *ptep;
  1187. pte_t newpte;
  1188. spinlock_t *ptl;
  1189. unsigned long addr;
  1190. int err = -EFAULT;
  1191. struct mmu_notifier_range range;
  1192. addr = page_address_in_vma(page, vma);
  1193. if (addr == -EFAULT)
  1194. goto out;
  1195. pmd = mm_find_pmd(mm, addr);
  1196. if (!pmd)
  1197. goto out;
  1198. /*
  1199. * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
  1200. * without holding anon_vma lock for write. So when looking for a
  1201. * genuine pmde (in which to find pte), test present and !THP together.
  1202. */
  1203. pmde = pmdp_get_lockless(pmd);
  1204. if (!pmd_present(pmde) || pmd_trans_huge(pmde))
  1205. goto out;
  1206. mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
  1207. addr + PAGE_SIZE);
  1208. mmu_notifier_invalidate_range_start(&range);
  1209. ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
  1210. if (!ptep)
  1211. goto out_mn;
  1212. if (!pte_same(ptep_get(ptep), orig_pte)) {
  1213. pte_unmap_unlock(ptep, ptl);
  1214. goto out_mn;
  1215. }
  1216. VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
  1217. VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
  1218. kfolio);
  1219. /*
  1220. * No need to check ksm_use_zero_pages here: we can only have a
  1221. * zero_page here if ksm_use_zero_pages was enabled already.
  1222. */
  1223. if (!is_zero_pfn(page_to_pfn(kpage))) {
  1224. folio_get(kfolio);
  1225. folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
  1226. newpte = mk_pte(kpage, vma->vm_page_prot);
  1227. } else {
  1228. /*
  1229. * Use pte_mkdirty to mark the zero page mapped by KSM, and then
  1230. * we can easily track all KSM-placed zero pages by checking if
  1231. * the dirty bit in zero page's PTE is set.
  1232. */
  1233. newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
  1234. ksm_map_zero_page(mm);
  1235. /*
  1236. * We're replacing an anonymous page with a zero page, which is
  1237. * not anonymous. We need to do proper accounting otherwise we
  1238. * will get wrong values in /proc, and a BUG message in dmesg
  1239. * when tearing down the mm.
  1240. */
  1241. dec_mm_counter(mm, MM_ANONPAGES);
  1242. }
  1243. flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
  1244. /*
  1245. * No need to notify as we are replacing a read only page with another
  1246. * read only page with the same content.
  1247. *
  1248. * See Documentation/mm/mmu_notifier.rst
  1249. */
  1250. ptep_clear_flush(vma, addr, ptep);
  1251. set_pte_at(mm, addr, ptep, newpte);
  1252. folio = page_folio(page);
  1253. folio_remove_rmap_pte(folio, page, vma);
  1254. if (!folio_mapped(folio))
  1255. folio_free_swap(folio);
  1256. folio_put(folio);
  1257. pte_unmap_unlock(ptep, ptl);
  1258. err = 0;
  1259. out_mn:
  1260. mmu_notifier_invalidate_range_end(&range);
  1261. out:
  1262. return err;
  1263. }
  1264. /*
  1265. * try_to_merge_one_page - take two pages and merge them into one
  1266. * @vma: the vma that holds the pte pointing to page
  1267. * @page: the PageAnon page that we want to replace with kpage
  1268. * @kpage: the PageKsm page that we want to map instead of page,
  1269. * or NULL the first time when we want to use page as kpage.
  1270. *
  1271. * This function returns 0 if the pages were merged, -EFAULT otherwise.
  1272. */
  1273. static int try_to_merge_one_page(struct vm_area_struct *vma,
  1274. struct page *page, struct page *kpage)
  1275. {
  1276. pte_t orig_pte = __pte(0);
  1277. int err = -EFAULT;
  1278. if (page == kpage) /* ksm page forked */
  1279. return 0;
  1280. if (!PageAnon(page))
  1281. goto out;
  1282. /*
  1283. * We need the folio lock to read a stable swapcache flag in
  1284. * write_protect_page(). We use trylock_page() instead of
  1285. * lock_page() because we don't want to wait here - we
  1286. * prefer to continue scanning and merging different pages,
  1287. * then come back to this page when it is unlocked.
  1288. */
  1289. if (!trylock_page(page))
  1290. goto out;
  1291. if (PageTransCompound(page)) {
  1292. if (split_huge_page(page))
  1293. goto out_unlock;
  1294. }
  1295. /*
  1296. * If this anonymous page is mapped only here, its pte may need
  1297. * to be write-protected. If it's mapped elsewhere, all of its
  1298. * ptes are necessarily already write-protected. But in either
  1299. * case, we need to lock and check page_count is not raised.
  1300. */
  1301. if (write_protect_page(vma, page_folio(page), &orig_pte) == 0) {
  1302. if (!kpage) {
  1303. /*
  1304. * While we hold page lock, upgrade page from
  1305. * PageAnon+anon_vma to PageKsm+NULL stable_node:
  1306. * stable_tree_insert() will update stable_node.
  1307. */
  1308. folio_set_stable_node(page_folio(page), NULL);
  1309. mark_page_accessed(page);
  1310. /*
  1311. * Page reclaim just frees a clean page with no dirty
  1312. * ptes: make sure that the ksm page would be swapped.
  1313. */
  1314. if (!PageDirty(page))
  1315. SetPageDirty(page);
  1316. err = 0;
  1317. } else if (pages_identical(page, kpage))
  1318. err = replace_page(vma, page, kpage, orig_pte);
  1319. }
  1320. out_unlock:
  1321. unlock_page(page);
  1322. out:
  1323. return err;
  1324. }
  1325. /*
  1326. * This function returns 0 if the pages were merged or if they are
1327. * no longer merging candidates (e.g., the VMA is stale), -EFAULT otherwise.
  1328. */
  1329. static int try_to_merge_with_zero_page(struct ksm_rmap_item *rmap_item,
  1330. struct page *page)
  1331. {
  1332. struct mm_struct *mm = rmap_item->mm;
  1333. int err = -EFAULT;
  1334. /*
  1335. * Same checksum as an empty page. We attempt to merge it with the
  1336. * appropriate zero page if the user enabled this via sysfs.
  1337. */
  1338. if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) {
  1339. struct vm_area_struct *vma;
  1340. mmap_read_lock(mm);
  1341. vma = find_mergeable_vma(mm, rmap_item->address);
  1342. if (vma) {
  1343. err = try_to_merge_one_page(vma, page,
  1344. ZERO_PAGE(rmap_item->address));
  1345. trace_ksm_merge_one_page(
  1346. page_to_pfn(ZERO_PAGE(rmap_item->address)),
  1347. rmap_item, mm, err);
  1348. } else {
  1349. /*
  1350. * If the vma is out of date, we do not need to
  1351. * continue.
  1352. */
  1353. err = 0;
  1354. }
  1355. mmap_read_unlock(mm);
  1356. }
  1357. return err;
  1358. }
  1359. /*
  1360. * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
  1361. * but no new kernel page is allocated: kpage must already be a ksm page.
  1362. *
  1363. * This function returns 0 if the pages were merged, -EFAULT otherwise.
  1364. */
  1365. static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
  1366. struct page *page, struct page *kpage)
  1367. {
  1368. struct mm_struct *mm = rmap_item->mm;
  1369. struct vm_area_struct *vma;
  1370. int err = -EFAULT;
  1371. mmap_read_lock(mm);
  1372. vma = find_mergeable_vma(mm, rmap_item->address);
  1373. if (!vma)
  1374. goto out;
  1375. err = try_to_merge_one_page(vma, page, kpage);
  1376. if (err)
  1377. goto out;
  1378. /* Unstable nid is in union with stable anon_vma: remove first */
  1379. remove_rmap_item_from_tree(rmap_item);
  1380. /* Must get reference to anon_vma while still holding mmap_lock */
  1381. rmap_item->anon_vma = vma->anon_vma;
  1382. get_anon_vma(vma->anon_vma);
  1383. out:
  1384. mmap_read_unlock(mm);
  1385. trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
  1386. rmap_item, mm, err);
  1387. return err;
  1388. }
  1389. /*
  1390. * try_to_merge_two_pages - take two identical pages and prepare them
  1391. * to be merged into one page.
  1392. *
  1393. * This function returns the kpage if we successfully merged two identical
  1394. * pages into one ksm page, NULL otherwise.
  1395. *
  1396. * Note that this function upgrades page to ksm page: if one of the pages
  1397. * is already a ksm page, try_to_merge_with_ksm_page should be used.
  1398. */
  1399. static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
  1400. struct page *page,
  1401. struct ksm_rmap_item *tree_rmap_item,
  1402. struct page *tree_page)
  1403. {
  1404. int err;
  1405. err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
  1406. if (!err) {
  1407. err = try_to_merge_with_ksm_page(tree_rmap_item,
  1408. tree_page, page);
  1409. /*
  1410. * If that fails, we have a ksm page with only one pte
  1411. * pointing to it: so break it.
  1412. */
  1413. if (err)
  1414. break_cow(rmap_item);
  1415. }
  1416. return err ? NULL : page;
  1417. }
  1418. static __always_inline
  1419. bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
  1420. {
  1421. VM_BUG_ON(stable_node->rmap_hlist_len < 0);
  1422. /*
  1423. * Check that at least one mapping still exists, otherwise
1424. * there's not much point in merging and sharing with this
  1425. * stable_node, as the underlying tree_page of the other
  1426. * sharer is going to be freed soon.
  1427. */
  1428. return stable_node->rmap_hlist_len &&
  1429. stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
  1430. }
  1431. static __always_inline
  1432. bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
  1433. {
  1434. return __is_page_sharing_candidate(stable_node, 0);
  1435. }
  1436. static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
  1437. struct ksm_stable_node **_stable_node,
  1438. struct rb_root *root,
  1439. bool prune_stale_stable_nodes)
  1440. {
  1441. struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
  1442. struct hlist_node *hlist_safe;
  1443. struct folio *folio, *tree_folio = NULL;
  1444. int found_rmap_hlist_len;
  1445. if (!prune_stale_stable_nodes ||
  1446. time_before(jiffies, stable_node->chain_prune_time +
  1447. msecs_to_jiffies(
  1448. ksm_stable_node_chains_prune_millisecs)))
  1449. prune_stale_stable_nodes = false;
  1450. else
  1451. stable_node->chain_prune_time = jiffies;
  1452. hlist_for_each_entry_safe(dup, hlist_safe,
  1453. &stable_node->hlist, hlist_dup) {
  1454. cond_resched();
  1455. /*
  1456. * We must walk all stable_node_dup to prune the stale
  1457. * stable nodes during lookup.
  1458. *
  1459. * ksm_get_folio can drop the nodes from the
  1460. * stable_node->hlist if they point to freed pages
  1461. * (that's why we do a _safe walk). The "dup"
  1462. * stable_node parameter itself will be freed from
1463. * under us if ksm_get_folio() returns NULL.
  1464. */
  1465. folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK);
  1466. if (!folio)
  1467. continue;
  1468. /* Pick the best candidate if possible. */
  1469. if (!found || (is_page_sharing_candidate(dup) &&
  1470. (!is_page_sharing_candidate(found) ||
  1471. dup->rmap_hlist_len > found_rmap_hlist_len))) {
  1472. if (found)
  1473. folio_put(tree_folio);
  1474. found = dup;
  1475. found_rmap_hlist_len = found->rmap_hlist_len;
  1476. tree_folio = folio;
  1477. /* skip put_page for found candidate */
  1478. if (!prune_stale_stable_nodes &&
  1479. is_page_sharing_candidate(found))
  1480. break;
  1481. continue;
  1482. }
  1483. folio_put(folio);
  1484. }
  1485. if (found) {
  1486. if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) {
  1487. /*
1488. * If there were more than one entry, it would
1489. * corrupt memory; better to BUG_ON. In KSM
1490. * context with no lock held it's not even
1491. * fatal.
  1492. */
  1493. BUG_ON(stable_node->hlist.first->next);
  1494. /*
  1495. * There's just one entry and it is below the
  1496. * deduplication limit so drop the chain.
  1497. */
  1498. rb_replace_node(&stable_node->node, &found->node,
  1499. root);
  1500. free_stable_node(stable_node);
  1501. ksm_stable_node_chains--;
  1502. ksm_stable_node_dups--;
  1503. /*
  1504. * NOTE: the caller depends on the stable_node
  1505. * to be equal to stable_node_dup if the chain
  1506. * was collapsed.
  1507. */
  1508. *_stable_node = found;
  1509. /*
  1510. * Just for robustness, as stable_node is
  1511. * otherwise left as a stable pointer, the
  1512. * compiler shall optimize it away at build
  1513. * time.
  1514. */
  1515. stable_node = NULL;
  1516. } else if (stable_node->hlist.first != &found->hlist_dup &&
  1517. __is_page_sharing_candidate(found, 1)) {
  1518. /*
  1519. * If the found stable_node dup can accept one
  1520. * more future merge (in addition to the one
  1521. * that is underway) and is not at the head of
  1522. * the chain, put it there so next search will
  1523. * be quicker in the !prune_stale_stable_nodes
  1524. * case.
  1525. *
  1526. * NOTE: it would be inaccurate to use nr > 1
  1527. * instead of checking the hlist.first pointer
  1528. * directly, because in the
  1529. * prune_stale_stable_nodes case "nr" isn't
  1530. * the position of the found dup in the chain,
  1531. * but the total number of dups in the chain.
  1532. */
  1533. hlist_del(&found->hlist_dup);
  1534. hlist_add_head(&found->hlist_dup,
  1535. &stable_node->hlist);
  1536. }
  1537. } else {
  1538. /* Its hlist must be empty if no one found. */
  1539. free_stable_node_chain(stable_node, root);
  1540. }
  1541. *_stable_node_dup = found;
  1542. return tree_folio;
  1543. }
  1544. /*
1545. * As with ksm_get_folio, this function can free the *_stable_node and
  1546. * *_stable_node_dup if the returned tree_page is NULL.
  1547. *
  1548. * It can also free and overwrite *_stable_node with the found
  1549. * stable_node_dup if the chain is collapsed (in which case
  1550. * *_stable_node will be equal to *_stable_node_dup like if the chain
  1551. * never existed). It's up to the caller to verify tree_page is not
  1552. * NULL before dereferencing *_stable_node or *_stable_node_dup.
  1553. *
  1554. * *_stable_node_dup is really a second output parameter of this
  1555. * function and will be overwritten in all cases, the caller doesn't
  1556. * need to initialize it.
  1557. */
  1558. static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
  1559. struct ksm_stable_node **_stable_node,
  1560. struct rb_root *root,
  1561. bool prune_stale_stable_nodes)
  1562. {
  1563. struct ksm_stable_node *stable_node = *_stable_node;
  1564. if (!is_stable_node_chain(stable_node)) {
  1565. *_stable_node_dup = stable_node;
  1566. return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK);
  1567. }
  1568. return stable_node_dup(_stable_node_dup, _stable_node, root,
  1569. prune_stale_stable_nodes);
  1570. }
  1571. static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d,
  1572. struct ksm_stable_node **s_n,
  1573. struct rb_root *root)
  1574. {
  1575. return __stable_node_chain(s_n_d, s_n, root, true);
  1576. }
  1577. static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d,
  1578. struct ksm_stable_node **s_n,
  1579. struct rb_root *root)
  1580. {
  1581. return __stable_node_chain(s_n_d, s_n, root, false);
  1582. }
  1583. /*
  1584. * stable_tree_search - search for page inside the stable tree
  1585. *
  1586. * This function checks if there is a page inside the stable tree
  1587. * with identical content to the page that we are scanning right now.
  1588. *
  1589. * This function returns the stable tree node of identical content if found,
  1590. * NULL otherwise.
  1591. */
  1592. static struct page *stable_tree_search(struct page *page)
  1593. {
  1594. int nid;
  1595. struct rb_root *root;
  1596. struct rb_node **new;
  1597. struct rb_node *parent;
  1598. struct ksm_stable_node *stable_node, *stable_node_dup;
  1599. struct ksm_stable_node *page_node;
  1600. struct folio *folio;
  1601. folio = page_folio(page);
  1602. page_node = folio_stable_node(folio);
  1603. if (page_node && page_node->head != &migrate_nodes) {
  1604. /* ksm page forked */
  1605. folio_get(folio);
  1606. return &folio->page;
  1607. }
  1608. nid = get_kpfn_nid(folio_pfn(folio));
  1609. root = root_stable_tree + nid;
  1610. again:
  1611. new = &root->rb_node;
  1612. parent = NULL;
  1613. while (*new) {
  1614. struct folio *tree_folio;
  1615. int ret;
  1616. cond_resched();
  1617. stable_node = rb_entry(*new, struct ksm_stable_node, node);
  1618. tree_folio = chain_prune(&stable_node_dup, &stable_node, root);
  1619. if (!tree_folio) {
  1620. /*
  1621. * If we walked over a stale stable_node,
  1622. * ksm_get_folio() will call rb_erase() and it
  1623. * may rebalance the tree from under us. So
  1624. * restart the search from scratch. Returning
  1625. * NULL would be safe too, but we'd generate
  1626. * false negative insertions just because some
  1627. * stable_node was stale.
  1628. */
  1629. goto again;
  1630. }
  1631. ret = memcmp_pages(page, &tree_folio->page);
  1632. folio_put(tree_folio);
  1633. parent = *new;
  1634. if (ret < 0)
  1635. new = &parent->rb_left;
  1636. else if (ret > 0)
  1637. new = &parent->rb_right;
  1638. else {
  1639. if (page_node) {
  1640. VM_BUG_ON(page_node->head != &migrate_nodes);
  1641. /*
  1642. * If the mapcount of our migrated KSM folio is
  1643. * at most 1, we can merge it with another
  1644. * KSM folio where we know that we have space
  1645. * for one more mapping without exceeding the
  1646. * ksm_max_page_sharing limit: see
  1647. * chain_prune(). This way, we can avoid adding
  1648. * this stable node to the chain.
  1649. */
  1650. if (folio_mapcount(folio) > 1)
  1651. goto chain_append;
  1652. }
  1653. if (!is_page_sharing_candidate(stable_node_dup)) {
  1654. /*
  1655. * If the stable_node is a chain and
  1656. * we got a payload match in memcmp
  1657. * but we cannot merge the scanned
  1658. * page in any of the existing
  1659. * stable_node dups because they're
1660. * all full, we need to wait for the
  1661. * scanned page to find itself a match
  1662. * in the unstable tree to create a
  1663. * brand new KSM page to add later to
  1664. * the dups of this stable_node.
  1665. */
  1666. return NULL;
  1667. }
  1668. /*
  1669. * Lock and unlock the stable_node's page (which
  1670. * might already have been migrated) so that page
  1671. * migration is sure to notice its raised count.
  1672. * It would be more elegant to return stable_node
  1673. * than kpage, but that involves more changes.
  1674. */
  1675. tree_folio = ksm_get_folio(stable_node_dup,
  1676. KSM_GET_FOLIO_TRYLOCK);
  1677. if (PTR_ERR(tree_folio) == -EBUSY)
  1678. return ERR_PTR(-EBUSY);
  1679. if (unlikely(!tree_folio))
  1680. /*
  1681. * The tree may have been rebalanced,
  1682. * so re-evaluate parent and new.
  1683. */
  1684. goto again;
  1685. folio_unlock(tree_folio);
  1686. if (get_kpfn_nid(stable_node_dup->kpfn) !=
  1687. NUMA(stable_node_dup->nid)) {
  1688. folio_put(tree_folio);
  1689. goto replace;
  1690. }
  1691. return &tree_folio->page;
  1692. }
  1693. }
  1694. if (!page_node)
  1695. return NULL;
  1696. list_del(&page_node->list);
  1697. DO_NUMA(page_node->nid = nid);
  1698. rb_link_node(&page_node->node, parent, new);
  1699. rb_insert_color(&page_node->node, root);
  1700. out:
  1701. if (is_page_sharing_candidate(page_node)) {
  1702. folio_get(folio);
  1703. return &folio->page;
  1704. } else
  1705. return NULL;
  1706. replace:
  1707. /*
  1708. * If stable_node was a chain and chain_prune collapsed it,
  1709. * stable_node has been updated to be the new regular
  1710. * stable_node. A collapse of the chain is indistinguishable
  1711. * from the case there was no chain in the stable
  1712. * rbtree. Otherwise stable_node is the chain and
  1713. * stable_node_dup is the dup to replace.
  1714. */
  1715. if (stable_node_dup == stable_node) {
  1716. VM_BUG_ON(is_stable_node_chain(stable_node_dup));
  1717. VM_BUG_ON(is_stable_node_dup(stable_node_dup));
  1718. /* there is no chain */
  1719. if (page_node) {
  1720. VM_BUG_ON(page_node->head != &migrate_nodes);
  1721. list_del(&page_node->list);
  1722. DO_NUMA(page_node->nid = nid);
  1723. rb_replace_node(&stable_node_dup->node,
  1724. &page_node->node,
  1725. root);
  1726. if (is_page_sharing_candidate(page_node))
  1727. folio_get(folio);
  1728. else
  1729. folio = NULL;
  1730. } else {
  1731. rb_erase(&stable_node_dup->node, root);
  1732. folio = NULL;
  1733. }
  1734. } else {
  1735. VM_BUG_ON(!is_stable_node_chain(stable_node));
  1736. __stable_node_dup_del(stable_node_dup);
  1737. if (page_node) {
  1738. VM_BUG_ON(page_node->head != &migrate_nodes);
  1739. list_del(&page_node->list);
  1740. DO_NUMA(page_node->nid = nid);
  1741. stable_node_chain_add_dup(page_node, stable_node);
  1742. if (is_page_sharing_candidate(page_node))
  1743. folio_get(folio);
  1744. else
  1745. folio = NULL;
  1746. } else {
  1747. folio = NULL;
  1748. }
  1749. }
  1750. stable_node_dup->head = &migrate_nodes;
  1751. list_add(&stable_node_dup->list, stable_node_dup->head);
  1752. return &folio->page;
  1753. chain_append:
  1754. /*
  1755. * If stable_node was a chain and chain_prune collapsed it,
  1756. * stable_node has been updated to be the new regular
  1757. * stable_node. A collapse of the chain is indistinguishable
  1758. * from the case there was no chain in the stable
  1759. * rbtree. Otherwise stable_node is the chain and
  1760. * stable_node_dup is the dup to replace.
  1761. */
  1762. if (stable_node_dup == stable_node) {
  1763. VM_BUG_ON(is_stable_node_dup(stable_node_dup));
  1764. /* chain is missing so create it */
  1765. stable_node = alloc_stable_node_chain(stable_node_dup,
  1766. root);
  1767. if (!stable_node)
  1768. return NULL;
  1769. }
  1770. /*
  1771. * Add this stable_node dup that was
  1772. * migrated to the stable_node chain
  1773. * of the current nid for this page
  1774. * content.
  1775. */
  1776. VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
  1777. VM_BUG_ON(page_node->head != &migrate_nodes);
  1778. list_del(&page_node->list);
  1779. DO_NUMA(page_node->nid = nid);
  1780. stable_node_chain_add_dup(page_node, stable_node);
  1781. goto out;
  1782. }
  1783. /*
  1784. * stable_tree_insert - insert stable tree node pointing to new ksm page
  1785. * into the stable tree.
  1786. *
  1787. * This function returns the stable tree node just allocated on success,
  1788. * NULL otherwise.
  1789. */
  1790. static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
  1791. {
  1792. int nid;
  1793. unsigned long kpfn;
  1794. struct rb_root *root;
  1795. struct rb_node **new;
  1796. struct rb_node *parent;
  1797. struct ksm_stable_node *stable_node, *stable_node_dup;
  1798. bool need_chain = false;
  1799. kpfn = folio_pfn(kfolio);
  1800. nid = get_kpfn_nid(kpfn);
  1801. root = root_stable_tree + nid;
  1802. again:
  1803. parent = NULL;
  1804. new = &root->rb_node;
  1805. while (*new) {
  1806. struct folio *tree_folio;
  1807. int ret;
  1808. cond_resched();
  1809. stable_node = rb_entry(*new, struct ksm_stable_node, node);
  1810. tree_folio = chain(&stable_node_dup, &stable_node, root);
  1811. if (!tree_folio) {
  1812. /*
  1813. * If we walked over a stale stable_node,
  1814. * ksm_get_folio() will call rb_erase() and it
  1815. * may rebalance the tree from under us. So
  1816. * restart the search from scratch. Returning
  1817. * NULL would be safe too, but we'd generate
  1818. * false negative insertions just because some
  1819. * stable_node was stale.
  1820. */
  1821. goto again;
  1822. }
  1823. ret = memcmp_pages(&kfolio->page, &tree_folio->page);
  1824. folio_put(tree_folio);
  1825. parent = *new;
  1826. if (ret < 0)
  1827. new = &parent->rb_left;
  1828. else if (ret > 0)
  1829. new = &parent->rb_right;
  1830. else {
  1831. need_chain = true;
  1832. break;
  1833. }
  1834. }
  1835. stable_node_dup = alloc_stable_node();
  1836. if (!stable_node_dup)
  1837. return NULL;
  1838. INIT_HLIST_HEAD(&stable_node_dup->hlist);
  1839. stable_node_dup->kpfn = kpfn;
  1840. stable_node_dup->rmap_hlist_len = 0;
  1841. DO_NUMA(stable_node_dup->nid = nid);
  1842. if (!need_chain) {
  1843. rb_link_node(&stable_node_dup->node, parent, new);
  1844. rb_insert_color(&stable_node_dup->node, root);
  1845. } else {
  1846. if (!is_stable_node_chain(stable_node)) {
  1847. struct ksm_stable_node *orig = stable_node;
  1848. /* chain is missing so create it */
  1849. stable_node = alloc_stable_node_chain(orig, root);
  1850. if (!stable_node) {
  1851. free_stable_node(stable_node_dup);
  1852. return NULL;
  1853. }
  1854. }
  1855. stable_node_chain_add_dup(stable_node_dup, stable_node);
  1856. }
  1857. folio_set_stable_node(kfolio, stable_node_dup);
  1858. return stable_node_dup;
  1859. }
  1860. /*
  1861. * unstable_tree_search_insert - search for identical page,
  1862. * else insert rmap_item into the unstable tree.
  1863. *
  1864. * This function searches for a page in the unstable tree identical to the
  1865. * page currently being scanned; and if no identical page is found in the
  1866. * tree, we insert rmap_item as a new object into the unstable tree.
  1867. *
  1868. * This function returns pointer to rmap_item found to be identical
  1869. * to the currently scanned page, NULL otherwise.
  1870. *
  1871. * This function does both searching and inserting, because they share
  1872. * the same walking algorithm in an rbtree.
  1873. */
  1874. static
  1875. struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item,
  1876. struct page *page,
  1877. struct page **tree_pagep)
  1878. {
  1879. struct rb_node **new;
  1880. struct rb_root *root;
  1881. struct rb_node *parent = NULL;
  1882. int nid;
  1883. nid = get_kpfn_nid(page_to_pfn(page));
  1884. root = root_unstable_tree + nid;
  1885. new = &root->rb_node;
  1886. while (*new) {
  1887. struct ksm_rmap_item *tree_rmap_item;
  1888. struct page *tree_page;
  1889. int ret;
  1890. cond_resched();
  1891. tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node);
  1892. tree_page = get_mergeable_page(tree_rmap_item);
  1893. if (!tree_page)
  1894. return NULL;
  1895. /*
  1896. * Don't substitute a ksm page for a forked page.
  1897. */
  1898. if (page == tree_page) {
  1899. put_page(tree_page);
  1900. return NULL;
  1901. }
  1902. ret = memcmp_pages(page, tree_page);
  1903. parent = *new;
  1904. if (ret < 0) {
  1905. put_page(tree_page);
  1906. new = &parent->rb_left;
  1907. } else if (ret > 0) {
  1908. put_page(tree_page);
  1909. new = &parent->rb_right;
  1910. } else if (!ksm_merge_across_nodes &&
  1911. page_to_nid(tree_page) != nid) {
  1912. /*
  1913. * If tree_page has been migrated to another NUMA node,
  1914. * it will be flushed out and put in the right unstable
  1915. * tree next time: only merge with it when across_nodes.
  1916. */
  1917. put_page(tree_page);
  1918. return NULL;
  1919. } else {
  1920. *tree_pagep = tree_page;
  1921. return tree_rmap_item;
  1922. }
  1923. }
  1924. rmap_item->address |= UNSTABLE_FLAG;
  1925. rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
  1926. DO_NUMA(rmap_item->nid = nid);
  1927. rb_link_node(&rmap_item->node, parent, new);
  1928. rb_insert_color(&rmap_item->node, root);
  1929. ksm_pages_unshared++;
  1930. return NULL;
  1931. }
  1932. /*
  1933. * stable_tree_append - add another rmap_item to the linked list of
  1934. * rmap_items hanging off a given node of the stable tree, all sharing
  1935. * the same ksm page.
  1936. */
  1937. static void stable_tree_append(struct ksm_rmap_item *rmap_item,
  1938. struct ksm_stable_node *stable_node,
  1939. bool max_page_sharing_bypass)
  1940. {
  1941. /*
  1942. * rmap won't find this mapping if we don't insert the
  1943. * rmap_item in the right stable_node
  1944. * duplicate. page_migration could break later if rmap breaks,
1945. * so we may as well crash here. We really need to check for
1946. * rmap_hlist_len == STABLE_NODE_CHAIN, but we may as well check
1947. * for other negative values too, as an underflow detected here
1948. * for the first time (and not when decreasing rmap_hlist_len)
1949. * would be a sign of memory corruption in the stable_node.
  1950. */
  1951. BUG_ON(stable_node->rmap_hlist_len < 0);
  1952. stable_node->rmap_hlist_len++;
  1953. if (!max_page_sharing_bypass)
  1954. /* possibly non fatal but unexpected overflow, only warn */
  1955. WARN_ON_ONCE(stable_node->rmap_hlist_len >
  1956. ksm_max_page_sharing);
  1957. rmap_item->head = stable_node;
  1958. rmap_item->address |= STABLE_FLAG;
  1959. hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
  1960. if (rmap_item->hlist.next)
  1961. ksm_pages_sharing++;
  1962. else
  1963. ksm_pages_shared++;
  1964. rmap_item->mm->ksm_merging_pages++;
  1965. }
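/*
 * Illustrative userspace sketch, not part of ksm.c: the counters bumped in
 * stable_tree_append() are exported as /sys/kernel/mm/ksm/pages_shared
 * (KSM pages in use) and /sys/kernel/mm/ksm/pages_sharing (additional
 * sites sharing them).  Function name is an example only.
 */
#include <stdio.h>

static void ksm_print_sharing_ratio_example(void)
{
        unsigned long shared = 0, sharing = 0;
        FILE *f;

        f = fopen("/sys/kernel/mm/ksm/pages_shared", "r");
        if (f) {
                if (fscanf(f, "%lu", &shared) != 1)
                        shared = 0;
                fclose(f);
        }
        f = fopen("/sys/kernel/mm/ksm/pages_sharing", "r");
        if (f) {
                if (fscanf(f, "%lu", &sharing) != 1)
                        sharing = 0;
                fclose(f);
        }
        if (shared)
                printf("pages_sharing/pages_shared = %lu/%lu (%.1fx)\n",
                       sharing, shared, (double)sharing / shared);
}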
  1966. /*
  1967. * cmp_and_merge_page - first see if page can be merged into the stable tree;
  1968. * if not, compare checksum to previous and if it's the same, see if page can
  1969. * be inserted into the unstable tree, or merged with a page already there and
  1970. * both transferred to the stable tree.
  1971. *
  1972. * @page: the page that we are searching identical page to.
  1973. * @rmap_item: the reverse mapping into the virtual address of this page
  1974. */
  1975. static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
  1976. {
  1977. struct ksm_rmap_item *tree_rmap_item;
  1978. struct page *tree_page = NULL;
  1979. struct ksm_stable_node *stable_node;
  1980. struct page *kpage;
  1981. unsigned int checksum;
  1982. int err;
  1983. bool max_page_sharing_bypass = false;
  1984. stable_node = page_stable_node(page);
  1985. if (stable_node) {
  1986. if (stable_node->head != &migrate_nodes &&
  1987. get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
  1988. NUMA(stable_node->nid)) {
  1989. stable_node_dup_del(stable_node);
  1990. stable_node->head = &migrate_nodes;
  1991. list_add(&stable_node->list, stable_node->head);
  1992. }
  1993. if (stable_node->head != &migrate_nodes &&
  1994. rmap_item->head == stable_node)
  1995. return;
  1996. /*
  1997. * If it's a KSM fork, allow it to go over the sharing limit
  1998. * without warnings.
  1999. */
  2000. if (!is_page_sharing_candidate(stable_node))
  2001. max_page_sharing_bypass = true;
  2002. } else {
  2003. remove_rmap_item_from_tree(rmap_item);
  2004. /*
  2005. * If the hash value of the page has changed from the last time
  2006. * we calculated it, this page is changing frequently: therefore we
  2007. * don't want to insert it in the unstable tree, and we don't want
  2008. * to waste our time searching for something identical to it there.
  2009. */
  2010. checksum = calc_checksum(page);
  2011. if (rmap_item->oldchecksum != checksum) {
  2012. rmap_item->oldchecksum = checksum;
  2013. return;
  2014. }
  2015. if (!try_to_merge_with_zero_page(rmap_item, page))
  2016. return;
  2017. }
  2018. /* We first start with searching the page inside the stable tree */
  2019. kpage = stable_tree_search(page);
  2020. if (kpage == page && rmap_item->head == stable_node) {
  2021. put_page(kpage);
  2022. return;
  2023. }
  2024. remove_rmap_item_from_tree(rmap_item);
  2025. if (kpage) {
  2026. if (PTR_ERR(kpage) == -EBUSY)
  2027. return;
  2028. err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
  2029. if (!err) {
  2030. /*
  2031. * The page was successfully merged:
  2032. * add its rmap_item to the stable tree.
  2033. */
  2034. lock_page(kpage);
  2035. stable_tree_append(rmap_item, page_stable_node(kpage),
  2036. max_page_sharing_bypass);
  2037. unlock_page(kpage);
  2038. }
  2039. put_page(kpage);
  2040. return;
  2041. }
  2042. tree_rmap_item =
  2043. unstable_tree_search_insert(rmap_item, page, &tree_page);
  2044. if (tree_rmap_item) {
  2045. bool split;
  2046. kpage = try_to_merge_two_pages(rmap_item, page,
  2047. tree_rmap_item, tree_page);
  2048. /*
  2049. * If both pages we tried to merge belong to the same compound
  2050. * page, then we actually ended up increasing the reference
  2051. * count of the same compound page twice, and split_huge_page
  2052. * failed.
  2053. * Here we set a flag if that happened, and we use it later to
  2054. * try split_huge_page again. Since we call put_page right
  2055. * afterwards, the reference count will be correct and
  2056. * split_huge_page should succeed.
  2057. */
  2058. split = PageTransCompound(page)
  2059. && compound_head(page) == compound_head(tree_page);
  2060. put_page(tree_page);
  2061. if (kpage) {
  2062. /*
  2063. * The pages were successfully merged: insert new
  2064. * node in the stable tree and add both rmap_items.
  2065. */
  2066. lock_page(kpage);
  2067. stable_node = stable_tree_insert(page_folio(kpage));
  2068. if (stable_node) {
  2069. stable_tree_append(tree_rmap_item, stable_node,
  2070. false);
  2071. stable_tree_append(rmap_item, stable_node,
  2072. false);
  2073. }
  2074. unlock_page(kpage);
  2075. /*
  2076. * If we fail to insert the page into the stable tree,
  2077. * we will have 2 virtual addresses that are pointing
  2078. * to a ksm page left outside the stable tree,
  2079. * in which case we need to break_cow on both.
  2080. */
  2081. if (!stable_node) {
  2082. break_cow(tree_rmap_item);
  2083. break_cow(rmap_item);
  2084. }
  2085. } else if (split) {
  2086. /*
  2087. * We are here if we tried to merge two pages and
  2088. * failed because they both belonged to the same
  2089. * compound page. We will split the page now, but no
  2090. * merging will take place.
  2091. * We do not want to add the cost of a full lock; if
  2092. * the page is locked, it is better to skip it and
  2093. * perhaps try again later.
  2094. */
  2095. if (!trylock_page(page))
  2096. return;
  2097. split_huge_page(page);
  2098. unlock_page(page);
  2099. }
  2100. }
  2101. }
  2102. static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
  2103. struct ksm_rmap_item **rmap_list,
  2104. unsigned long addr)
  2105. {
  2106. struct ksm_rmap_item *rmap_item;
  2107. while (*rmap_list) {
  2108. rmap_item = *rmap_list;
  2109. if ((rmap_item->address & PAGE_MASK) == addr)
  2110. return rmap_item;
  2111. if (rmap_item->address > addr)
  2112. break;
  2113. *rmap_list = rmap_item->rmap_list;
  2114. remove_rmap_item_from_tree(rmap_item);
  2115. free_rmap_item(rmap_item);
  2116. }
  2117. rmap_item = alloc_rmap_item();
  2118. if (rmap_item) {
  2119. /* It has already been zeroed */
  2120. rmap_item->mm = mm_slot->slot.mm;
  2121. rmap_item->mm->ksm_rmap_items++;
  2122. rmap_item->address = addr;
  2123. rmap_item->rmap_list = *rmap_list;
  2124. *rmap_list = rmap_item;
  2125. }
  2126. return rmap_item;
  2127. }
  2128. /*
2129. * Calculate how many scans can be skipped, based on the page's rmap_item
2130. * age. The age reflects how often de-duplication has already been tried
2131. * unsuccessfully: the smaller the age, the fewer scans are skipped.
  2132. *
  2133. * @age: rmap_item age of page
  2134. */
  2135. static unsigned int skip_age(rmap_age_t age)
  2136. {
  2137. if (age <= 3)
  2138. return 1;
  2139. if (age <= 5)
  2140. return 2;
  2141. if (age <= 8)
  2142. return 4;
  2143. return 8;
  2144. }
  2145. /*
  2146. * Determines if a page should be skipped for the current scan.
  2147. *
  2148. * @page: page to check
  2149. * @rmap_item: associated rmap_item of page
  2150. */
  2151. static bool should_skip_rmap_item(struct page *page,
  2152. struct ksm_rmap_item *rmap_item)
  2153. {
  2154. rmap_age_t age;
  2155. if (!ksm_smart_scan)
  2156. return false;
  2157. /*
2158. * Never skip pages that are already KSM; cmp_and_merge_page()
  2159. * will essentially ignore them, but we still have to process them
  2160. * properly.
  2161. */
  2162. if (PageKsm(page))
  2163. return false;
  2164. age = rmap_item->age;
  2165. if (age != U8_MAX)
  2166. rmap_item->age++;
  2167. /*
2168. * Smaller ages are not skipped; they need to get a chance to go
  2169. * through the different phases of the KSM merging.
  2170. */
  2171. if (age < 3)
  2172. return false;
  2173. /*
  2174. * Are we still allowed to skip? If not, then don't skip it
  2175. * and determine how much more often we are allowed to skip next.
  2176. */
  2177. if (!rmap_item->remaining_skips) {
  2178. rmap_item->remaining_skips = skip_age(age);
  2179. return false;
  2180. }
  2181. /* Skip this page */
  2182. ksm_pages_skipped++;
  2183. rmap_item->remaining_skips--;
  2184. remove_rmap_item_from_tree(rmap_item);
  2185. return true;
  2186. }
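/*
 * Illustrative userspace model, not part of ksm.c: it mirrors skip_age()
 * and the bookkeeping in should_skip_rmap_item() to show how many of N
 * scan passes would actually examine a page that never merges.  All names
 * are examples only; the real state lives in the rmap_item.
 */
#include <stdio.h>

static unsigned int example_skip_age(unsigned int age)
{
        if (age <= 3)
                return 1;
        if (age <= 5)
                return 2;
        if (age <= 8)
                return 4;
        return 8;
}

static void example_smart_scan_model(unsigned int passes)
{
        unsigned int age = 0, remaining = 0, scanned = 0, i;

        for (i = 0; i < passes; i++) {
                unsigned int cur = age;

                if (age < 255)
                        age++;
                if (cur < 3) {
                        scanned++;      /* young pages are never skipped */
                } else if (!remaining) {
                        remaining = example_skip_age(cur);
                        scanned++;      /* out of skips: scan, then re-arm */
                } else {
                        remaining--;    /* this pass is skipped */
                }
        }
        printf("scanned %u of %u passes\n", scanned, passes);
}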
  2187. static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
  2188. {
  2189. struct mm_struct *mm;
  2190. struct ksm_mm_slot *mm_slot;
  2191. struct mm_slot *slot;
  2192. struct vm_area_struct *vma;
  2193. struct ksm_rmap_item *rmap_item;
  2194. struct vma_iterator vmi;
  2195. int nid;
  2196. if (list_empty(&ksm_mm_head.slot.mm_node))
  2197. return NULL;
  2198. mm_slot = ksm_scan.mm_slot;
  2199. if (mm_slot == &ksm_mm_head) {
  2200. advisor_start_scan();
  2201. trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
  2202. /*
  2203. * A number of pages can hang around indefinitely in per-cpu
  2204. * LRU cache, raised page count preventing write_protect_page
  2205. * from merging them. Though it doesn't really matter much,
  2206. * it is puzzling to see some stuck in pages_volatile until
  2207. * other activity jostles them out, and they also prevented
  2208. * LTP's KSM test from succeeding deterministically; so drain
  2209. * them here (here rather than on entry to ksm_do_scan(),
  2210. * so we don't IPI too often when pages_to_scan is set low).
  2211. */
  2212. lru_add_drain_all();
  2213. /*
  2214. * Whereas stale stable_nodes on the stable_tree itself
  2215. * get pruned in the regular course of stable_tree_search(),
  2216. * those moved out to the migrate_nodes list can accumulate:
  2217. * so prune them once before each full scan.
  2218. */
  2219. if (!ksm_merge_across_nodes) {
  2220. struct ksm_stable_node *stable_node, *next;
  2221. struct folio *folio;
  2222. list_for_each_entry_safe(stable_node, next,
  2223. &migrate_nodes, list) {
  2224. folio = ksm_get_folio(stable_node,
  2225. KSM_GET_FOLIO_NOLOCK);
  2226. if (folio)
  2227. folio_put(folio);
  2228. cond_resched();
  2229. }
  2230. }
  2231. for (nid = 0; nid < ksm_nr_node_ids; nid++)
  2232. root_unstable_tree[nid] = RB_ROOT;
  2233. spin_lock(&ksm_mmlist_lock);
  2234. slot = list_entry(mm_slot->slot.mm_node.next,
  2235. struct mm_slot, mm_node);
  2236. mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
  2237. ksm_scan.mm_slot = mm_slot;
  2238. spin_unlock(&ksm_mmlist_lock);
  2239. /*
  2240. * Although we tested list_empty() above, a racing __ksm_exit
  2241. * of the last mm on the list may have removed it since then.
  2242. */
  2243. if (mm_slot == &ksm_mm_head)
  2244. return NULL;
  2245. next_mm:
  2246. ksm_scan.address = 0;
  2247. ksm_scan.rmap_list = &mm_slot->rmap_list;
  2248. }
  2249. slot = &mm_slot->slot;
  2250. mm = slot->mm;
  2251. vma_iter_init(&vmi, mm, ksm_scan.address);
  2252. mmap_read_lock(mm);
  2253. if (ksm_test_exit(mm))
  2254. goto no_vmas;
  2255. for_each_vma(vmi, vma) {
  2256. if (!(vma->vm_flags & VM_MERGEABLE))
  2257. continue;
  2258. if (ksm_scan.address < vma->vm_start)
  2259. ksm_scan.address = vma->vm_start;
  2260. if (!vma->anon_vma)
  2261. ksm_scan.address = vma->vm_end;
  2262. while (ksm_scan.address < vma->vm_end) {
  2263. struct page *tmp_page = NULL;
  2264. struct folio_walk fw;
  2265. struct folio *folio;
  2266. if (ksm_test_exit(mm))
  2267. break;
  2268. folio = folio_walk_start(&fw, vma, ksm_scan.address, 0);
  2269. if (folio) {
  2270. if (!folio_is_zone_device(folio) &&
  2271. folio_test_anon(folio)) {
  2272. folio_get(folio);
  2273. tmp_page = fw.page;
  2274. }
  2275. folio_walk_end(&fw, vma);
  2276. }
  2277. if (tmp_page) {
  2278. flush_anon_page(vma, tmp_page, ksm_scan.address);
  2279. flush_dcache_page(tmp_page);
  2280. rmap_item = get_next_rmap_item(mm_slot,
  2281. ksm_scan.rmap_list, ksm_scan.address);
  2282. if (rmap_item) {
  2283. ksm_scan.rmap_list =
  2284. &rmap_item->rmap_list;
  2285. if (should_skip_rmap_item(tmp_page, rmap_item)) {
  2286. folio_put(folio);
  2287. goto next_page;
  2288. }
  2289. ksm_scan.address += PAGE_SIZE;
  2290. *page = tmp_page;
  2291. } else {
  2292. folio_put(folio);
  2293. }
  2294. mmap_read_unlock(mm);
  2295. return rmap_item;
  2296. }
  2297. next_page:
  2298. ksm_scan.address += PAGE_SIZE;
  2299. cond_resched();
  2300. }
  2301. }
  2302. if (ksm_test_exit(mm)) {
  2303. no_vmas:
  2304. ksm_scan.address = 0;
  2305. ksm_scan.rmap_list = &mm_slot->rmap_list;
  2306. }
  2307. /*
  2308. * Nuke all the rmap_items that are above this current rmap:
  2309. * because there were no VM_MERGEABLE vmas with such addresses.
  2310. */
  2311. remove_trailing_rmap_items(ksm_scan.rmap_list);
  2312. spin_lock(&ksm_mmlist_lock);
  2313. slot = list_entry(mm_slot->slot.mm_node.next,
  2314. struct mm_slot, mm_node);
  2315. ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
  2316. if (ksm_scan.address == 0) {
  2317. /*
  2318. * We've completed a full scan of all vmas, holding mmap_lock
  2319. * throughout, and found no VM_MERGEABLE: so do the same as
  2320. * __ksm_exit does to remove this mm from all our lists now.
  2321. * This applies either when cleaning up after __ksm_exit
  2322. * (but beware: we can reach here even before __ksm_exit),
  2323. * or when all VM_MERGEABLE areas have been unmapped (and
  2324. * mmap_lock then protects against race with MADV_MERGEABLE).
  2325. */
  2326. hash_del(&mm_slot->slot.hash);
  2327. list_del(&mm_slot->slot.mm_node);
  2328. spin_unlock(&ksm_mmlist_lock);
  2329. mm_slot_free(mm_slot_cache, mm_slot);
  2330. clear_bit(MMF_VM_MERGEABLE, &mm->flags);
  2331. clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
  2332. mmap_read_unlock(mm);
  2333. mmdrop(mm);
  2334. } else {
  2335. mmap_read_unlock(mm);
  2336. /*
2337. * mmap_read_unlock(mm) first, because once
2338. * spin_unlock(&ksm_mmlist_lock) has run, the "mm" may
  2339. * already have been freed under us by __ksm_exit()
  2340. * because the "mm_slot" is still hashed and
  2341. * ksm_scan.mm_slot doesn't point to it anymore.
  2342. */
  2343. spin_unlock(&ksm_mmlist_lock);
  2344. }
  2345. /* Repeat until we've completed scanning the whole list */
  2346. mm_slot = ksm_scan.mm_slot;
  2347. if (mm_slot != &ksm_mm_head)
  2348. goto next_mm;
  2349. advisor_stop_scan();
  2350. trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
  2351. ksm_scan.seqnr++;
  2352. return NULL;
  2353. }
  2354. /**
  2355. * ksm_do_scan - the ksm scanner main worker function.
  2356. * @scan_npages: number of pages we want to scan before we return.
  2357. */
  2358. static void ksm_do_scan(unsigned int scan_npages)
  2359. {
  2360. struct ksm_rmap_item *rmap_item;
  2361. struct page *page;
  2362. while (scan_npages-- && likely(!freezing(current))) {
  2363. cond_resched();
  2364. rmap_item = scan_get_next_rmap_item(&page);
  2365. if (!rmap_item)
  2366. return;
  2367. cmp_and_merge_page(page, rmap_item);
  2368. put_page(page);
  2369. ksm_pages_scanned++;
  2370. }
  2371. }
  2372. static int ksmd_should_run(void)
  2373. {
  2374. return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node);
  2375. }
  2376. static int ksm_scan_thread(void *nothing)
  2377. {
  2378. unsigned int sleep_ms;
  2379. set_freezable();
  2380. set_user_nice(current, 5);
  2381. while (!kthread_should_stop()) {
  2382. mutex_lock(&ksm_thread_mutex);
  2383. wait_while_offlining();
  2384. if (ksmd_should_run())
  2385. ksm_do_scan(ksm_thread_pages_to_scan);
  2386. mutex_unlock(&ksm_thread_mutex);
  2387. if (ksmd_should_run()) {
  2388. sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
  2389. wait_event_freezable_timeout(ksm_iter_wait,
  2390. sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
  2391. msecs_to_jiffies(sleep_ms));
  2392. } else {
  2393. wait_event_freezable(ksm_thread_wait,
  2394. ksmd_should_run() || kthread_should_stop());
  2395. }
  2396. }
  2397. return 0;
  2398. }
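/*
 * Illustrative userspace sketch, not part of ksm.c: the loop above
 * re-reads sleep_millisecs on every iteration and pages_to_scan on every
 * batch, so both /sys/kernel/mm/ksm/ knobs can be retuned while ksmd is
 * running.  Helper name and the values in the usage comment are examples
 * only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_ksm_knob_example(const char *knob, const char *val)
{
        char path[128];
        int fd, ret = 0;

        snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", knob);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) < 0)
                ret = -1;
        close(fd);
        return ret;
}

/*
 * Example: scan more aggressively (takes effect without restarting ksmd):
 *      write_ksm_knob_example("pages_to_scan", "1000");
 *      write_ksm_knob_example("sleep_millisecs", "20");
 */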
  2399. static void __ksm_add_vma(struct vm_area_struct *vma)
  2400. {
  2401. unsigned long vm_flags = vma->vm_flags;
  2402. if (vm_flags & VM_MERGEABLE)
  2403. return;
  2404. if (vma_ksm_compatible(vma))
  2405. vm_flags_set(vma, VM_MERGEABLE);
  2406. }
  2407. static int __ksm_del_vma(struct vm_area_struct *vma)
  2408. {
  2409. int err;
  2410. if (!(vma->vm_flags & VM_MERGEABLE))
  2411. return 0;
  2412. if (vma->anon_vma) {
  2413. err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);
  2414. if (err)
  2415. return err;
  2416. }
  2417. vm_flags_clear(vma, VM_MERGEABLE);
  2418. return 0;
  2419. }
  2420. /**
  2421. * ksm_add_vma - Mark vma as mergeable if compatible
  2422. *
  2423. * @vma: Pointer to vma
  2424. */
  2425. void ksm_add_vma(struct vm_area_struct *vma)
  2426. {
  2427. struct mm_struct *mm = vma->vm_mm;
  2428. if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
  2429. __ksm_add_vma(vma);
  2430. }
  2431. static void ksm_add_vmas(struct mm_struct *mm)
  2432. {
  2433. struct vm_area_struct *vma;
  2434. VMA_ITERATOR(vmi, mm, 0);
  2435. for_each_vma(vmi, vma)
  2436. __ksm_add_vma(vma);
  2437. }
  2438. static int ksm_del_vmas(struct mm_struct *mm)
  2439. {
  2440. struct vm_area_struct *vma;
  2441. int err;
  2442. VMA_ITERATOR(vmi, mm, 0);
  2443. for_each_vma(vmi, vma) {
  2444. err = __ksm_del_vma(vma);
  2445. if (err)
  2446. return err;
  2447. }
  2448. return 0;
  2449. }
  2450. /**
  2451. * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
2452. * compatible VMAs
  2453. *
  2454. * @mm: Pointer to mm
  2455. *
  2456. * Returns 0 on success, otherwise error code
  2457. */
  2458. int ksm_enable_merge_any(struct mm_struct *mm)
  2459. {
  2460. int err;
  2461. if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
  2462. return 0;
  2463. if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
  2464. err = __ksm_enter(mm);
  2465. if (err)
  2466. return err;
  2467. }
  2468. set_bit(MMF_VM_MERGE_ANY, &mm->flags);
  2469. ksm_add_vmas(mm);
  2470. return 0;
  2471. }
  2472. /**
2473. * ksm_disable_merge_any - Disable merging on all compatible VMAs of the mm,
  2474. * previously enabled via ksm_enable_merge_any().
  2475. *
  2476. * Disabling merging implies unmerging any merged pages, like setting
  2477. * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
2478. * merging on all compatible VMAs remains enabled.
  2479. *
  2480. * @mm: Pointer to mm
  2481. *
  2482. * Returns 0 on success, otherwise error code
  2483. */
  2484. int ksm_disable_merge_any(struct mm_struct *mm)
  2485. {
  2486. int err;
  2487. if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
  2488. return 0;
  2489. err = ksm_del_vmas(mm);
  2490. if (err) {
  2491. ksm_add_vmas(mm);
  2492. return err;
  2493. }
  2494. clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
  2495. return 0;
  2496. }
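/*
 * Illustrative userspace sketch, not part of ksm.c:
 * ksm_enable_merge_any()/ksm_disable_merge_any() back the
 * PR_SET_MEMORY_MERGE prctl.  The fallback constants below are believed to
 * match the uapi values; verify against <linux/prctl.h> if the header is
 * older.  Function name is an example only.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE     67
#endif
#ifndef PR_GET_MEMORY_MERGE
#define PR_GET_MEMORY_MERGE     68
#endif

static int process_ksm_opt_in_example(void)
{
        /* Mark every current and future compatible VMA as mergeable. */
        if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0)) {
                perror("PR_SET_MEMORY_MERGE");
                return -1;
        }
        return prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0); /* should read back 1 */
}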
  2497. int ksm_disable(struct mm_struct *mm)
  2498. {
  2499. mmap_assert_write_locked(mm);
  2500. if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
  2501. return 0;
  2502. if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
  2503. return ksm_disable_merge_any(mm);
  2504. return ksm_del_vmas(mm);
  2505. }
  2506. int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
  2507. unsigned long end, int advice, unsigned long *vm_flags)
  2508. {
  2509. struct mm_struct *mm = vma->vm_mm;
  2510. int err;
  2511. switch (advice) {
  2512. case MADV_MERGEABLE:
  2513. if (vma->vm_flags & VM_MERGEABLE)
  2514. return 0;
  2515. if (!vma_ksm_compatible(vma))
  2516. return 0;
  2517. if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
  2518. err = __ksm_enter(mm);
  2519. if (err)
  2520. return err;
  2521. }
  2522. *vm_flags |= VM_MERGEABLE;
  2523. break;
  2524. case MADV_UNMERGEABLE:
  2525. if (!(*vm_flags & VM_MERGEABLE))
  2526. return 0; /* just ignore the advice */
  2527. if (vma->anon_vma) {
  2528. err = unmerge_ksm_pages(vma, start, end, true);
  2529. if (err)
  2530. return err;
  2531. }
  2532. *vm_flags &= ~VM_MERGEABLE;
  2533. break;
  2534. }
  2535. return 0;
  2536. }
  2537. EXPORT_SYMBOL_GPL(ksm_madvise);
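/*
 * Illustrative userspace sketch, not part of ksm.c: ksm_madvise() is
 * reached via madvise(MADV_MERGEABLE).  Two identical anonymous pages
 * become merge candidates once advised; ksmd must also be running
 * (/sys/kernel/mm/ksm/run set to 1) for a merge to happen.  Function name
 * is an example only.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static int madvise_mergeable_example(void)
{
        long page = sysconf(_SC_PAGESIZE);
        size_t len = 2 * (size_t)page;
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return -1;
        memset(buf, 0x5a, len);         /* two pages, identical contents */
        if (madvise(buf, len, MADV_MERGEABLE)) {
                perror("MADV_MERGEABLE");
                munmap(buf, len);
                return -1;
        }
        /* ksmd will pick up the VMA on one of its next scan passes. */
        return 0;
}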
  2538. int __ksm_enter(struct mm_struct *mm)
  2539. {
  2540. struct ksm_mm_slot *mm_slot;
  2541. struct mm_slot *slot;
  2542. int needs_wakeup;
  2543. mm_slot = mm_slot_alloc(mm_slot_cache);
  2544. if (!mm_slot)
  2545. return -ENOMEM;
  2546. slot = &mm_slot->slot;
  2547. /* Check ksm_run too? Would need tighter locking */
  2548. needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node);
  2549. spin_lock(&ksm_mmlist_lock);
  2550. mm_slot_insert(mm_slots_hash, mm, slot);
  2551. /*
  2552. * When KSM_RUN_MERGE (or KSM_RUN_STOP),
  2553. * insert just behind the scanning cursor, to let the area settle
  2554. * down a little; when fork is followed by immediate exec, we don't
  2555. * want ksmd to waste time setting up and tearing down an rmap_list.
  2556. *
  2557. * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
  2558. * scanning cursor, otherwise KSM pages in newly forked mms will be
  2559. * missed: then we might as well insert at the end of the list.
  2560. */
  2561. if (ksm_run & KSM_RUN_UNMERGE)
  2562. list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node);
  2563. else
  2564. list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
  2565. spin_unlock(&ksm_mmlist_lock);
  2566. set_bit(MMF_VM_MERGEABLE, &mm->flags);
  2567. mmgrab(mm);
  2568. if (needs_wakeup)
  2569. wake_up_interruptible(&ksm_thread_wait);
  2570. trace_ksm_enter(mm);
  2571. return 0;
  2572. }
void __ksm_exit(struct mm_struct *mm)
{
        struct ksm_mm_slot *mm_slot;
        struct mm_slot *slot;
        int easy_to_free = 0;

        /*
         * This process is exiting: if it's straightforward (as is the
         * case when ksmd was never running), free mm_slot immediately.
         * But if it's at the cursor or has rmap_items linked to it, use
         * mmap_lock to synchronize with any break_cows before pagetables
         * are freed, and leave the mm_slot on the list for ksmd to free.
         * Beware: ksm may already have noticed it exiting and freed the slot.
         */

        spin_lock(&ksm_mmlist_lock);
        slot = mm_slot_lookup(mm_slots_hash, mm);
        mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
        if (mm_slot && ksm_scan.mm_slot != mm_slot) {
                if (!mm_slot->rmap_list) {
                        hash_del(&slot->hash);
                        list_del(&slot->mm_node);
                        easy_to_free = 1;
                } else {
                        list_move(&slot->mm_node,
                                  &ksm_scan.mm_slot->slot.mm_node);
                }
        }
        spin_unlock(&ksm_mmlist_lock);

        if (easy_to_free) {
                mm_slot_free(mm_slot_cache, mm_slot);
                clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
                clear_bit(MMF_VM_MERGEABLE, &mm->flags);
                mmdrop(mm);
        } else if (mm_slot) {
                mmap_write_lock(mm);
                mmap_write_unlock(mm);
        }

        trace_ksm_exit(mm);
}
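
/*
 * ksm_might_need_to_copy() is used on the swap-in path: a page that was
 * a KSM page when it was swapped out may not be reusable as-is in the
 * faulting VMA, so the caller gets back either the original folio (no
 * copy needed), a freshly allocated copy, or an ERR_PTR if the source
 * page is hardware-poisoned.
 */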
struct folio *ksm_might_need_to_copy(struct folio *folio,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page = folio_page(folio, 0);
        struct anon_vma *anon_vma = folio_anon_vma(folio);
        struct folio *new_folio;

        if (folio_test_large(folio))
                return folio;

        if (folio_test_ksm(folio)) {
                if (folio_stable_node(folio) &&
                    !(ksm_run & KSM_RUN_UNMERGE))
                        return folio;   /* no need to copy it */
        } else if (!anon_vma) {
                return folio;           /* no need to copy it */
        } else if (folio->index == linear_page_index(vma, addr) &&
                        anon_vma->root == vma->anon_vma->root) {
                return folio;           /* still no need to copy it */
        }
        if (PageHWPoison(page))
                return ERR_PTR(-EHWPOISON);
        if (!folio_test_uptodate(folio))
                return folio;           /* let do_swap_page report the error */

        new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
        if (new_folio &&
            mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
                folio_put(new_folio);
                new_folio = NULL;
        }
        if (new_folio) {
                if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
                                          addr, vma)) {
                        folio_put(new_folio);
                        return ERR_PTR(-EHWPOISON);
                }
                folio_set_dirty(new_folio);
                __folio_mark_uptodate(new_folio);
                __folio_set_locked(new_folio);
#ifdef CONFIG_SWAP
                count_vm_event(KSM_SWPIN_COPY);
#endif
        }

        return new_folio;
}
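
/*
 * rmap_walk_ksm() is KSM's part of the generic rmap walk: for a stable
 * KSM folio it visits every rmap_item hanging off the stable node and
 * invokes rwc->rmap_one() on each VMA that still maps the page.  The
 * second pass (search_new_forks) also covers mms forked after ksmd last
 * scanned the area.
 */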
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
{
        struct ksm_stable_node *stable_node;
        struct ksm_rmap_item *rmap_item;
        int search_new_forks = 0;

        VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);

        /*
         * Rely on the page lock to protect against concurrent modifications
         * to that page's node of the stable tree.
         */
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

        stable_node = folio_stable_node(folio);
        if (!stable_node)
                return;
again:
        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;

                cond_resched();
                if (!anon_vma_trylock_read(anon_vma)) {
                        if (rwc->try_lock) {
                                rwc->contended = true;
                                return;
                        }
                        anon_vma_lock_read(anon_vma);
                }
                anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
                                               0, ULONG_MAX) {
                        unsigned long addr;

                        cond_resched();
                        vma = vmac->vma;

                        /* Ignore the stable/unstable/seqnr flags */
                        addr = rmap_item->address & PAGE_MASK;

                        if (addr < vma->vm_start || addr >= vma->vm_end)
                                continue;
                        /*
                         * Initially we examine only the vma which covers this
                         * rmap_item; but later, if there is still work to do,
                         * we examine covering vmas in other mms: in case they
                         * were forked from the original since ksmd passed.
                         */
                        if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
                                continue;

                        if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                                continue;

                        if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
                                anon_vma_unlock_read(anon_vma);
                                return;
                        }
                        if (rwc->done && rwc->done(folio)) {
                                anon_vma_unlock_read(anon_vma);
                                return;
                        }
                }
                anon_vma_unlock_read(anon_vma);
        }
        if (!search_new_forks++)
                goto again;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Collect the processes to be killed when a memory error hits a KSM page.
 */
void collect_procs_ksm(struct folio *folio, struct page *page,
                       struct list_head *to_kill, int force_early)
{
        struct ksm_stable_node *stable_node;
        struct ksm_rmap_item *rmap_item;
        struct vm_area_struct *vma;
        struct task_struct *tsk;

        stable_node = folio_stable_node(folio);
        if (!stable_node)
                return;
        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                struct anon_vma *av = rmap_item->anon_vma;

                anon_vma_lock_read(av);
                rcu_read_lock();
                for_each_process(tsk) {
                        struct anon_vma_chain *vmac;
                        unsigned long addr;
                        struct task_struct *t =
                                task_early_kill(tsk, force_early);
                        if (!t)
                                continue;
                        anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0,
                                                       ULONG_MAX)
                        {
                                vma = vmac->vma;
                                if (vma->vm_mm == t->mm) {
                                        addr = rmap_item->address & PAGE_MASK;
                                        add_to_kill_ksm(t, page, vma, to_kill,
                                                        addr);
                                }
                        }
                }
                rcu_read_unlock();
                anon_vma_unlock_read(av);
        }
}
#endif

#ifdef CONFIG_MIGRATION
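/*
 * folio_migrate_ksm() is called by page migration once the new folio has
 * taken over: the stable node's kpfn is repointed at the new physical
 * page, with an smp_wmb() ordering that against the old folio's mapping
 * going stale.
 */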
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
{
        struct ksm_stable_node *stable_node;

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
        VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);

        stable_node = folio_stable_node(folio);
        if (stable_node) {
                VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
                stable_node->kpfn = folio_pfn(newfolio);
                /*
                 * newfolio->mapping was set in advance; now we need smp_wmb()
                 * to make sure that the new stable_node->kpfn is visible
                 * to ksm_get_folio() before it can see that folio->mapping
                 * has gone stale (or that the swapcache flag has been cleared).
                 */
                smp_wmb();
                folio_set_stable_node(folio, NULL);
        }
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_HOTREMOVE
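/*
 * While memory is going offline, ksm_memory_callback() below sets
 * KSM_RUN_OFFLINE in ksm_run; callers holding ksm_thread_mutex (such as
 * the sysfs stores further down) drop the mutex here and sleep until the
 * bit is cleared again.
 */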
static void wait_while_offlining(void)
{
        while (ksm_run & KSM_RUN_OFFLINE) {
                mutex_unlock(&ksm_thread_mutex);
                wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
                            TASK_UNINTERRUPTIBLE);
                mutex_lock(&ksm_thread_mutex);
        }
}

static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
                                         unsigned long start_pfn,
                                         unsigned long end_pfn)
{
        if (stable_node->kpfn >= start_pfn &&
            stable_node->kpfn < end_pfn) {
                /*
                 * Don't ksm_get_folio, page has already gone:
                 * which is why we keep kpfn instead of page*
                 */
                remove_node_from_stable_tree(stable_node);
                return true;
        }
        return false;
}

static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node,
                                           unsigned long start_pfn,
                                           unsigned long end_pfn,
                                           struct rb_root *root)
{
        struct ksm_stable_node *dup;
        struct hlist_node *hlist_safe;

        if (!is_stable_node_chain(stable_node)) {
                VM_BUG_ON(is_stable_node_dup(stable_node));
                return stable_node_dup_remove_range(stable_node, start_pfn,
                                                    end_pfn);
        }

        hlist_for_each_entry_safe(dup, hlist_safe,
                                  &stable_node->hlist, hlist_dup) {
                VM_BUG_ON(!is_stable_node_dup(dup));
                stable_node_dup_remove_range(dup, start_pfn, end_pfn);
        }
        if (hlist_empty(&stable_node->hlist)) {
                free_stable_node_chain(stable_node, root);
                return true; /* notify caller that tree was rebalanced */
        } else
                return false;
}

static void ksm_check_stable_tree(unsigned long start_pfn,
                                  unsigned long end_pfn)
{
        struct ksm_stable_node *stable_node, *next;
        struct rb_node *node;
        int nid;

        for (nid = 0; nid < ksm_nr_node_ids; nid++) {
                node = rb_first(root_stable_tree + nid);
                while (node) {
                        stable_node = rb_entry(node, struct ksm_stable_node, node);
                        if (stable_node_chain_remove_range(stable_node,
                                                           start_pfn, end_pfn,
                                                           root_stable_tree +
                                                           nid))
                                node = rb_first(root_stable_tree + nid);
                        else
                                node = rb_next(node);
                        cond_resched();
                }
        }
        list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
                if (stable_node->kpfn >= start_pfn &&
                    stable_node->kpfn < end_pfn)
                        remove_node_from_stable_tree(stable_node);
                cond_resched();
        }
}
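
/*
 * Memory-hotplug notifier: quiesce the scanner for the duration of an
 * offline operation, then prune any stable nodes whose pages fell inside
 * the offlined pfn range.
 */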
static int ksm_memory_callback(struct notifier_block *self,
                               unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;

        switch (action) {
        case MEM_GOING_OFFLINE:
                /*
                 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
                 * and remove_all_stable_nodes() while memory is going offline:
                 * it is unsafe for them to touch the stable tree at this time.
                 * But unmerge_ksm_pages(), rmap lookups and other entry points
                 * which do not need the ksm_thread_mutex are all safe.
                 */
                mutex_lock(&ksm_thread_mutex);
                ksm_run |= KSM_RUN_OFFLINE;
                mutex_unlock(&ksm_thread_mutex);
                break;

        case MEM_OFFLINE:
                /*
                 * Most of the work is done by page migration; but there might
                 * be a few stable_nodes left over, still pointing to struct
                 * pages which have been offlined: prune those from the tree,
                 * otherwise ksm_get_folio() might later try to access a
                 * non-existent struct page.
                 */
                ksm_check_stable_tree(mn->start_pfn,
                                      mn->start_pfn + mn->nr_pages);
                fallthrough;
        case MEM_CANCEL_OFFLINE:
                mutex_lock(&ksm_thread_mutex);
                ksm_run &= ~KSM_RUN_OFFLINE;
                mutex_unlock(&ksm_thread_mutex);

                smp_mb();       /* wake_up_bit advises this */
                wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
                break;
        }
        return NOTIFY_OK;
}
#else
static void wait_while_offlining(void)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_PROC_FS
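/*
 * Per-process KSM profit, as exposed through /proc/<pid>/ksm_stat:
 * bytes saved by merged and zero-backed pages, minus the metadata cost
 * of this mm's rmap_items.
 */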
long ksm_process_profit(struct mm_struct *mm)
{
        return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
                mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
        static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
        static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
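
/*
 * The attributes defined below are grouped under /sys/kernel/mm/ksm/
 * (see ksm_attr_group at the end of this section).  KSM_ATTR() creates a
 * read-write attribute backed by the matching _show()/_store() pair,
 * KSM_ATTR_RO() a read-only one.
 */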
static ssize_t sleep_millisecs_show(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
                                     struct kobj_attribute *attr,
                                     const char *buf, size_t count)
{
        unsigned int msecs;
        int err;

        err = kstrtouint(buf, 10, &msecs);
        if (err)
                return -EINVAL;

        ksm_thread_sleep_millisecs = msecs;
        wake_up_interruptible(&ksm_iter_wait);

        return count;
}
KSM_ATTR(sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count)
{
        unsigned int nr_pages;
        int err;

        if (ksm_advisor != KSM_ADVISOR_NONE)
                return -EINVAL;

        err = kstrtouint(buf, 10, &nr_pages);
        if (err)
                return -EINVAL;

        ksm_thread_pages_to_scan = nr_pages;

        return count;
}
KSM_ATTR(pages_to_scan);

static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
                        char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t count)
{
        unsigned int flags;
        int err;

        err = kstrtouint(buf, 10, &flags);
        if (err)
                return -EINVAL;
        if (flags > KSM_RUN_UNMERGE)
                return -EINVAL;

        /*
         * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
         * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
         * breaking COW to free the pages_shared (but leaves mm_slots
         * on the list for when ksmd may be set running again).
         */

        mutex_lock(&ksm_thread_mutex);
        wait_while_offlining();
        if (ksm_run != flags) {
                ksm_run = flags;
                if (flags & KSM_RUN_UNMERGE) {
                        set_current_oom_origin();
                        err = unmerge_and_remove_all_rmap_items();
                        clear_current_oom_origin();
                        if (err) {
                                ksm_run = KSM_RUN_STOP;
                                count = err;
                        }
                }
        }
        mutex_unlock(&ksm_thread_mutex);

        if (flags & KSM_RUN_MERGE)
                wake_up_interruptible(&ksm_thread_wait);

        return count;
}
KSM_ATTR(run);
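
/*
 * For example, from the shell:
 *      echo 1 > /sys/kernel/mm/ksm/run    # start ksmd (KSM_RUN_MERGE)
 *      echo 0 > /sys/kernel/mm/ksm/run    # stop ksmd (KSM_RUN_STOP)
 *      echo 2 > /sys/kernel/mm/ksm/run    # unmerge everything (KSM_RUN_UNMERGE)
 */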

#ifdef CONFIG_NUMA
static ssize_t merge_across_nodes_show(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
}

static ssize_t merge_across_nodes_store(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        const char *buf, size_t count)
{
        int err;
        unsigned long knob;

        err = kstrtoul(buf, 10, &knob);
        if (err)
                return err;
        if (knob > 1)
                return -EINVAL;

        mutex_lock(&ksm_thread_mutex);
        wait_while_offlining();
        if (ksm_merge_across_nodes != knob) {
                if (ksm_pages_shared || remove_all_stable_nodes())
                        err = -EBUSY;
                else if (root_stable_tree == one_stable_tree) {
                        struct rb_root *buf;
                        /*
                         * This is the first time that we switch away from the
                         * default of merging across nodes: must now allocate
                         * a buffer to hold as many roots as may be needed.
                         * Allocate stable and unstable together:
                         * MAXSMP NODES_SHIFT 10 will use 16kB.
                         */
                        buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
                                      GFP_KERNEL);
                        /* A zeroed rb_root is a valid empty RB_ROOT */
                        if (!buf)
                                err = -ENOMEM;
                        else {
                                root_stable_tree = buf;
                                root_unstable_tree = buf + nr_node_ids;
                                /* Stable tree is empty but not the unstable */
                                root_unstable_tree[0] = one_unstable_tree[0];
                        }
                }
                if (!err) {
                        ksm_merge_across_nodes = knob;
                        ksm_nr_node_ids = knob ? 1 : nr_node_ids;
                }
        }
        mutex_unlock(&ksm_thread_mutex);

        return err ? err : count;
}
KSM_ATTR(merge_across_nodes);
#endif

static ssize_t use_zero_pages_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
}

static ssize_t use_zero_pages_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t count)
{
        int err;
        bool value;

        err = kstrtobool(buf, &value);
        if (err)
                return -EINVAL;

        ksm_use_zero_pages = value;

        return count;
}
KSM_ATTR(use_zero_pages);

static ssize_t max_page_sharing_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
}

static ssize_t max_page_sharing_store(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t count)
{
        int err;
        int knob;

        err = kstrtoint(buf, 10, &knob);
        if (err)
                return err;
        /*
         * When a KSM page is created it is shared by 2 mappings. This
         * being a signed comparison, it implicitly verifies it's not
         * negative.
         */
        if (knob < 2)
                return -EINVAL;

        if (READ_ONCE(ksm_max_page_sharing) == knob)
                return count;

        mutex_lock(&ksm_thread_mutex);
        wait_while_offlining();
        if (ksm_max_page_sharing != knob) {
                if (ksm_pages_shared || remove_all_stable_nodes())
                        err = -EBUSY;
                else
                        ksm_max_page_sharing = knob;
        }
        mutex_unlock(&ksm_thread_mutex);

        return err ? err : count;
}
KSM_ATTR(max_page_sharing);

static ssize_t pages_scanned_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_pages_scanned);
}
KSM_ATTR_RO(pages_scanned);

static ssize_t pages_shared_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        long ksm_pages_volatile;

        ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
                                - ksm_pages_sharing - ksm_pages_unshared;
        /*
         * It was not worth any locking to calculate that statistic,
         * but it might therefore sometimes be negative: conceal that.
         */
        if (ksm_pages_volatile < 0)
                ksm_pages_volatile = 0;
        return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

static ssize_t pages_skipped_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_pages_skipped);
}
KSM_ATTR_RO(pages_skipped);

static ssize_t ksm_zero_pages_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
}
KSM_ATTR_RO(ksm_zero_pages);

static ssize_t general_profit_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        long general_profit;

        general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
                                ksm_rmap_items * sizeof(struct ksm_rmap_item);

        return sysfs_emit(buf, "%ld\n", general_profit);
}
KSM_ATTR_RO(general_profit);

static ssize_t stable_node_dups_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
}
KSM_ATTR_RO(stable_node_dups);

static ssize_t stable_node_chains_show(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
}
KSM_ATTR_RO(stable_node_chains);

static ssize_t
stable_node_chains_prune_millisecs_show(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        char *buf)
{
        return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
}

static ssize_t
stable_node_chains_prune_millisecs_store(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         const char *buf, size_t count)
{
        unsigned int msecs;
        int err;

        err = kstrtouint(buf, 10, &msecs);
        if (err)
                return -EINVAL;

        ksm_stable_node_chains_prune_millisecs = msecs;

        return count;
}
KSM_ATTR(stable_node_chains_prune_millisecs);

static ssize_t full_scans_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static ssize_t smart_scan_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%u\n", ksm_smart_scan);
}

static ssize_t smart_scan_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t count)
{
        int err;
        bool value;

        err = kstrtobool(buf, &value);
        if (err)
                return -EINVAL;

        ksm_smart_scan = value;
        return count;
}
KSM_ATTR(smart_scan);

static ssize_t advisor_mode_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
{
        const char *output;

        if (ksm_advisor == KSM_ADVISOR_NONE)
                output = "[none] scan-time";
        else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
                output = "none [scan-time]";

        return sysfs_emit(buf, "%s\n", output);
}

static ssize_t advisor_mode_store(struct kobject *kobj,
                                  struct kobj_attribute *attr, const char *buf,
                                  size_t count)
{
        enum ksm_advisor_type curr_advisor = ksm_advisor;

        if (sysfs_streq("scan-time", buf))
                ksm_advisor = KSM_ADVISOR_SCAN_TIME;
        else if (sysfs_streq("none", buf))
                ksm_advisor = KSM_ADVISOR_NONE;
        else
                return -EINVAL;

        /* Set advisor default values */
        if (curr_advisor != ksm_advisor)
                set_advisor_defaults();

        return count;
}
KSM_ATTR(advisor_mode);

static ssize_t advisor_max_cpu_show(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
}

static ssize_t advisor_max_cpu_store(struct kobject *kobj,
                                     struct kobj_attribute *attr,
                                     const char *buf, size_t count)
{
        int err;
        unsigned long value;

        err = kstrtoul(buf, 10, &value);
        if (err)
                return -EINVAL;

        ksm_advisor_max_cpu = value;
        return count;
}
KSM_ATTR(advisor_max_cpu);

static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
                                              struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
}

static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
                                               struct kobj_attribute *attr,
                                               const char *buf, size_t count)
{
        int err;
        unsigned long value;

        err = kstrtoul(buf, 10, &value);
        if (err)
                return -EINVAL;

        ksm_advisor_min_pages_to_scan = value;
        return count;
}
KSM_ATTR(advisor_min_pages_to_scan);

static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
                                              struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
}

static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
                                               struct kobj_attribute *attr,
                                               const char *buf, size_t count)
{
        int err;
        unsigned long value;

        err = kstrtoul(buf, 10, &value);
        if (err)
                return -EINVAL;

        ksm_advisor_max_pages_to_scan = value;
        return count;
}
KSM_ATTR(advisor_max_pages_to_scan);

static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
                                             struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
}

static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
                                              struct kobj_attribute *attr,
                                              const char *buf, size_t count)
{
        int err;
        unsigned long value;

        err = kstrtoul(buf, 10, &value);
        if (err)
                return -EINVAL;
        if (value < 1)
                return -EINVAL;

        ksm_advisor_target_scan_time = value;
        return count;
}
KSM_ATTR(advisor_target_scan_time);

static struct attribute *ksm_attrs[] = {
        &sleep_millisecs_attr.attr,
        &pages_to_scan_attr.attr,
        &run_attr.attr,
        &pages_scanned_attr.attr,
        &pages_shared_attr.attr,
        &pages_sharing_attr.attr,
        &pages_unshared_attr.attr,
        &pages_volatile_attr.attr,
        &pages_skipped_attr.attr,
        &ksm_zero_pages_attr.attr,
        &full_scans_attr.attr,
#ifdef CONFIG_NUMA
        &merge_across_nodes_attr.attr,
#endif
        &max_page_sharing_attr.attr,
        &stable_node_chains_attr.attr,
        &stable_node_dups_attr.attr,
        &stable_node_chains_prune_millisecs_attr.attr,
        &use_zero_pages_attr.attr,
        &general_profit_attr.attr,
        &smart_scan_attr.attr,
        &advisor_mode_attr.attr,
        &advisor_max_cpu_attr.attr,
        &advisor_min_pages_to_scan_attr.attr,
        &advisor_max_pages_to_scan_attr.attr,
        &advisor_target_scan_time_attr.attr,
        NULL,
};

static const struct attribute_group ksm_attr_group = {
        .attrs = ksm_attrs,
        .name = "ksm",
};
#endif /* CONFIG_SYSFS */
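
/*
 * ksm_init() runs at subsys_initcall time: it sets up the slab caches,
 * starts the ksmd kernel thread, exposes the "ksm" sysfs group under
 * /sys/kernel/mm/, and registers the memory-hotremove notifier.
 */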
static int __init ksm_init(void)
{
        struct task_struct *ksm_thread;
        int err;

        /* The correct value depends on page size and endianness */
        zero_checksum = calc_checksum(ZERO_PAGE(0));
        /* Default to false for backwards compatibility */
        ksm_use_zero_pages = false;

        err = ksm_slab_init();
        if (err)
                goto out;

        ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
        if (IS_ERR(ksm_thread)) {
                pr_err("ksm: creating kthread failed\n");
                err = PTR_ERR(ksm_thread);
                goto out_free;
        }

#ifdef CONFIG_SYSFS
        err = sysfs_create_group(mm_kobj, &ksm_attr_group);
        if (err) {
                pr_err("ksm: register sysfs failed\n");
                kthread_stop(ksm_thread);
                goto out_free;
        }
#else
        ksm_run = KSM_RUN_MERGE;        /* no way for user to start it */
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
        /* There is no significance to this priority 100 */
        hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI);
#endif
        return 0;

out_free:
        ksm_slab_free();
out:
        return err;
}
subsys_initcall(ksm_init);