super.c 135 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * fs/f2fs/super.c
  4. *
  5. * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  6. * http://www.samsung.com/
  7. */
  8. #include <linux/module.h>
  9. #include <linux/init.h>
  10. #include <linux/fs.h>
  11. #include <linux/fs_context.h>
  12. #include <linux/sched/mm.h>
  13. #include <linux/statfs.h>
  14. #include <linux/kthread.h>
  15. #include <linux/parser.h>
  16. #include <linux/mount.h>
  17. #include <linux/seq_file.h>
  18. #include <linux/proc_fs.h>
  19. #include <linux/random.h>
  20. #include <linux/exportfs.h>
  21. #include <linux/blkdev.h>
  22. #include <linux/quotaops.h>
  23. #include <linux/f2fs_fs.h>
  24. #include <linux/sysfs.h>
  25. #include <linux/quota.h>
  26. #include <linux/unicode.h>
  27. #include <linux/part_stat.h>
  28. #include <linux/zstd.h>
  29. #include <linux/lz4.h>
  30. #include "f2fs.h"
  31. #include "node.h"
  32. #include "segment.h"
  33. #include "xattr.h"
  34. #include "gc.h"
  35. #include "iostat.h"
  36. #define CREATE_TRACE_POINTS
  37. #include <trace/events/f2fs.h>
  38. static struct kmem_cache *f2fs_inode_cachep;
  39. #ifdef CONFIG_F2FS_FAULT_INJECTION
/* Human-readable labels for each FAULT_* injection point, indexed by type. */
const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]			= "kmalloc",
	[FAULT_KVMALLOC]		= "kvmalloc",
	[FAULT_PAGE_ALLOC]		= "page alloc",
	[FAULT_PAGE_GET]		= "page get",
	[FAULT_ALLOC_NID]		= "alloc nid",
	[FAULT_ORPHAN]			= "orphan",
	[FAULT_BLOCK]			= "no more block",
	[FAULT_DIR_DEPTH]		= "too big dir depth",
	[FAULT_EVICT_INODE]		= "evict_inode fail",
	[FAULT_TRUNCATE]		= "truncate fail",
	[FAULT_READ_IO]			= "read IO error",
	[FAULT_CHECKPOINT]		= "checkpoint error",
	[FAULT_DISCARD]			= "discard error",
	[FAULT_WRITE_IO]		= "write IO error",
	[FAULT_SLAB_ALLOC]		= "slab alloc",
	[FAULT_DQUOT_INIT]		= "dquot initialize",
	[FAULT_LOCK_OP]			= "lock_op",
	[FAULT_BLKADDR_VALIDITY]	= "invalid blkaddr",
	[FAULT_BLKADDR_CONSISTENCE]	= "inconsistent blkaddr",
	[FAULT_NO_SEGMENT]		= "no free segment",
};
  62. int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
  63. unsigned long type)
  64. {
  65. struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
  66. if (rate) {
  67. if (rate > INT_MAX)
  68. return -EINVAL;
  69. atomic_set(&ffi->inject_ops, 0);
  70. ffi->inject_rate = (int)rate;
  71. }
  72. if (type) {
  73. if (type >= BIT(FAULT_MAX))
  74. return -EINVAL;
  75. ffi->inject_type = (unsigned int)type;
  76. }
  77. if (!rate && !type)
  78. memset(ffi, 0, sizeof(struct f2fs_fault_info));
  79. else
  80. f2fs_info(sbi,
  81. "build fault injection attr: rate: %lu, type: 0x%lx",
  82. rate, type);
  83. return 0;
  84. }
  85. #endif
  86. /* f2fs-wide shrinker description */
  87. static struct shrinker *f2fs_shrinker_info;
  88. static int __init f2fs_init_shrinker(void)
  89. {
  90. f2fs_shrinker_info = shrinker_alloc(0, "f2fs-shrinker");
  91. if (!f2fs_shrinker_info)
  92. return -ENOMEM;
  93. f2fs_shrinker_info->count_objects = f2fs_shrink_count;
  94. f2fs_shrinker_info->scan_objects = f2fs_shrink_scan;
  95. shrinker_register(f2fs_shrinker_info);
  96. return 0;
  97. }
/* Release the f2fs-wide shrinker allocated in f2fs_init_shrinker(). */
static void f2fs_exit_shrinker(void)
{
	shrinker_free(f2fs_shrinker_info);
}
/*
 * Token ids for every mount option f2fs understands.  Each id is matched
 * against the corresponding pattern string in f2fs_tokens below; Opt_err
 * is the catch-all terminator for unrecognized options.
 */
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_barrier,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	/* quota options */
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	/* checkpoint options */
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_checkpoint_merge,
	Opt_nocheckpoint_merge,
	/* compression options */
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_nocompress_extension,
	Opt_compress_chksum,
	Opt_compress_mode,
	Opt_compress_cache,
	Opt_atgc,
	Opt_gc_merge,
	Opt_nogc_merge,
	Opt_discard_unit,
	Opt_memory_mode,
	Opt_age_extent_cache,
	Opt_errors,
	Opt_err,
};
/*
 * Mount-option pattern table consumed by match_token() during option
 * parsing.  Patterns with %s/%u capture an argument into a substring_t.
 * Note the bare "usrjquota="/"grpjquota="/"prjjquota=" entries (no %s)
 * map to the Opt_off* tokens that clear a journaled quota file name.
 */
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_checkpoint_merge, "checkpoint_merge"},
	{Opt_nocheckpoint_merge, "nocheckpoint_merge"},
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
	{Opt_nocompress_extension, "nocompress_extension=%s"},
	{Opt_compress_chksum, "compress_chksum"},
	{Opt_compress_mode, "compress_mode=%s"},
	{Opt_compress_cache, "compress_cache"},
	{Opt_atgc, "atgc"},
	{Opt_gc_merge, "gc_merge"},
	{Opt_nogc_merge, "nogc_merge"},
	{Opt_discard_unit, "discard_unit=%s"},
	{Opt_memory_mode, "memory=%s"},
	{Opt_age_extent_cache, "age_extent_cache"},
	{Opt_errors, "errors=%s"},
	{Opt_err, NULL},
};
/*
 * Central message printer for f2fs (presumably the backend of the
 * f2fs_err/f2fs_warn/f2fs_info macros used throughout this file — they
 * are defined in f2fs.h).  The KERN_<level> prefix embedded in @fmt is
 * extracted and re-applied in front of a uniform "F2FS-fs (<dev>): "
 * banner; @limit_rate routes the message through the ratelimited printk
 * variant instead of plain printk.
 */
void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
						const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	/* Peel the printk level off @fmt so it can be re-emitted below. */
	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;

	if (limit_rate)
		printk_ratelimited("%c%cF2FS-fs (%s): %pV\n",
			KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
	else
		printk("%c%cF2FS-fs (%s): %pV\n",
			KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}
  275. #if IS_ENABLED(CONFIG_UNICODE)
/* Maps on-disk encoding magic values to a unicode encoding name/version. */
static const struct f2fs_sb_encodings {
	__u16 magic;		/* on-disk s_encoding identifier */
	char *name;		/* encoding name, e.g. "utf8" */
	unsigned int version;	/* packed via UNICODE_AGE(maj, min, rev) */
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
};
  283. static const struct f2fs_sb_encodings *
  284. f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
  285. {
  286. __u16 magic = le16_to_cpu(sb->s_encoding);
  287. int i;
  288. for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
  289. if (magic == f2fs_sb_encoding_map[i].magic)
  290. return &f2fs_sb_encoding_map[i];
  291. return NULL;
  292. }
  293. struct kmem_cache *f2fs_cf_name_slab;
  294. static int __init f2fs_create_casefold_cache(void)
  295. {
  296. f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
  297. F2FS_NAME_LEN);
  298. return f2fs_cf_name_slab ? 0 : -ENOMEM;
  299. }
  300. static void f2fs_destroy_casefold_cache(void)
  301. {
  302. kmem_cache_destroy(f2fs_cf_name_slab);
  303. }
  304. #else
/* !CONFIG_UNICODE: casefolding is compiled out, so these are no-op stubs. */
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
  307. #endif
  308. static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
  309. {
  310. block_t limit = min((sbi->user_block_count >> 3),
  311. sbi->user_block_count - sbi->reserved_blocks);
  312. /* limit is 12.5% */
  313. if (test_opt(sbi, RESERVE_ROOT) &&
  314. F2FS_OPTION(sbi).root_reserved_blocks > limit) {
  315. F2FS_OPTION(sbi).root_reserved_blocks = limit;
  316. f2fs_info(sbi, "Reduce reserved blocks for root = %u",
  317. F2FS_OPTION(sbi).root_reserved_blocks);
  318. }
  319. if (!test_opt(sbi, RESERVE_ROOT) &&
  320. (!uid_eq(F2FS_OPTION(sbi).s_resuid,
  321. make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
  322. !gid_eq(F2FS_OPTION(sbi).s_resgid,
  323. make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
  324. f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
  325. from_kuid_munged(&init_user_ns,
  326. F2FS_OPTION(sbi).s_resuid),
  327. from_kgid_munged(&init_user_ns,
  328. F2FS_OPTION(sbi).s_resgid));
  329. }
  330. static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
  331. {
  332. if (!F2FS_OPTION(sbi).unusable_cap_perc)
  333. return;
  334. if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
  335. F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
  336. else
  337. F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
  338. F2FS_OPTION(sbi).unusable_cap_perc;
  339. f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
  340. F2FS_OPTION(sbi).unusable_cap,
  341. F2FS_OPTION(sbi).unusable_cap_perc);
  342. }
  343. static void init_once(void *foo)
  344. {
  345. struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
  346. inode_init_once(&fi->vfs_inode);
  347. }
  348. #ifdef CONFIG_QUOTA
/* Quota type names (from INITQFNAMES) used in diagnostic messages. */
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
/*
 * Record the journaled quota file name for quota type @qtype from the
 * mount-option argument in @args.  Re-specifying the same name for a
 * type is tolerated (returns 0); a conflicting name is rejected.
 * Returns 0 on success, -EINVAL on conflict or misuse, -ENOMEM if the
 * name cannot be duplicated.
 */
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;	/* default result for the errout paths */

	/* Journaled quota settings must not change while quota is active. */
	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	/* With the quota_ino feature, external quota files are ignored. */
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		/* Same name twice is OK (ret becomes 0); different name is
		 * an error (ret stays -EINVAL).  Either way free the dup. */
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}
	/* Ownership of @qname transfers to the mount options here. */
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}
  389. static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
  390. {
  391. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  392. if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
  393. f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
  394. return -EINVAL;
  395. }
  396. kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
  397. F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
  398. return 0;
  399. }
/*
 * Post-parse sanity check of the quota option combination.  Journaled
 * (qf_name based) and plain quota options must not be mixed, and a
 * journaled format must be chosen when journaled files are named.
 * Returns 0 when the combination is acceptable, -1 otherwise.
 */
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		/* a journaled file name overrides the matching plain option */
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		/* any plain option still set now mixes old and new styles */
		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	/* quota_ino feature supersedes any requested journaled format */
	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
  439. #endif
/*
 * Handle the test_dummy_encryption mount option by handing the value to
 * fscrypt.  Requires CONFIG_FS_ENCRYPTION and the on-disk encrypt
 * feature.  On remount the option may be repeated only if a dummy policy
 * is already in effect and the value does not change.
 *
 * @opt is the raw option text (for error messages), @arg its value.
 * Returns 0 on success, -EINVAL on any failure.
 */
static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct fs_parameter param = {
		.type = fs_value_is_string,
		.string = arg->from ? arg->from : "",
	};
	struct fscrypt_dummy_policy *policy =
		&F2FS_OPTION(sbi).dummy_enc_policy;
	int err;

	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
		f2fs_warn(sbi, "test_dummy_encryption option not supported");
		return -EINVAL;
	}

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");
		return -EINVAL;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount. We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !fscrypt_is_dummy_policy_set(policy)) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
		return -EINVAL;
	}

	err = fscrypt_parse_test_dummy_encryption(&param, policy);
	if (err) {
		/* -EEXIST: a different dummy policy is already set */
		if (err == -EEXIST)
			f2fs_warn(sbi,
				  "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
				  opt);
		else
			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
				  opt, err);
		return -EINVAL;
	}
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
	return 0;
}
  487. #ifdef CONFIG_F2FS_FS_COMPRESSION
  488. static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
  489. const char *new_ext, bool is_ext)
  490. {
  491. unsigned char (*ext)[F2FS_EXTENSION_LEN];
  492. int ext_cnt;
  493. int i;
  494. if (is_ext) {
  495. ext = F2FS_OPTION(sbi).extensions;
  496. ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
  497. } else {
  498. ext = F2FS_OPTION(sbi).noextensions;
  499. ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
  500. }
  501. for (i = 0; i < ext_cnt; i++) {
  502. if (!strcasecmp(new_ext, ext[i]))
  503. return true;
  504. }
  505. return false;
  506. }
  507. /*
  508. * 1. The same extension name cannot not appear in both compress and non-compress extension
  509. * at the same time.
  510. * 2. If the compress extension specifies all files, the types specified by the non-compress
  511. * extension will be treated as special cases and will not be compressed.
  512. * 3. Don't allow the non-compress extension specifies all files.
  513. */
  514. static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
  515. {
  516. unsigned char (*ext)[F2FS_EXTENSION_LEN];
  517. unsigned char (*noext)[F2FS_EXTENSION_LEN];
  518. int ext_cnt, noext_cnt, index = 0, no_index = 0;
  519. ext = F2FS_OPTION(sbi).extensions;
  520. ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
  521. noext = F2FS_OPTION(sbi).noextensions;
  522. noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
  523. if (!noext_cnt)
  524. return 0;
  525. for (no_index = 0; no_index < noext_cnt; no_index++) {
  526. if (!strcasecmp("*", noext[no_index])) {
  527. f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
  528. return -EINVAL;
  529. }
  530. for (index = 0; index < ext_cnt; index++) {
  531. if (!strcasecmp(ext[index], noext[no_index])) {
  532. f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
  533. ext[index]);
  534. return -EINVAL;
  535. }
  536. }
  537. }
  538. return 0;
  539. }
  540. #ifdef CONFIG_F2FS_FS_LZ4
  541. static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
  542. {
  543. #ifdef CONFIG_F2FS_FS_LZ4HC
  544. unsigned int level;
  545. if (strlen(str) == 3) {
  546. F2FS_OPTION(sbi).compress_level = 0;
  547. return 0;
  548. }
  549. str += 3;
  550. if (str[0] != ':') {
  551. f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
  552. return -EINVAL;
  553. }
  554. if (kstrtouint(str + 1, 10, &level))
  555. return -EINVAL;
  556. if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
  557. f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
  558. return -EINVAL;
  559. }
  560. F2FS_OPTION(sbi).compress_level = level;
  561. return 0;
  562. #else
  563. if (strlen(str) == 3) {
  564. F2FS_OPTION(sbi).compress_level = 0;
  565. return 0;
  566. }
  567. f2fs_info(sbi, "kernel doesn't support lz4hc compression");
  568. return -EINVAL;
  569. #endif
  570. }
  571. #endif
  572. #ifdef CONFIG_F2FS_FS_ZSTD
  573. static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
  574. {
  575. int level;
  576. int len = 4;
  577. if (strlen(str) == len) {
  578. F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
  579. return 0;
  580. }
  581. str += len;
  582. if (str[0] != ':') {
  583. f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
  584. return -EINVAL;
  585. }
  586. if (kstrtoint(str + 1, 10, &level))
  587. return -EINVAL;
  588. /* f2fs does not support negative compress level now */
  589. if (level < 0) {
  590. f2fs_info(sbi, "do not support negative compress level: %d", level);
  591. return -ERANGE;
  592. }
  593. if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
  594. f2fs_info(sbi, "invalid zstd compress level: %d", level);
  595. return -EINVAL;
  596. }
  597. F2FS_OPTION(sbi).compress_level = level;
  598. return 0;
  599. }
  600. #endif
  601. #endif
/*
 * parse_options - parse the comma-separated mount option string.
 * @sb:		superblock being mounted or remounted
 * @options:	raw option data from mount(2); may be NULL
 * @is_remount:	true when called from the remount path
 *
 * Fills F2FS_OPTION(sbi) and superblock flags from the recognized
 * options, then performs cross-option consistency checks at
 * default_check (reached even when @options is NULL).
 * Returns 0 on success or a negative errno on bad/conflicting options.
 */
static int parse_options(struct super_block *sb, char *options, bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	unsigned char (*noext)[F2FS_EXTENSION_LEN];
	int ext_cnt, noext_cnt;
#endif
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
	int ret;

	if (!options)
		goto default_check;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "on")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
			} else if (!strcmp(name, "off")) {
				/* zoned devices depend on background GC */
				if (f2fs_sb_has_blkzoned(sbi)) {
					f2fs_warn(sbi, "zoned devices need bggc");
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
			} else if (!strcmp(name, "sync")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, NORECOVERY);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			if (!f2fs_hw_support_discard(sbi)) {
				f2fs_warn(sbi, "device does not support discard");
				break;
			}
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			/* zoned devices must keep discard enabled */
			if (f2fs_hw_should_discard(sbi)) {
				f2fs_warn(sbi, "discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
		case Opt_heap:
			f2fs_warn(sbi, "heap/no_heap options were deprecated");
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			/* range-checked later at default_check */
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_info(sbi, "user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_info(sbi, "nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_info(sbi, "inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_info(sbi, "noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_info(sbi, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_info(sbi, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			/* only 2, 4 or the full persistent log count are valid */
			if (arg != 2 && arg != 4 &&
				arg != NR_CURSEG_PERSIST_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_barrier:
			clear_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, READ_EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, READ_EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			/* the first reserve_root= on the line wins */
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					  F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_err(sbi, "Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_err(sbi, "Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "adaptive")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
			} else if (!strcmp(name, "lfs")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
			} else if (!strcmp(name, "fragment:segment")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
			} else if (!strcmp(name, "fragment:block")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			/* arg is the injection rate; enable all fault types */
			if (f2fs_build_fault_attr(sbi, arg,
					F2FS_ALL_FAULT_TYPE))
				return -EINVAL;
			set_opt(sbi, FAULT_INJECTION);
			break;

		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			/* arg is a fault-type bitmask; rate left unchanged */
			if (f2fs_build_fault_attr(sbi, 0, arg))
				return -EINVAL;
			set_opt(sbi, FAULT_INJECTION);
			break;
#else
		case Opt_fault_injection:
			f2fs_info(sbi, "fault_injection options not supported");
			break;

		case Opt_fault_type:
			f2fs_info(sbi, "fault_type options not supported");
			break;
#endif
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_info(sbi, "quota operations not supported");
			break;
#endif
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			if (!strcmp(name, "default")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (!strcmp(name, "reuse")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "posix")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (!strcmp(name, "strict")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (!strcmp(name, "nobarrier")) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_test_dummy_encryption:
			ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
							     is_remount);
			if (ret)
				return ret;
			break;
		case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
			sb->s_flags |= SB_INLINECRYPT;
#else
			f2fs_info(sbi, "inline encryption not supported");
#endif
			break;
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < 0 || arg > 100)
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap_perc = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_merge:
			set_opt(sbi, MERGE_CHECKPOINT);
			break;
		case Opt_nocheckpoint_merge:
			clear_opt(sbi, MERGE_CHECKPOINT);
			break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		case Opt_compress_algorithm:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZO;
#else
				f2fs_info(sbi, "kernel doesn't support lzo compression");
#endif
			} else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
				ret = f2fs_set_lz4hc_level(sbi, name);
				if (ret) {
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZ4;
#else
				f2fs_info(sbi, "kernel doesn't support lz4 compression");
#endif
			} else if (!strncmp(name, "zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
				ret = f2fs_set_zstd_level(sbi, name);
				if (ret) {
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_ZSTD;
#else
				f2fs_info(sbi, "kernel doesn't support zstd compression");
#endif
			} else if (!strcmp(name, "lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZORLE;
#else
				f2fs_info(sbi, "kernel doesn't support lzorle compression");
#endif
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_log_size:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < MIN_COMPRESS_LOG_SIZE ||
				arg > MAX_COMPRESS_LOG_SIZE) {
				f2fs_err(sbi,
					"Compress cluster log size is out of range");
				return -EINVAL;
			}
			F2FS_OPTION(sbi).compress_log_size = arg;
			break;
		case Opt_compress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			ext = F2FS_OPTION(sbi).extensions;
			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
				ext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			/* silently ignore a duplicate extension */
			if (is_compress_extension_exist(sbi, name, true)) {
				kfree(name);
				break;
			}

			strcpy(ext[ext_cnt], name);
			F2FS_OPTION(sbi).compress_ext_cnt++;
			kfree(name);
			break;
		case Opt_nocompress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			noext = F2FS_OPTION(sbi).noextensions;
			noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
				noext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			/* silently ignore a duplicate extension */
			if (is_compress_extension_exist(sbi, name, false)) {
				kfree(name);
				break;
			}

			strcpy(noext[noext_cnt], name);
			F2FS_OPTION(sbi).nocompress_ext_cnt++;
			kfree(name);
			break;
		case Opt_compress_chksum:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			F2FS_OPTION(sbi).compress_chksum = true;
			break;
		case Opt_compress_mode:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "fs")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
			} else if (!strcmp(name, "user")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_cache:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			set_opt(sbi, COMPRESS_CACHE);
			break;
#else
		case Opt_compress_algorithm:
		case Opt_compress_log_size:
		case Opt_compress_extension:
		case Opt_nocompress_extension:
		case Opt_compress_chksum:
		case Opt_compress_mode:
		case Opt_compress_cache:
			f2fs_info(sbi, "compression options not supported");
			break;
#endif
		case Opt_atgc:
			set_opt(sbi, ATGC);
			break;
		case Opt_gc_merge:
			set_opt(sbi, GC_MERGE);
			break;
		case Opt_nogc_merge:
			clear_opt(sbi, GC_MERGE);
			break;
		case Opt_discard_unit:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "block")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_BLOCK;
			} else if (!strcmp(name, "segment")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_SEGMENT;
			} else if (!strcmp(name, "section")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_SECTION;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_memory_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "normal")) {
				F2FS_OPTION(sbi).memory_mode =
						MEMORY_MODE_NORMAL;
			} else if (!strcmp(name, "low")) {
				F2FS_OPTION(sbi).memory_mode =
						MEMORY_MODE_LOW;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_age_extent_cache:
			set_opt(sbi, AGE_EXTENT_CACHE);
			break;
		case Opt_errors:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "remount-ro")) {
				F2FS_OPTION(sbi).errors =
						MOUNT_ERRORS_READONLY;
			} else if (!strcmp(name, "continue")) {
				F2FS_OPTION(sbi).errors =
						MOUNT_ERRORS_CONTINUE;
			} else if (!strcmp(name, "panic")) {
				F2FS_OPTION(sbi).errors =
						MOUNT_ERRORS_PANIC;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		default:
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
				 p);
			return -EINVAL;
		}
	}
default_check:
	/* cross-option consistency checks from here down */
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif

	if (!IS_ENABLED(CONFIG_UNICODE) && f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
	if (f2fs_sb_has_blkzoned(sbi)) {
#ifdef CONFIG_BLK_DEV_ZONED
		if (F2FS_OPTION(sbi).discard_unit !=
						DISCARD_UNIT_SECTION) {
			f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
			F2FS_OPTION(sbi).discard_unit =
					DISCARD_UNIT_SECTION;
		}

		if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) {
			f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
			return -EINVAL;
		}
#else
		f2fs_err(sbi, "Zoned block device support is not enabled");
		return -EINVAL;
#endif
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_test_compress_extension(sbi)) {
		f2fs_err(sbi, "invalid compress or nocompress extension");
		return -EINVAL;
	}
#endif

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
			return -EINVAL;
		}

		min_size = MIN_INLINE_XATTR_SIZE;
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
				 min_size, max_size);
			return -EINVAL;
		}
	}

	if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "LFS is not compatible with ATGC");
		return -EINVAL;
	}

	if (f2fs_is_readonly(sbi) && test_opt(sbi, FLUSH_MERGE)) {
		f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
		return -EINVAL;
	}

	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Allow to mount readonly mode only");
		return -EROFS;
	}
	return 0;
}
/*
 * Allocate and minimally initialize an f2fs inode from the inode slab;
 * returns the embedded VFS inode, or NULL on allocation failure (or an
 * injected slab-allocation fault).
 */
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	/* fault injection hook simulating slab allocation failure */
	if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC))
		return NULL;

	fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	atomic_set(&fi->i_compr_blocks, 0);
	init_f2fs_rwsem(&fi->i_sem);
	spin_lock_init(&fi->i_size_lock);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
	init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_f2fs_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
/*
 * Decide whether @inode should be dropped from the icache when its last
 * reference goes away.  Returns 1 to drop, 0 to keep.  Called with
 * inode->i_lock held; the writeback special case below temporarily
 * releases and re-acquires it.
 */
static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			/* flush pending writes so truncation can proceed */
			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		/* keep the inode: it is still under writeback */
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
/*
 * Mark an inode's metadata dirty and account it.
 * @sync: also queue the inode on the global dirty-meta list so a
 *        checkpoint will write it back.
 * Returns 1 if the inode was already dirty, 0 if it was newly dirtied.
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	/* if atomic write is not committed, set inode w/ atomic dirty */
	if (!ret && f2fs_is_atomic_file(inode) &&
			!is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
		set_inode_flag(inode, FI_ATOMIC_DIRTIED);

	return ret;
}
  1406. void f2fs_inode_synced(struct inode *inode)
  1407. {
  1408. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  1409. spin_lock(&sbi->inode_lock[DIRTY_META]);
  1410. if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
  1411. spin_unlock(&sbi->inode_lock[DIRTY_META]);
  1412. return;
  1413. }
  1414. if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
  1415. list_del_init(&F2FS_I(inode)->gdirty_list);
  1416. dec_page_count(sbi, F2FS_DIRTY_IMETA);
  1417. }
  1418. clear_inode_flag(inode, FI_DIRTY_INODE);
  1419. clear_inode_flag(inode, FI_AUTO_RECOVER);
  1420. stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
  1421. spin_unlock(&sbi->inode_lock[DIRTY_META]);
  1422. }
  1423. /*
  1424. * f2fs_dirty_inode() is called from __mark_inode_dirty()
  1425. *
  1426. * We should call set_dirty_inode to write the dirty inode through write_inode.
  1427. */
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* the internal node/meta inodes are never tracked as dirty here */
	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	/* once dirtied again, the inode no longer qualifies for auto recovery */
	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
/* ->free_inode callback: release fscrypt state and the slab object. */
static void f2fs_free_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
/* Tear down the per-cpu counters created at mount time. */
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->total_valid_inode_count);
	percpu_counter_destroy(&sbi->rf_node_block_count);
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
}
/*
 * Release the multi-device array.  Device 0's bdev file is not put here;
 * only the additional devices (i > 0) opened by f2fs are released.
 */
static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (i > 0)
			bdev_fput(FDEV(i).bdev_file);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
#endif
	}
	kvfree(sbi->devs);
}
/*
 * ->put_super callback: unmount-time teardown.  Flushes a final
 * checkpoint if needed, waits for discards and writeback, then
 * destroys all f2fs-internal state in dependency order.
 */
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	int err = 0;
	bool done;

	/* unregister procfs/sysfs entries in advance to avoid race case */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * flush all issued checkpoints and stop checkpoint issue thread.
	 * after then, all checkpoints should be done by each process context.
	 */
	f2fs_stop_ckpt_thread(sbi);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, the previous checkpoint was not done by umount, it needs to do
	 * clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		err = f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	done = f2fs_issue_discard_timeout(sbi);

	/*
	 * if every pending discard completed, write one more checkpoint
	 * marked CP_TRIMMED so the trimmed state is recorded on disk.
	 */
	if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		err = f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip do checkpoint, we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/* drop stale node/meta pages if the final checkpoint failed */
	if (err || f2fs_cp_error(sbi)) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/* any remaining page count at this point is a reference leak */
	for (i = 0; i < NR_COUNT_TYPE; i++) {
		if (!get_pages(sbi, i))
			continue;
		f2fs_err(sbi, "detect filesystem reference count leak during "
			"umount, type: %d, count: %lld", i, get_pages(sbi, i));
		f2fs_bug_on(sbi, 1);
	}

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	f2fs_destroy_compress_inode(sbi);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	/* flush s_error_work before sbi destroy */
	flush_work(&sbi->s_error_work);

	f2fs_destroy_post_read_wq(sbi);

	kvfree(sbi->ckpt);

	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	f2fs_destroy_page_array_cache(sbi);
	f2fs_destroy_xattr_caches(sbi);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	destroy_percpu_info(sbi);
	f2fs_destroy_iostat(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
}
  1556. int f2fs_sync_fs(struct super_block *sb, int sync)
  1557. {
  1558. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  1559. int err = 0;
  1560. if (unlikely(f2fs_cp_error(sbi)))
  1561. return 0;
  1562. if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
  1563. return 0;
  1564. trace_f2fs_sync_fs(sb, sync);
  1565. if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
  1566. return -EAGAIN;
  1567. if (sync) {
  1568. stat_inc_cp_call_count(sbi, TOTAL_CALL);
  1569. err = f2fs_issue_checkpoint(sbi);
  1570. }
  1571. return err;
  1572. }
  1573. static int f2fs_freeze(struct super_block *sb)
  1574. {
  1575. if (f2fs_readonly(sb))
  1576. return 0;
  1577. /* IO error happened before */
  1578. if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
  1579. return -EIO;
  1580. /* must be clean, since sync_filesystem() was already called */
  1581. if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
  1582. return -EINVAL;
  1583. /* Let's flush checkpoints and stop the thread. */
  1584. f2fs_flush_ckpt_thread(F2FS_SB(sb));
  1585. /* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
  1586. set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
  1587. return 0;
  1588. }
  1589. static int f2fs_unfreeze(struct super_block *sb)
  1590. {
  1591. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  1592. /*
  1593. * It will update discard_max_bytes of mounted lvm device to zero
  1594. * after creating snapshot on this lvm device, let's drop all
  1595. * remained discards.
  1596. * We don't need to disable real-time discard because discard_max_bytes
  1597. * will recover after removal of snapshot.
  1598. */
  1599. if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi))
  1600. f2fs_issue_discard_timeout(sbi);
  1601. clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
  1602. return 0;
  1603. }
  1604. #ifdef CONFIG_QUOTA
  1605. static int f2fs_statfs_project(struct super_block *sb,
  1606. kprojid_t projid, struct kstatfs *buf)
  1607. {
  1608. struct kqid qid;
  1609. struct dquot *dquot;
  1610. u64 limit;
  1611. u64 curblock;
  1612. qid = make_kqid_projid(projid);
  1613. dquot = dqget(sb, qid);
  1614. if (IS_ERR(dquot))
  1615. return PTR_ERR(dquot);
  1616. spin_lock(&dquot->dq_dqb_lock);
  1617. limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
  1618. dquot->dq_dqb.dqb_bhardlimit);
  1619. limit >>= sb->s_blocksize_bits;
  1620. if (limit) {
  1621. uint64_t remaining = 0;
  1622. curblock = (dquot->dq_dqb.dqb_curspace +
  1623. dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
  1624. if (limit > curblock)
  1625. remaining = limit - curblock;
  1626. buf->f_blocks = min(buf->f_blocks, limit);
  1627. buf->f_bfree = min(buf->f_bfree, remaining);
  1628. buf->f_bavail = min(buf->f_bavail, remaining);
  1629. }
  1630. limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
  1631. dquot->dq_dqb.dqb_ihardlimit);
  1632. if (limit) {
  1633. uint64_t remaining = 0;
  1634. if (limit > dquot->dq_dqb.dqb_curinodes)
  1635. remaining = limit - dquot->dq_dqb.dqb_curinodes;
  1636. buf->f_files = min(buf->f_files, limit);
  1637. buf->f_ffree = min(buf->f_ffree, remaining);
  1638. }
  1639. spin_unlock(&dquot->dq_dqb_lock);
  1640. dqput(dquot);
  1641. return 0;
  1642. }
  1643. #endif
/*
 * ->statfs callback: fill @buf with capacity/usage figures derived from
 * the raw superblock and live counters, then optionally clamp them to
 * project quota limits.  Always returns 0.
 */
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;
	unsigned int total_valid_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;

	/* snapshot the live counters under stat_lock for consistency */
	spin_lock(&sbi->stat_lock);

	user_block_count = sbi->user_block_count;
	total_valid_node_count = valid_node_count(sbi);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	/* blocks made unusable by checkpoint=disable cannot be counted free */
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	/* f_bavail excludes the root-reserved blocks */
	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	/* inode headroom is bounded by whichever is scarcer: nodes or blocks */
	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - total_valid_node_count,
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(d_inode(dentry), FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf);
	}
#endif
	return 0;
}
  1691. static inline void f2fs_show_quota_options(struct seq_file *seq,
  1692. struct super_block *sb)
  1693. {
  1694. #ifdef CONFIG_QUOTA
  1695. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  1696. if (F2FS_OPTION(sbi).s_jquota_fmt) {
  1697. char *fmtname = "";
  1698. switch (F2FS_OPTION(sbi).s_jquota_fmt) {
  1699. case QFMT_VFS_OLD:
  1700. fmtname = "vfsold";
  1701. break;
  1702. case QFMT_VFS_V0:
  1703. fmtname = "vfsv0";
  1704. break;
  1705. case QFMT_VFS_V1:
  1706. fmtname = "vfsv1";
  1707. break;
  1708. }
  1709. seq_printf(seq, ",jqfmt=%s", fmtname);
  1710. }
  1711. if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
  1712. seq_show_option(seq, "usrjquota",
  1713. F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
  1714. if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
  1715. seq_show_option(seq, "grpjquota",
  1716. F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
  1717. if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
  1718. seq_show_option(seq, "prjjquota",
  1719. F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
  1720. #endif
  1721. }
  1722. #ifdef CONFIG_F2FS_FS_COMPRESSION
  1723. static inline void f2fs_show_compress_options(struct seq_file *seq,
  1724. struct super_block *sb)
  1725. {
  1726. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  1727. char *algtype = "";
  1728. int i;
  1729. if (!f2fs_sb_has_compression(sbi))
  1730. return;
  1731. switch (F2FS_OPTION(sbi).compress_algorithm) {
  1732. case COMPRESS_LZO:
  1733. algtype = "lzo";
  1734. break;
  1735. case COMPRESS_LZ4:
  1736. algtype = "lz4";
  1737. break;
  1738. case COMPRESS_ZSTD:
  1739. algtype = "zstd";
  1740. break;
  1741. case COMPRESS_LZORLE:
  1742. algtype = "lzo-rle";
  1743. break;
  1744. }
  1745. seq_printf(seq, ",compress_algorithm=%s", algtype);
  1746. if (F2FS_OPTION(sbi).compress_level)
  1747. seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
  1748. seq_printf(seq, ",compress_log_size=%u",
  1749. F2FS_OPTION(sbi).compress_log_size);
  1750. for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
  1751. seq_printf(seq, ",compress_extension=%s",
  1752. F2FS_OPTION(sbi).extensions[i]);
  1753. }
  1754. for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
  1755. seq_printf(seq, ",nocompress_extension=%s",
  1756. F2FS_OPTION(sbi).noextensions[i]);
  1757. }
  1758. if (F2FS_OPTION(sbi).compress_chksum)
  1759. seq_puts(seq, ",compress_chksum");
  1760. if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
  1761. seq_printf(seq, ",compress_mode=%s", "fs");
  1762. else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
  1763. seq_printf(seq, ",compress_mode=%s", "user");
  1764. if (test_opt(sbi, COMPRESS_CACHE))
  1765. seq_puts(seq, ",compress_cache");
  1766. }
  1767. #endif
/*
 * ->show_options callback: print every active mount option for
 * /proc/mounts.  The emission order is part of the user-visible output,
 * so new options are appended in their feature groups below.
 * Always returns 0.
 */
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	/* background GC mode */
	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
		seq_printf(seq, ",background_gc=%s", "sync");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
		seq_printf(seq, ",background_gc=%s", "on");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
		seq_printf(seq, ",background_gc=%s", "off");

	if (test_opt(sbi, GC_MERGE))
		seq_puts(seq, ",gc_merge");
	else
		seq_puts(seq, ",nogc_merge");

	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");

	/* discard and its granularity */
	if (test_opt(sbi, DISCARD)) {
		seq_puts(seq, ",discard");
		if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
			seq_printf(seq, ",discard_unit=%s", "block");
		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
			seq_printf(seq, ",discard_unit=%s", "segment");
		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
			seq_printf(seq, ",discard_unit=%s", "section");
	} else {
		seq_puts(seq, ",nodiscard");
	}

#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	else
		seq_puts(seq, ",noflush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	else
		seq_puts(seq, ",barrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, READ_EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, AGE_EXTENT_CACHE))
		seq_puts(seq, ",age_extent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	/* filesystem allocation mode */
	seq_puts(seq, ",mode=");
	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
		seq_puts(seq, "adaptive");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
		seq_puts(seq, "lfs");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
		seq_puts(seq, "fragment:segment");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
		seq_puts(seq, "fragment:block");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);

	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);

	if (sbi->sb->s_flags & SB_INLINECRYPT)
		seq_puts(seq, ",inlinecrypt");

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
	if (test_opt(sbi, MERGE_CHECKPOINT))
		seq_puts(seq, ",checkpoint_merge");
	else
		seq_puts(seq, ",nocheckpoint_merge");
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");

#ifdef CONFIG_F2FS_FS_COMPRESSION
	f2fs_show_compress_options(seq, sbi->sb);
#endif

	if (test_opt(sbi, ATGC))
		seq_puts(seq, ",atgc");

	if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
		seq_printf(seq, ",memory=%s", "normal");
	else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
		seq_printf(seq, ",memory=%s", "low");

	if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
		seq_printf(seq, ",errors=%s", "remount-ro");
	else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE)
		seq_printf(seq, ",errors=%s", "continue");
	else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC)
		seq_printf(seq, ",errors=%s", "panic");

	return 0;
}
/*
 * Reset mount options to their defaults.
 * @remount: when true, options that must not change across remount
 *           (extent cache, checkpoint state, discard unit) are left alone.
 */
static void default_options(struct f2fs_sb_info *sbi, bool remount)
{
	/* init some FS parameters */
	if (!remount) {
		set_opt(sbi, READ_EXTENT_CACHE);
		clear_opt(sbi, DISABLE_CHECKPOINT);

		if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
			set_opt(sbi, DISCARD);

		/* zoned devices must discard at section granularity */
		if (f2fs_sb_has_blkzoned(sbi))
			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
		else
			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
	}

	if (f2fs_sb_has_readonly(sbi))
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
	else
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;

	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;

	/* small volumes default to reuse allocation to limit fragmentation */
	if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <=
							SMALL_VOLUME_SEGMENTS)
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
	else
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);

	/* compression defaults only matter when the feature is on disk */
	if (f2fs_sb_has_compression(sbi)) {
		F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
		F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
		F2FS_OPTION(sbi).compress_ext_cnt = 0;
		F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
	}

	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
	F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;

	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, MERGE_CHECKPOINT);
	F2FS_OPTION(sbi).unusable_cap = 0;
	sbi->sb->s_flags |= SB_LAZYTIME;
	if (!f2fs_is_readonly(sbi))
		set_opt(sbi, FLUSH_MERGE);

	/* zoned block devices can only be written sequentially (LFS) */
	if (f2fs_sb_has_blkzoned(sbi))
		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
	else
		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	/* disable fault injection by default */
	f2fs_build_fault_attr(sbi, 0, 0);
}
  1971. #ifdef CONFIG_QUOTA
  1972. static int f2fs_enable_quotas(struct super_block *sb);
  1973. #endif
/*
 * Enter checkpoint=disable mode: run urgent foreground GC until enough
 * space is reclaimable, sync the filesystem, then write a CP_PAUSE
 * checkpoint and record the unusable block count.
 * Returns 0 on success or a negative errno; GC mode and SB_RDONLY are
 * restored on all paths.
 */
static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	unsigned int gc_mode = sbi->gc_mode;
	int err = 0;
	int ret;
	block_t unusable;

	if (s_flags & SB_RDONLY) {
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
		return -EINVAL;
	}
	sbi->sb->s_flags |= SB_ACTIVE;

	/* check if we need more GC first */
	unusable = f2fs_get_unusable_blocks(sbi);
	if (!f2fs_disable_cp_again(sbi, unusable))
		goto skip_gc;

	f2fs_update_time(sbi, DISABLE_TIME);

	/* force urgent GC until the DISABLE_TIME window elapses */
	sbi->gc_mode = GC_URGENT_HIGH;

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		struct f2fs_gc_control gc_control = {
			.victim_segno = NULL_SEGNO,
			.init_gc_type = FG_GC,
			.should_migrate_blocks = false,
			.err_gc_skipped = true,
			.no_bg_gc = true,
			.nr_free_secs = 1 };

		f2fs_down_write(&sbi->gc_lock);
		stat_inc_gc_call_count(sbi, FOREGROUND);
		err = f2fs_gc(sbi, &gc_control);
		/* -ENODATA means nothing left to move: treat as success */
		if (err == -ENODATA) {
			err = 0;
			break;
		}
		if (err && err != -EAGAIN)
			break;
	}

	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret : err;
		goto restore_flag;
	}

	/* re-evaluate after GC; still too many unusable blocks -> retry later */
	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {
		err = -EAGAIN;
		goto restore_flag;
	}

skip_gc:
	f2fs_down_write(&sbi->gc_lock);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out_unlock;

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = unusable;
	spin_unlock(&sbi->stat_lock);

out_unlock:
	f2fs_up_write(&sbi->gc_lock);
restore_flag:
	sbi->gc_mode = gc_mode;
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
	return err;
}
/*
 * Leave checkpoint=disable mode: flush dirty data (bounded retries),
 * move dirty segments to prefree, re-enable checkpointing and force a
 * synchronous checkpoint.
 */
static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
	int retry = DEFAULT_RETRY_IO_COUNT;

	/* we should flush all the data to keep data consistency */
	do {
		sync_inodes_sb(sbi->sb);
		f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
	} while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);

	/* retry went negative: the retry budget was exhausted with data left */
	if (unlikely(retry < 0))
		f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");

	f2fs_down_write(&sbi->gc_lock);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	f2fs_up_write(&sbi->gc_lock);

	f2fs_sync_fs(sbi->sb, 1);

	/* Let's ensure there's no pending checkpoint anymore */
	f2fs_flush_ckpt_thread(sbi);
}
  2058. static int f2fs_remount(struct super_block *sb, int *flags, char *data)
  2059. {
  2060. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  2061. struct f2fs_mount_info org_mount_opt;
  2062. unsigned long old_sb_flags;
  2063. int err;
  2064. bool need_restart_gc = false, need_stop_gc = false;
  2065. bool need_restart_flush = false, need_stop_flush = false;
  2066. bool need_restart_discard = false, need_stop_discard = false;
  2067. bool need_enable_checkpoint = false, need_disable_checkpoint = false;
  2068. bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
  2069. bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
  2070. bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
  2071. bool no_atgc = !test_opt(sbi, ATGC);
  2072. bool no_discard = !test_opt(sbi, DISCARD);
  2073. bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
  2074. bool block_unit_discard = f2fs_block_unit_discard(sbi);
  2075. #ifdef CONFIG_QUOTA
  2076. int i, j;
  2077. #endif
  2078. /*
  2079. * Save the old mount options in case we
  2080. * need to restore them.
  2081. */
  2082. org_mount_opt = sbi->mount_opt;
  2083. old_sb_flags = sb->s_flags;
  2084. #ifdef CONFIG_QUOTA
  2085. org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
  2086. for (i = 0; i < MAXQUOTAS; i++) {
  2087. if (F2FS_OPTION(sbi).s_qf_names[i]) {
  2088. org_mount_opt.s_qf_names[i] =
  2089. kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
  2090. GFP_KERNEL);
  2091. if (!org_mount_opt.s_qf_names[i]) {
  2092. for (j = 0; j < i; j++)
  2093. kfree(org_mount_opt.s_qf_names[j]);
  2094. return -ENOMEM;
  2095. }
  2096. } else {
  2097. org_mount_opt.s_qf_names[i] = NULL;
  2098. }
  2099. }
  2100. #endif
  2101. /* recover superblocks we couldn't write due to previous RO mount */
  2102. if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
  2103. err = f2fs_commit_super(sbi, false);
  2104. f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
  2105. err);
  2106. if (!err)
  2107. clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
  2108. }
  2109. default_options(sbi, true);
  2110. /* parse mount options */
  2111. err = parse_options(sb, data, true);
  2112. if (err)
  2113. goto restore_opts;
  2114. #ifdef CONFIG_BLK_DEV_ZONED
  2115. if (f2fs_sb_has_blkzoned(sbi) &&
  2116. sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
  2117. f2fs_err(sbi,
  2118. "zoned: max open zones %u is too small, need at least %u open zones",
  2119. sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
  2120. err = -EINVAL;
  2121. goto restore_opts;
  2122. }
  2123. #endif
  2124. /* flush outstanding errors before changing fs state */
  2125. flush_work(&sbi->s_error_work);
  2126. /*
  2127. * Previous and new state of filesystem is RO,
  2128. * so skip checking GC and FLUSH_MERGE conditions.
  2129. */
  2130. if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
  2131. goto skip;
  2132. if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) {
  2133. err = -EROFS;
  2134. goto restore_opts;
  2135. }
  2136. #ifdef CONFIG_QUOTA
  2137. if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
  2138. err = dquot_suspend(sb, -1);
  2139. if (err < 0)
  2140. goto restore_opts;
  2141. } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
  2142. /* dquot_resume needs RW */
  2143. sb->s_flags &= ~SB_RDONLY;
  2144. if (sb_any_quota_suspended(sb)) {
  2145. dquot_resume(sb, -1);
  2146. } else if (f2fs_sb_has_quota_ino(sbi)) {
  2147. err = f2fs_enable_quotas(sb);
  2148. if (err)
  2149. goto restore_opts;
  2150. }
  2151. }
  2152. #endif
  2153. if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
  2154. err = -EINVAL;
  2155. f2fs_warn(sbi, "LFS is not compatible with IPU");
  2156. goto restore_opts;
  2157. }
  2158. /* disallow enable atgc dynamically */
  2159. if (no_atgc == !!test_opt(sbi, ATGC)) {
  2160. err = -EINVAL;
  2161. f2fs_warn(sbi, "switch atgc option is not allowed");
  2162. goto restore_opts;
  2163. }
  2164. /* disallow enable/disable extent_cache dynamically */
  2165. if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
  2166. err = -EINVAL;
  2167. f2fs_warn(sbi, "switch extent_cache option is not allowed");
  2168. goto restore_opts;
  2169. }
  2170. /* disallow enable/disable age extent_cache dynamically */
  2171. if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) {
  2172. err = -EINVAL;
  2173. f2fs_warn(sbi, "switch age_extent_cache option is not allowed");
  2174. goto restore_opts;
  2175. }
  2176. if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
  2177. err = -EINVAL;
  2178. f2fs_warn(sbi, "switch compress_cache option is not allowed");
  2179. goto restore_opts;
  2180. }
  2181. if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
  2182. err = -EINVAL;
  2183. f2fs_warn(sbi, "switch discard_unit option is not allowed");
  2184. goto restore_opts;
  2185. }
  2186. if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
  2187. err = -EINVAL;
  2188. f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
  2189. goto restore_opts;
  2190. }
  2191. /*
  2192. * We stop the GC thread if FS is mounted as RO
  2193. * or if background_gc = off is passed in mount
  2194. * option. Also sync the filesystem.
  2195. */
  2196. if ((*flags & SB_RDONLY) ||
  2197. (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
  2198. !test_opt(sbi, GC_MERGE))) {
  2199. if (sbi->gc_thread) {
  2200. f2fs_stop_gc_thread(sbi);
  2201. need_restart_gc = true;
  2202. }
  2203. } else if (!sbi->gc_thread) {
  2204. err = f2fs_start_gc_thread(sbi);
  2205. if (err)
  2206. goto restore_opts;
  2207. need_stop_gc = true;
  2208. }
  2209. if (*flags & SB_RDONLY) {
  2210. sync_inodes_sb(sb);
  2211. set_sbi_flag(sbi, SBI_IS_DIRTY);
  2212. set_sbi_flag(sbi, SBI_IS_CLOSE);
  2213. f2fs_sync_fs(sb, 1);
  2214. clear_sbi_flag(sbi, SBI_IS_CLOSE);
  2215. }
  2216. /*
  2217. * We stop issue flush thread if FS is mounted as RO
  2218. * or if flush_merge is not passed in mount option.
  2219. */
  2220. if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
  2221. clear_opt(sbi, FLUSH_MERGE);
  2222. f2fs_destroy_flush_cmd_control(sbi, false);
  2223. need_restart_flush = true;
  2224. } else {
  2225. err = f2fs_create_flush_cmd_control(sbi);
  2226. if (err)
  2227. goto restore_gc;
  2228. need_stop_flush = true;
  2229. }
  2230. if (no_discard == !!test_opt(sbi, DISCARD)) {
  2231. if (test_opt(sbi, DISCARD)) {
  2232. err = f2fs_start_discard_thread(sbi);
  2233. if (err)
  2234. goto restore_flush;
  2235. need_stop_discard = true;
  2236. } else {
  2237. f2fs_stop_discard_thread(sbi);
  2238. f2fs_issue_discard_timeout(sbi);
  2239. need_restart_discard = true;
  2240. }
  2241. }
  2242. if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
  2243. if (test_opt(sbi, DISABLE_CHECKPOINT)) {
  2244. err = f2fs_disable_checkpoint(sbi);
  2245. if (err)
  2246. goto restore_discard;
  2247. need_enable_checkpoint = true;
  2248. } else {
  2249. f2fs_enable_checkpoint(sbi);
  2250. need_disable_checkpoint = true;
  2251. }
  2252. }
  2253. /*
  2254. * Place this routine at the end, since a new checkpoint would be
  2255. * triggered while remount and we need to take care of it before
  2256. * returning from remount.
  2257. */
  2258. if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
  2259. !test_opt(sbi, MERGE_CHECKPOINT)) {
  2260. f2fs_stop_ckpt_thread(sbi);
  2261. } else {
/* Flush the previous checkpoint, if it exists. */
  2263. f2fs_flush_ckpt_thread(sbi);
  2264. err = f2fs_start_ckpt_thread(sbi);
  2265. if (err) {
  2266. f2fs_err(sbi,
  2267. "Failed to start F2FS issue_checkpoint_thread (%d)",
  2268. err);
  2269. goto restore_checkpoint;
  2270. }
  2271. }
  2272. skip:
  2273. #ifdef CONFIG_QUOTA
  2274. /* Release old quota file names */
  2275. for (i = 0; i < MAXQUOTAS; i++)
  2276. kfree(org_mount_opt.s_qf_names[i]);
  2277. #endif
  2278. /* Update the POSIXACL Flag */
  2279. sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
  2280. (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
  2281. limit_reserve_root(sbi);
  2282. adjust_unusable_cap_perc(sbi);
  2283. *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
  2284. return 0;
  2285. restore_checkpoint:
  2286. if (need_enable_checkpoint) {
  2287. f2fs_enable_checkpoint(sbi);
  2288. } else if (need_disable_checkpoint) {
  2289. if (f2fs_disable_checkpoint(sbi))
  2290. f2fs_warn(sbi, "checkpoint has not been disabled");
  2291. }
  2292. restore_discard:
  2293. if (need_restart_discard) {
  2294. if (f2fs_start_discard_thread(sbi))
  2295. f2fs_warn(sbi, "discard has been stopped");
  2296. } else if (need_stop_discard) {
  2297. f2fs_stop_discard_thread(sbi);
  2298. }
  2299. restore_flush:
  2300. if (need_restart_flush) {
  2301. if (f2fs_create_flush_cmd_control(sbi))
  2302. f2fs_warn(sbi, "background flush thread has stopped");
  2303. } else if (need_stop_flush) {
  2304. clear_opt(sbi, FLUSH_MERGE);
  2305. f2fs_destroy_flush_cmd_control(sbi, false);
  2306. }
  2307. restore_gc:
  2308. if (need_restart_gc) {
  2309. if (f2fs_start_gc_thread(sbi))
  2310. f2fs_warn(sbi, "background gc thread has stopped");
  2311. } else if (need_stop_gc) {
  2312. f2fs_stop_gc_thread(sbi);
  2313. }
  2314. restore_opts:
  2315. #ifdef CONFIG_QUOTA
  2316. F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
  2317. for (i = 0; i < MAXQUOTAS; i++) {
  2318. kfree(F2FS_OPTION(sbi).s_qf_names[i]);
  2319. F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
  2320. }
  2321. #endif
  2322. sbi->mount_opt = org_mount_opt;
  2323. sb->s_flags = old_sb_flags;
  2324. return err;
  2325. }
/* ->shutdown hook: force the fs down, passing NOSYNC so dirty data is dropped */
static void f2fs_shutdown(struct super_block *sb)
{
	f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false);
}
  2330. #ifdef CONFIG_QUOTA
  2331. static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
  2332. {
  2333. /* need to recovery orphan */
  2334. if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
  2335. return true;
  2336. /* need to recovery data */
  2337. if (test_opt(sbi, DISABLE_ROLL_FORWARD))
  2338. return false;
  2339. if (test_opt(sbi, NORECOVERY))
  2340. return false;
  2341. return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG);
  2342. }
/*
 * Prepare quota files before mount-time recovery.  If recovery is needed
 * and the hardware is writable, a read-only mount is temporarily made
 * writable (remembered via SBI_IS_WRITABLE) so recovery can update quota
 * state.  Returns the result of f2fs_enable_quota_files(); a true return
 * means f2fs_recover_quota_end() must turn the quotas back off.
 */
static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi)
{
	bool readonly = f2fs_readonly(sbi->sb);

	if (!f2fs_need_recovery(sbi))
		return false;

	/* it doesn't need to check f2fs_sb_has_readonly() */
	if (f2fs_hw_is_readonly(sbi))
		return false;

	/* lift SB_RDONLY for the duration of recovery; _end() restores it */
	if (readonly) {
		sbi->sb->s_flags &= ~SB_RDONLY;
		set_sbi_flag(sbi, SBI_IS_WRITABLE);
	}

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	return f2fs_enable_quota_files(sbi, readonly);
}
  2361. static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi,
  2362. bool quota_enabled)
  2363. {
  2364. if (quota_enabled)
  2365. f2fs_quota_off_umount(sbi->sb);
  2366. if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) {
  2367. clear_sbi_flag(sbi, SBI_IS_WRITABLE);
  2368. sbi->sb->s_flags |= SB_RDONLY;
  2369. }
  2370. }
/*
 * Read data from quotafile via the page cache.  Reads past i_size are
 * clamped; returns the number of bytes read, 0 at/after EOF, or a
 * negative errno.  On unexpected read failure the image is flagged
 * with SBI_QUOTA_NEED_REPAIR for fsck.
 */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;

	if (off > i_size)
		return 0;

	/* clamp the request to the end of the file */
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		/* copy at most up to the end of the current block */
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				/* transient allocation failure: back off and retry */
				memalloc_retry_wait(GFP_NOFS);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}
		lock_page(page);
		/* page got truncated/invalidated under us: re-read it */
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}
		memcpy_from_page(data, page, offset, tocopy);
		f2fs_put_page(page, 1);
		/* only the first iteration starts at a non-zero in-block offset */
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
/*
 * Write to quotafile through the address_space write_begin/write_end
 * protocol, one block-sized chunk at a time.  Returns the number of
 * bytes written (possibly short), or a negative errno if nothing was
 * written.  Write failures flag SBI_QUOTA_NEED_REPAIR; -ENOMEM from
 * write_begin is retried after a timeout.
 */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct folio *folio;
	void *fsdata = NULL;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		/* copy at most up to the end of the current block */
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy,
							&folio, &fsdata);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				/* transient allocation failure: wait and retry */
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		memcpy_to_folio(folio, offset_in_folio(folio, off), data, tocopy);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						folio, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	/* nothing was written: propagate the error (0 on empty request) */
	if (len == towrite)
		return err;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
  2461. int f2fs_dquot_initialize(struct inode *inode)
  2462. {
  2463. if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT))
  2464. return -ESRCH;
  2465. return dquot_initialize(inode);
  2466. }
/* ->get_dquots hook: return the per-inode dquot pointer array */
static struct dquot __rcu **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}
/* ->get_reserved_space hook: return the inode's reserved-quota counter */
static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}
/*
 * Turn on one quota type at mount time from the quota file named in the
 * mount options (the non quota-ino, path-based case).  Skipped with a
 * warning if the checkpoint says the quota file may be corrupted.
 */
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
		return 0;
	}
	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}
  2484. int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
  2485. {
  2486. int enabled = 0;
  2487. int i, err;
  2488. if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
  2489. err = f2fs_enable_quotas(sbi->sb);
  2490. if (err) {
  2491. f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
  2492. return 0;
  2493. }
  2494. return 1;
  2495. }
  2496. for (i = 0; i < MAXQUOTAS; i++) {
  2497. if (F2FS_OPTION(sbi).s_qf_names[i]) {
  2498. err = f2fs_quota_on_mount(sbi, i);
  2499. if (!err) {
  2500. enabled = 1;
  2501. continue;
  2502. }
  2503. f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
  2504. err, i);
  2505. }
  2506. }
  2507. return enabled;
  2508. }
/*
 * Enable one quota type backed by its reserved quota inode (quota-ino
 * feature only).  Loads the inode, marks it S_NOQUOTA plus the default
 * quota-file flags, and hands it to the VFS quota layer.
 */
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
							unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	unsigned long qf_flag = F2FS_QUOTA_DEFAULT_FL;
	int err;

	/* callers must only use this path when the quota-ino feature is set */
	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	inode_lock(qf_inode);
	qf_inode->i_flags |= S_NOQUOTA;

	/* make sure the quota inode carries the default quota-file flags */
	if ((F2FS_I(qf_inode)->i_flags & qf_flag) != qf_flag) {
		F2FS_I(qf_inode)->i_flags |= qf_flag;
		f2fs_set_inode_flags(qf_inode);
	}
	inode_unlock(qf_inode);

	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}
/*
 * Enable usage tracking for every quota type that has a reserved quota
 * inode; limits enforcement is additionally enabled for types requested
 * via mount options.  On failure, already-enabled types are rolled back
 * and the image is flagged for fsck.
 */
static int f2fs_enable_quotas(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int type, err = 0;
	unsigned long qf_inum;
	/* which types also get limits enforcement, per mount options */
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
				/* roll back the types enabled so far */
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}
/*
 * Write back one quota type's dirty dquots and its page cache.  For
 * journalled quota the writeout is left to the checkpoint, so waiting
 * and cache truncation are skipped.  Any failure flags the image with
 * SBI_QUOTA_NEED_REPAIR.
 */
static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
{
	struct quota_info *dqopt = sb_dqopt(sbi->sb);
	struct address_space *mapping = dqopt->files[type]->i_mapping;
	int ret = 0;

	ret = dquot_writeback_dquots(sbi->sb, type);
	if (ret)
		goto out;

	ret = filemap_fdatawrite(mapping);
	if (ret)
		goto out;

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		goto out;

	ret = filemap_fdatawait(mapping);

	/* drop the quota pages so userspace re-reads fresh data */
	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
out:
	if (ret)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}
/*
 * ->quota_sync hook: sync dquots for one quota type (or all when
 * type == -1).  Takes lock_op before quota_sem to avoid the inverted
 * lock order described in the comment below; stops at the first error.
 */
int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret = 0;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {

		if (type != -1 && cnt != type)
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		/* path-based quota files need the inode lock held */
		if (!f2fs_sb_has_quota_ino(sbi))
			inode_lock(dqopt->files[cnt]);

		/*
		 * do_quotactl
		 *  f2fs_quota_sync
		 *  f2fs_down_read(quota_sem)
		 *  dquot_writeback_dquots()
		 *  f2fs_dquot_commit
		 *			      block_operation
		 *			      f2fs_down_read(quota_sem)
		 */
		f2fs_lock_op(sbi);
		f2fs_down_read(&sbi->quota_sem);

		ret = f2fs_quota_sync_file(sbi, cnt);

		f2fs_up_read(&sbi->quota_sem);
		f2fs_unlock_op(sbi);

		if (!f2fs_sb_has_quota_ino(sbi))
			inode_unlock(dqopt->files[cnt]);

		if (ret)
			break;
	}
	return ret;
}
/*
 * ->quota_on hook: enable quota from a user-supplied quota file.  Denied
 * when the quota-ino feature is active (quota sysfiles already exist).
 * Syncs quota state and the target file's pages before handing off to
 * dquot_quota_on(), then marks the file with the default quota flags.
 */
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	/* if quota sysfile exists, deny enabling quota with specific file */
	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
		return -EBUSY;
	}

	/* the quota file must live on this filesystem */
	if (path->dentry->d_sb != sb)
		return -EXDEV;

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	/* flush the quota file's pages so dquot code reads stable data */
	err = filemap_fdatawrite(inode->i_mapping);
	if (err)
		return err;

	err = filemap_fdatawait(inode->i_mapping);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	/* tag the file as a quota file (immutable/noatime-style flags) */
	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}
/*
 * Turn off one quota type.  Pins the quota inode with igrab() so it
 * stays valid across the sync, then clears the quota-file flags unless
 * the quota-ino feature owns the file.
 */
static int __f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	/* no active quota file (or it is going away): plain turn-off */
	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	/* quota-ino files keep their flags; only path-based files are untagged */
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~F2FS_QUOTA_DEFAULT_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
  2683. static int f2fs_quota_off(struct super_block *sb, int type)
  2684. {
  2685. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  2686. int err;
  2687. err = __f2fs_quota_off(sb, type);
  2688. /*
  2689. * quotactl can shutdown journalled quota, result in inconsistence
  2690. * between quota record and fs data by following updates, tag the
  2691. * flag to let fsck be aware of it.
  2692. */
  2693. if (is_journalled_quota(sbi))
  2694. set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
  2695. return err;
  2696. }
/*
 * Turn off all quota types at umount.  A failed turn-off falls back to
 * dquot_quota_off() and flags the image for fsck; finally the whole fs
 * is synced to flush any quota blocks still in flight.
 */
void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
		err = __f2fs_quota_off(sb, type);
		if (err) {
			/* best effort: force the VFS-level turn-off anyway */
			int ret = dquot_quota_off(sb, type);

			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
				 type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
		}
	}
	/*
	 * In case of checkpoint=disable, we must flush quota blocks.
	 * This can cause NULL exception for node_inode in end_io, since
	 * put_super already dropped it.
	 */
	sync_filesystem(sb);
}
  2717. static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
  2718. {
  2719. struct quota_info *dqopt = sb_dqopt(sb);
  2720. int type;
  2721. for (type = 0; type < MAXQUOTAS; type++) {
  2722. if (!dqopt->files[type])
  2723. continue;
  2724. f2fs_inode_synced(dqopt->files[type]);
  2725. }
  2726. }
/*
 * ->write_dquot hook: commit a dquot to disk under quota_sem.  Uses the
 * SINGLE_DEPTH_NESTING annotation because commit can run on paths that
 * already hold quota_sem (see the lock-order comment in f2fs_quota_sync).
 * Failure flags the image for fsck.
 */
static int f2fs_dquot_commit(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
	ret = dquot_commit(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}
/*
 * ->acquire_dquot hook: read/initialize a dquot under quota_sem.
 * Failure flags the image for fsck.
 */
static int f2fs_dquot_acquire(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read(&sbi->quota_sem);
	ret = dquot_acquire(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}
  2749. static int f2fs_dquot_release(struct dquot *dquot)
  2750. {
  2751. struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
  2752. int ret = dquot_release(dquot);
  2753. if (ret < 0)
  2754. set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
  2755. return ret;
  2756. }
  2757. static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
  2758. {
  2759. struct super_block *sb = dquot->dq_sb;
  2760. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  2761. int ret = dquot_mark_dquot_dirty(dquot);
  2762. /* if we are using journalled quota */
  2763. if (is_journalled_quota(sbi))
  2764. set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
  2765. return ret;
  2766. }
  2767. static int f2fs_dquot_commit_info(struct super_block *sb, int type)
  2768. {
  2769. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  2770. int ret = dquot_commit_info(sb, type);
  2771. if (ret < 0)
  2772. set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
  2773. return ret;
  2774. }
/* ->get_projid hook: report the inode's project quota id */
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}
/* dquot callbacks wired into the VFS quota layer */
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= f2fs_dquot_commit,
	.acquire_dquot	= f2fs_dquot_acquire,
	.release_dquot	= f2fs_dquot_release,
	.mark_dirty	= f2fs_dquot_mark_dquot_dirty,
	.write_info	= f2fs_dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};
/* quotactl(2) entry points; mostly generic dquot helpers */
static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
  2802. #else
/* CONFIG_QUOTA=n stub: quota initialization is a no-op */
int f2fs_dquot_initialize(struct inode *inode)
{
	return 0;
}
/* CONFIG_QUOTA=n stub: nothing to sync */
int f2fs_quota_sync(struct super_block *sb, int type)
{
	return 0;
}
/* CONFIG_QUOTA=n stub: nothing to turn off at umount */
void f2fs_quota_off_umount(struct super_block *sb)
{
}
  2814. #endif
/* f2fs superblock operations */
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.free_inode	= f2fs_free_inode,
	.drop_inode	= f2fs_drop_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
	.shutdown	= f2fs_shutdown,
};
  2836. #ifdef CONFIG_FS_ENCRYPTION
/* fscrypt hook: read the inode's encryption context from its xattr */
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}
/* fscrypt hook: store an encryption context in the inode's xattr */
static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	/* XATTR_CREATE: fail rather than overwrite an existing context */
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}
/* fscrypt hook: return the test_dummy_encryption policy from mount options */
static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
{
	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
}
/* fscrypt hook: f2fs never reuses inode numbers of open files */
static bool f2fs_has_stable_inodes(struct super_block *sb)
{
	return true;
}
  2868. static struct block_device **f2fs_get_devices(struct super_block *sb,
  2869. unsigned int *num_devs)
  2870. {
  2871. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  2872. struct block_device **devs;
  2873. int i;
  2874. if (!f2fs_is_multi_device(sbi))
  2875. return NULL;
  2876. devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL);
  2877. if (!devs)
  2878. return ERR_PTR(-ENOMEM);
  2879. for (i = 0; i < sbi->s_ndevs; i++)
  2880. devs[i] = FDEV(i).bdev;
  2881. *num_devs = sbi->s_ndevs;
  2882. return devs;
  2883. }
/* fscrypt callbacks and capability flags for f2fs */
static const struct fscrypt_operations f2fs_cryptops = {
	.needs_bounce_pages	= 1,
	.has_32bit_inodes	= 1,
	.supports_subblock_data_units = 1,
	.legacy_key_prefix	= "f2fs:",
	.get_context		= f2fs_get_context,
	.set_context		= f2fs_set_context,
	.get_dummy_policy	= f2fs_get_dummy_policy,
	.empty_dir		= f2fs_empty_dir,
	.has_stable_inodes	= f2fs_has_stable_inodes,
	.get_devices		= f2fs_get_devices,
};
  2896. #endif
/*
 * NFS export helper: resolve an inode number from a file handle and
 * validate its generation; returns -ESTALE for out-of-range nids or
 * generation mismatches.
 */
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}
/* NFS export: decode a file handle into a dentry */
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
/* NFS export: decode a file handle into the parent's dentry */
static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
/* NFS export operations (32-bit inode file handles) */
static const struct export_operations f2fs_export_ops = {
	.encode_fh = generic_encode_ino32_fh,
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
  2937. loff_t max_file_blocks(struct inode *inode)
  2938. {
  2939. loff_t result = 0;
  2940. loff_t leaf_count;
  2941. /*
  2942. * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
  2943. * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
  2944. * space in inode.i_addr, it will be more safe to reassign
  2945. * result as zero.
  2946. */
  2947. if (inode && f2fs_compressed_file(inode))
  2948. leaf_count = ADDRS_PER_BLOCK(inode);
  2949. else
  2950. leaf_count = DEF_ADDRS_PER_BLOCK;
  2951. /* two direct node blocks */
  2952. result += (leaf_count * 2);
  2953. /* two indirect node blocks */
  2954. leaf_count *= NIDS_PER_BLOCK;
  2955. result += (leaf_count * 2);
  2956. /* one double indirect node block */
  2957. leaf_count *= NIDS_PER_BLOCK;
  2958. result += leaf_count;
  2959. /*
  2960. * For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with
  2961. * a 4K crypto data unit, we must restrict the max filesize to what can
  2962. * fit within U32_MAX + 1 data units.
  2963. */
  2964. result = umin(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
  2965. return result;
  2966. }
/*
 * Write one superblock copy to disk synchronously (FUA + preflush).
 * When @update is set, the in-memory raw super is first copied into the
 * folio at @index.  Returns the bio submission result.
 */
static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
				pgoff_t index, bool update)
{
	struct bio *bio;
	/* it's rare case, we can do fua all the time */
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
	int ret;

	folio_lock(folio);
	folio_wait_writeback(folio);
	if (update)
		memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi),
					sizeof(struct f2fs_super_block));
	/* dirty then immediately claim the folio for this manual writeback */
	folio_mark_dirty(folio);
	folio_clear_dirty_for_io(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);

	bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);

	/* it doesn't need to set crypto context for superblock update */
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio_index(folio));

	if (!bio_add_folio(bio, folio, folio_size(folio), 0))
		f2fs_bug_on(sbi, 1);

	ret = submit_bio_wait(bio);
	bio_put(bio);
	folio_end_writeback(folio);

	return ret;
}
/*
 * Verify that the metadata areas recorded in the raw superblock (CP,
 * SIT, NAT, SSA, MAIN) are laid out back-to-back with no gaps or
 * overlaps.  Returns true if the layout is inconsistent.  A MAIN area
 * shorter than the recorded segment_count is fixed up in memory (and
 * written back to disk when the fs is writable) rather than rejected.
 */
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct folio *folio, pgoff_t index)
{
	struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				((u64)segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				((u64)segment_count << log_blocks_per_seg);

	/* the checkpoint area must start at segment 0 */
	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			  segment0_blkaddr, cp_blkaddr);
		return true;
	}

	/* each area must end exactly where the next one begins */
	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			  cp_blkaddr, sit_blkaddr,
			  segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			  sit_blkaddr, nat_blkaddr,
			  segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			  nat_blkaddr, ssa_blkaddr,
			  segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			  ssa_blkaddr, main_blkaddr,
			  segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
			  main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		/* total segment_count overstates the device: shrink and fix up */
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) {
			/* cannot persist the fix; remember to write the SB later */
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(sbi, folio, index, false);
			res = err ? "failed" : "done";
		}
		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
			  res, main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
/*
 * Validate the static (mkfs-time) fields of a candidate superblock that was
 * just read from disk.  @folio/@index locate the raw superblock copy.
 *
 * Returns 0 when the superblock looks sane, -EINVAL when the magic number
 * mismatches (probably not an f2fs image at all), or -EFSCORRUPTED when the
 * magic matches but some field is internally inconsistent.
 */
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct folio *folio, pgoff_t index)
{
	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
	size_t crc_offset = 0;
	__u32 crc = 0;

	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return -EINVAL;
	}

	/* Check checksum_offset and crc in superblock */
	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
		crc_offset = le32_to_cpu(raw_super->checksum_offset);
		if (crc_offset !=
			offsetof(struct f2fs_super_block, crc)) {
			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
				crc_offset);
			return -EFSCORRUPTED;
		}
		crc = le32_to_cpu(raw_super->crc);
		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
			return -EFSCORRUPTED;
		}
	}

	/* only support block_size equals to PAGE_SIZE */
	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
			le32_to_cpu(raw_super->log_blocksize),
			F2FS_BLKSIZE_BITS);
		return -EFSCORRUPTED;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return -EFSCORRUPTED;
	}

	/* Currently, support 512/1024/2048/4096/16K bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}
	/* sector size * sectors-per-block must equal the block size */
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = BIT(le32_to_cpu(raw_super->log_blocks_per_seg));

	if (segment_count > F2FS_MAX_SEGMENT ||
			segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
		return -EFSCORRUPTED;
	}

	if (total_sections > segment_count_main || total_sections < 1 ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
			segment_count, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if (segment_count_main != total_sections * segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
			segment_count_main, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
			segment_count, segs_per_sec, total_sections);
		return -EFSCORRUPTED;
	}

	/* >> 9: one segment is 512 blocks per the log_blocks_per_seg check above */
	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
			segment_count, le64_to_cpu(raw_super->block_count));
		return -EFSCORRUPTED;
	}

	if (RDEV(0).path[0]) {
		/* multi-device image: segment_count must equal the per-device sum */
		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
		int i = 1;

		while (i < MAX_DEVICES && RDEV(i).path[0]) {
			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
			i++;
		}
		if (segment_count != dev_seg_count) {
			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
					segment_count, dev_seg_count);
			return -EFSCORRUPTED;
		}
	} else {
		/* single-device zoned image must actually sit on a zoned bdev */
		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
					!bdev_is_zoned(sbi->sb->s_bdev)) {
			f2fs_info(sbi, "Zoned block device path is missing");
			return -EFSCORRUPTED;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
			secs_per_zone, total_sections);
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
			le32_to_cpu(raw_super->extension_count),
			raw_super->hot_ext_count,
			F2FS_MAX_EXTENSION);
		return -EFSCORRUPTED;
	}

	/* cp_payload must leave room for the CP packs and active summaries */
	if (le32_to_cpu(raw_super->cp_payload) >=
				(blocks_per_seg - F2FS_CP_PACKS -
				NR_CURSEG_PERSIST_TYPE)) {
		f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
			le32_to_cpu(raw_super->cp_payload),
			blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_PERSIST_TYPE);
		return -EFSCORRUPTED;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return -EFSCORRUPTED;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, folio, index))
		return -EFSCORRUPTED;

	return 0;
}
  3223. int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
  3224. {
  3225. unsigned int total, fsmeta;
  3226. struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
  3227. struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
  3228. unsigned int ovp_segments, reserved_segments;
  3229. unsigned int main_segs, blocks_per_seg;
  3230. unsigned int sit_segs, nat_segs;
  3231. unsigned int sit_bitmap_size, nat_bitmap_size;
  3232. unsigned int log_blocks_per_seg;
  3233. unsigned int segment_count_main;
  3234. unsigned int cp_pack_start_sum, cp_payload;
  3235. block_t user_block_count, valid_user_blocks;
  3236. block_t avail_node_count, valid_node_count;
  3237. unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
  3238. unsigned int sit_blk_cnt;
  3239. int i, j;
  3240. total = le32_to_cpu(raw_super->segment_count);
  3241. fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
  3242. sit_segs = le32_to_cpu(raw_super->segment_count_sit);
  3243. fsmeta += sit_segs;
  3244. nat_segs = le32_to_cpu(raw_super->segment_count_nat);
  3245. fsmeta += nat_segs;
  3246. fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
  3247. fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
  3248. if (unlikely(fsmeta >= total))
  3249. return 1;
  3250. ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
  3251. reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
  3252. if (!f2fs_sb_has_readonly(sbi) &&
  3253. unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
  3254. ovp_segments == 0 || reserved_segments == 0)) {
  3255. f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
  3256. return 1;
  3257. }
  3258. user_block_count = le64_to_cpu(ckpt->user_block_count);
  3259. segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
  3260. (f2fs_sb_has_readonly(sbi) ? 1 : 0);
  3261. log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
  3262. if (!user_block_count || user_block_count >=
  3263. segment_count_main << log_blocks_per_seg) {
  3264. f2fs_err(sbi, "Wrong user_block_count: %u",
  3265. user_block_count);
  3266. return 1;
  3267. }
  3268. valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
  3269. if (valid_user_blocks > user_block_count) {
  3270. f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
  3271. valid_user_blocks, user_block_count);
  3272. return 1;
  3273. }
  3274. valid_node_count = le32_to_cpu(ckpt->valid_node_count);
  3275. avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
  3276. if (valid_node_count > avail_node_count) {
  3277. f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
  3278. valid_node_count, avail_node_count);
  3279. return 1;
  3280. }
  3281. main_segs = le32_to_cpu(raw_super->segment_count_main);
  3282. blocks_per_seg = BLKS_PER_SEG(sbi);
  3283. for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
  3284. if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
  3285. le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
  3286. return 1;
  3287. if (f2fs_sb_has_readonly(sbi))
  3288. goto check_data;
  3289. for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
  3290. if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
  3291. le32_to_cpu(ckpt->cur_node_segno[j])) {
  3292. f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
  3293. i, j,
  3294. le32_to_cpu(ckpt->cur_node_segno[i]));
  3295. return 1;
  3296. }
  3297. }
  3298. }
  3299. check_data:
  3300. for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
  3301. if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
  3302. le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
  3303. return 1;
  3304. if (f2fs_sb_has_readonly(sbi))
  3305. goto skip_cross;
  3306. for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
  3307. if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
  3308. le32_to_cpu(ckpt->cur_data_segno[j])) {
  3309. f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
  3310. i, j,
  3311. le32_to_cpu(ckpt->cur_data_segno[i]));
  3312. return 1;
  3313. }
  3314. }
  3315. }
  3316. for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
  3317. for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
  3318. if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
  3319. le32_to_cpu(ckpt->cur_data_segno[j])) {
  3320. f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
  3321. i, j,
  3322. le32_to_cpu(ckpt->cur_node_segno[i]));
  3323. return 1;
  3324. }
  3325. }
  3326. }
  3327. skip_cross:
  3328. sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
  3329. nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
  3330. if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
  3331. nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
  3332. f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
  3333. sit_bitmap_size, nat_bitmap_size);
  3334. return 1;
  3335. }
  3336. sit_blk_cnt = DIV_ROUND_UP(main_segs, SIT_ENTRY_PER_BLOCK);
  3337. if (sit_bitmap_size * 8 < sit_blk_cnt) {
  3338. f2fs_err(sbi, "Wrong bitmap size: sit: %u, sit_blk_cnt:%u",
  3339. sit_bitmap_size, sit_blk_cnt);
  3340. return 1;
  3341. }
  3342. cp_pack_start_sum = __start_sum_addr(sbi);
  3343. cp_payload = __cp_payload(sbi);
  3344. if (cp_pack_start_sum < cp_payload + 1 ||
  3345. cp_pack_start_sum > blocks_per_seg - 1 -
  3346. NR_CURSEG_PERSIST_TYPE) {
  3347. f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
  3348. cp_pack_start_sum);
  3349. return 1;
  3350. }
  3351. if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
  3352. le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
  3353. f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
  3354. "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
  3355. "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
  3356. le32_to_cpu(ckpt->checksum_offset));
  3357. return 1;
  3358. }
  3359. nat_blocks = nat_segs << log_blocks_per_seg;
  3360. nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
  3361. nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
  3362. if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
  3363. (cp_payload + F2FS_CP_PACKS +
  3364. NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
  3365. f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
  3366. cp_payload, nat_bits_blocks);
  3367. return 1;
  3368. }
  3369. if (unlikely(f2fs_cp_error(sbi))) {
  3370. f2fs_err(sbi, "A bug case: need to run fsck");
  3371. return 1;
  3372. }
  3373. return 0;
  3374. }
/*
 * Seed the in-memory sb_info from the (already validated) raw superblock
 * and reset all runtime tunables, counters and locks to mount-time defaults.
 */
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	/* geometry copied verbatim from the on-disk superblock */
	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = BIT(sbi->log_blocksize);
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg);
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	/* half the NAT segments hold the live NAT copy, hence the /2 */
	sbi->total_node_count = SEGS_TO_BLKS(sbi,
			((le32_to_cpu(raw_super->segment_count_nat) / 2) *
			NAT_ENTRY_PER_BLOCK));
	F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
	F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
	F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);

	/* GC / allocation defaults */
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->gc_mode = GC_NORMAL;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = SEGS_PER_SEC(sbi);
	sbi->migration_window_granularity = f2fs_sb_has_blkzoned(sbi) ?
		DEF_MIGRATION_WINDOW_GRANULARITY_ZONED : SEGS_PER_SEC(sbi);
	sbi->seq_file_ra_mul = MIN_RA_MUL;
	sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
	sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
	spin_lock_init(&sbi->gc_remaining_trials_lock);
	atomic64_set(&sbi->current_atomic_write, 0);

	sbi->dir_level = DEF_DIR_LEVEL;

	/* background-timer intervals */
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_f2fs_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_f2fs_rwsem(&sbi->sb_lock);
	init_f2fs_rwsem(&sbi->pin_sem);
}
  3429. static int init_percpu_info(struct f2fs_sb_info *sbi)
  3430. {
  3431. int err;
  3432. err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
  3433. if (err)
  3434. return err;
  3435. err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
  3436. if (err)
  3437. goto err_valid_block;
  3438. err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
  3439. GFP_KERNEL);
  3440. if (err)
  3441. goto err_node_block;
  3442. return 0;
  3443. err_node_block:
  3444. percpu_counter_destroy(&sbi->rf_node_block_count);
  3445. err_valid_block:
  3446. percpu_counter_destroy(&sbi->alloc_valid_block_count);
  3447. return err;
  3448. }
  3449. #ifdef CONFIG_BLK_DEV_ZONED
/* Context handed to f2fs_report_zone_cb() through blkdev_report_zones(). */
struct f2fs_report_zones_args {
	struct f2fs_sb_info *sbi;
	struct f2fs_dev_info *dev;	/* device whose zones are being scanned */
};
  3454. static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
  3455. void *data)
  3456. {
  3457. struct f2fs_report_zones_args *rz_args = data;
  3458. block_t unusable_blocks = (zone->len - zone->capacity) >>
  3459. F2FS_LOG_SECTORS_PER_BLOCK;
  3460. if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
  3461. return 0;
  3462. set_bit(idx, rz_args->dev->blkz_seq);
  3463. if (!rz_args->sbi->unusable_blocks_per_sec) {
  3464. rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
  3465. return 0;
  3466. }
  3467. if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
  3468. f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n");
  3469. return -EINVAL;
  3470. }
  3471. return 0;
  3472. }
/*
 * Initialize zoned-block-device bookkeeping for device @devi: clamp the
 * open-zone budget, compute zone geometry, allocate the sequential-zone
 * bitmap and populate it via a full zone report.
 *
 * Returns 0 on success (or when the image is not zoned), a negative errno
 * otherwise.
 */
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev_nr_sectors(bdev);
	struct f2fs_report_zones_args rep_zone_arg;
	u64 zone_sectors;
	unsigned int max_open_zones;
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	if (bdev_is_zoned(FDEV(devi).bdev)) {
		/* keep the tightest open-zone limit across all devices */
		max_open_zones = bdev_max_open_zones(bdev);
		if (max_open_zones && (max_open_zones < sbi->max_open_zones))
			sbi->max_open_zones = max_open_zones;
		/* every active log needs its own open zone */
		if (sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
			f2fs_err(sbi,
				"zoned: max open zones %u is too small, need at least %u open zones",
				sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
			return -EINVAL;
		}
	}

	/* all zoned devices in the image must share one zone size */
	zone_sectors = bdev_zone_sectors(bdev);
	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(zone_sectors))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
	FDEV(devi).nr_blkz = div_u64(SECTOR_TO_BLOCK(nr_sectors),
					sbi->blocks_per_blkz);
	/* round up for a trailing partial zone */
	if (nr_sectors & (zone_sectors - 1))
		FDEV(devi).nr_blkz++;

	/* one bit per zone: set => sequential-write-required zone */
	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

	rep_zone_arg.sbi = sbi;
	rep_zone_arg.dev = &FDEV(devi);

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
				  &rep_zone_arg);
	if (ret < 0)
		return ret;
	return 0;
}
  3517. #endif
  3518. /*
  3519. * Read f2fs raw super block.
  3520. * Because we have two copies of super block, so read both of them
  3521. * to get the first valid one. If any one of them is broken, we pass
  3522. * them recovery flag back to the caller.
  3523. */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct folio *folio;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	/* try both superblock copies; keep the first one that validates */
	for (block = 0; block < 2; block++) {
		folio = read_mapping_folio(sb->s_bdev->bd_mapping, block, NULL);
		if (IS_ERR(folio)) {
			f2fs_err(sbi, "Unable to read %dth superblock",
				 block + 1);
			err = PTR_ERR(folio);
			/* a broken copy means the caller may want to rewrite it */
			*recovery = 1;
			continue;
		}

		/* sanity checking of raw super */
		err = sanity_check_raw_super(sbi, folio, block);
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			folio_put(folio);
			*recovery = 1;
			continue;
		}

		/* first valid copy wins; the second is only checked */
		if (!*raw_super) {
			memcpy(super, F2FS_SUPER_BLOCK(folio, block),
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		folio_put(folio);
	}

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		/* at least one copy validated: mask the other copy's error */
		err = 0;

	return err;
}
/*
 * Write the in-memory superblock back to disk.  The backup copy is written
 * first so that a crash mid-update always leaves one consistent copy; in
 * recovery mode (@recover) only the backup is written.
 *
 * Returns 0 on success, -EROFS when the fs/device is read-only, or the
 * error from reading/writing the superblock folio.
 */
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct folio *folio;
	pgoff_t index;
	__u32 crc = 0;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				f2fs_hw_is_readonly(sbi)) {
		/* remember that an SB write is still owed */
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* we should update superblock crc here */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));
		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* write back-up superblock first */
	index = sbi->valid_super_block ? 0 : 1;
	folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	err = __f2fs_commit_super(sbi, folio, index, true);
	folio_put(folio);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	index = sbi->valid_super_block;
	folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	err = __f2fs_commit_super(sbi, folio, index, true);
	folio_put(folio);
	return err;
}
  3605. static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason)
  3606. {
  3607. unsigned long flags;
  3608. spin_lock_irqsave(&sbi->error_lock, flags);
  3609. if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
  3610. sbi->stop_reason[reason]++;
  3611. spin_unlock_irqrestore(&sbi->error_lock, flags);
  3612. }
/*
 * Flush the accumulated stop reasons (and any pending error bits) into the
 * raw superblock and commit it.  Takes sb_lock around the commit; the
 * snapshot of sbi->errors/stop_reason itself is taken under error_lock.
 */
static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned long flags;
	int err;

	f2fs_down_write(&sbi->sb_lock);

	spin_lock_irqsave(&sbi->error_lock, flags);
	if (sbi->error_dirty) {
		/* piggy-back pending error bits onto this SB write */
		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
							MAX_F2FS_ERRORS);
		sbi->error_dirty = false;
	}
	memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON);
	spin_unlock_irqrestore(&sbi->error_lock, flags);

	err = f2fs_commit_super(sbi, false);

	f2fs_up_write(&sbi->sb_lock);
	if (err)
		f2fs_err_ratelimited(sbi,
			"f2fs_commit_super fails to record stop_reason, err:%d",
			err);
}
/*
 * Latch error bit @flag in the in-memory error bitmap.  error_dirty is set
 * only on a 0->1 transition, so already-recorded errors don't force another
 * superblock write.
 */
void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->error_lock, flags);
	if (!test_bit(flag, (unsigned long *)sbi->errors)) {
		set_bit(flag, (unsigned long *)sbi->errors);
		sbi->error_dirty = true;
	}
	spin_unlock_irqrestore(&sbi->error_lock, flags);
}
/*
 * Copy pending error bits into the raw superblock and clear the dirty flag.
 * Returns true when there was something to flush (i.e. the caller should
 * commit the superblock).
 */
static bool f2fs_update_errors(struct f2fs_sb_info *sbi)
{
	unsigned long flags;
	bool need_update = false;

	spin_lock_irqsave(&sbi->error_lock, flags);
	if (sbi->error_dirty) {
		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
							MAX_F2FS_ERRORS);
		sbi->error_dirty = false;
		need_update = true;
	}
	spin_unlock_irqrestore(&sbi->error_lock, flags);

	return need_update;
}
  3658. static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error)
  3659. {
  3660. int err;
  3661. f2fs_down_write(&sbi->sb_lock);
  3662. if (!f2fs_update_errors(sbi))
  3663. goto out_unlock;
  3664. err = f2fs_commit_super(sbi, false);
  3665. if (err)
  3666. f2fs_err_ratelimited(sbi,
  3667. "f2fs_commit_super fails to record errors:%u, err:%d",
  3668. error, err);
  3669. out_unlock:
  3670. f2fs_up_write(&sbi->sb_lock);
  3671. }
/* Latch @error in memory and synchronously persist it to the superblock. */
void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
{
	f2fs_save_errors(sbi, error);
	f2fs_record_errors(sbi, error);
}
/*
 * Latch @error and schedule the error work to persist it, for contexts
 * where writing the superblock synchronously could deadlock.
 *
 * NOTE(review): the error_dirty/test_bit checks below run without
 * error_lock, so they are only a best-effort filter to avoid queueing
 * redundant work — presumably intentional; confirm before relying on it.
 */
void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error)
{
	f2fs_save_errors(sbi, error);

	if (!sbi->error_dirty)
		return;
	if (!test_bit(error, (unsigned long *)sbi->errors))
		return;
	schedule_work(&sbi->s_error_work);
}
  3686. static bool system_going_down(void)
  3687. {
  3688. return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
  3689. || system_state == SYSTEM_RESTART;
  3690. }
/*
 * React to an unrecoverable filesystem error: flag the checkpoint as
 * errored, record the stop reason asynchronously, and then either panic,
 * continue, or (logically) fall to read-only depending on the mount's
 * errors= policy and whether this is a shutdown.
 */
void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason)
{
	struct super_block *sb = sbi->sb;
	bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
	bool continue_fs = !shutdown &&
			F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE;

	set_ckpt_flags(sbi, CP_ERROR_FLAG);

	if (!f2fs_hw_is_readonly(sbi)) {
		save_stop_reason(sbi, reason);

		/*
		 * always create an asynchronous task to record stop_reason
		 * in order to avoid potential deadlock when running into
		 * f2fs_record_stop_reason() synchronously.
		 */
		schedule_work(&sbi->s_error_work);
	}

	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC &&
				!shutdown && !system_going_down() &&
				!is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN))
		panic("F2FS-fs (device %s): panic forced after error\n",
							sb->s_id);

	if (shutdown)
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);

	/*
	 * Continue filesystem operators if errors=continue. Should not set
	 * RO by shutdown, since RO bypasses thaw_super which can hang the
	 * system.
	 */
	if (continue_fs || f2fs_readonly(sb) || shutdown) {
		f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason);
		return;
	}

	f2fs_warn(sbi, "Remounting filesystem read-only");

	/*
	 * We have already set CP_ERROR_FLAG flag to stop all updates
	 * to filesystem, so it doesn't need to set SB_RDONLY flag here
	 * because the flag should be set covered w/ sb->s_umount semaphore
	 * via remount procedure, otherwise, it will confuse code like
	 * freeze_super() which will lead to deadlocks and other problems.
	 */
}
/* Workqueue entry point: persist stop reasons/errors outside atomic context. */
static void f2fs_record_error_work(struct work_struct *work)
{
	struct f2fs_sb_info *sbi = container_of(work,
					struct f2fs_sb_info, s_error_work);

	f2fs_record_stop_reason(sbi);
}
/*
 * Find the segment number of the first sequential-write-required zone
 * across all devices.  Returns NULL_SEGNO when the image is not zoned or
 * no sequential zone exists.
 */
static inline unsigned int get_first_seq_zone_segno(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int zoneno, total_zones;
	int devi;

	if (!f2fs_sb_has_blkzoned(sbi))
		return NULL_SEGNO;

	for (devi = 0; devi < sbi->s_ndevs; devi++) {
		if (!bdev_is_zoned(FDEV(devi).bdev))
			continue;

		total_zones = GET_ZONE_FROM_SEG(sbi, FDEV(devi).total_segments);

		for (zoneno = 0; zoneno < total_zones; zoneno++) {
			unsigned int segs, blks;

			if (!f2fs_zone_is_seq(sbi, devi, zoneno))
				continue;

			/* translate zone index -> device-relative block offset */
			segs = GET_SEG_FROM_SEC(sbi,
					zoneno * sbi->secs_per_zone);
			blks = SEGS_TO_BLKS(sbi, segs);
			return GET_SEGNO(sbi, FDEV(devi).start_blk + blks);
		}
	}
#endif
	return NULL_SEGNO;
}
/*
 * Discover and open every device backing this filesystem.  For a plain
 * single-device image this is a no-op; otherwise sbi->devs[] is populated
 * with per-device block ranges, and zoned devices get their zone info
 * initialized.  On error the already-opened devices are released by the
 * caller via sbi->s_ndevs.
 *
 * Returns 0 on success or a negative errno.
 */
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	unsigned int logical_blksize;
	blk_mode_t mode = sb_open_mode(sbi->sb->s_flags);
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
	sbi->aligned_blksize = true;
#ifdef CONFIG_BLK_DEV_ZONED
	sbi->max_open_zones = UINT_MAX;
	sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ;
#endif

	for (i = 0; i < max_devices; i++) {
		if (max_devices == 1) {
			/* single zoned device: covers the whole main area */
			FDEV(i).total_segments =
				le32_to_cpu(raw_super->segment_count_main);
			FDEV(i).start_blk = 0;
			FDEV(i).end_blk = FDEV(i).total_segments *
						BLKS_PER_SEG(sbi);
		}

		/* device 0 reuses the bdev the VFS already opened for us */
		if (i == 0)
			FDEV(0).bdev_file = sbi->sb->s_bdev_file;
		else if (!RDEV(i).path[0])
			break;

		if (max_devices > 1) {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				/* first device also carries segment0_blkaddr */
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    SEGS_TO_BLKS(sbi,
				    FDEV(i).total_segments) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				/* subsequent devices follow contiguously */
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					SEGS_TO_BLKS(sbi,
					FDEV(i).total_segments) - 1;
				FDEV(i).bdev_file = bdev_file_open_by_path(
					FDEV(i).path, mode, sbi->sb, NULL);
			}
		}
		if (IS_ERR(FDEV(i).bdev_file))
			return PTR_ERR(FDEV(i).bdev_file);

		FDEV(i).bdev = file_bdev(FDEV(i).bdev_file);
		/* to release errored devices */
		sbi->s_ndevs = i + 1;

		if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
			sbi->aligned_blksize = false;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_is_zoned(FDEV(i).bdev)) {
			if (!f2fs_sb_has_blkzoned(sbi)) {
				f2fs_err(sbi, "Zoned block device feature not enabled");
				return -EINVAL;
			}
			if (init_blkz_info(sbi, i)) {
				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: Host-managed)",
				  i, FDEV(i).path,
				  FDEV(i).total_segments,
				  FDEV(i).start_blk, FDEV(i).end_blk);
			continue;
		}
#endif
		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
			  i, FDEV(i).path,
			  FDEV(i).total_segments,
			  FDEV(i).start_blk, FDEV(i).end_blk);
	}
	return 0;
}
/*
 * Load the unicode table requested by the superblock when the casefold
 * feature is enabled.  Without CONFIG_UNICODE a casefolded image cannot be
 * mounted at all.
 *
 * Returns 0 on success (or when casefold is not in use), a negative errno
 * when the encoding is unknown or cannot be loaded.
 */
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
#if IS_ENABLED(CONFIG_UNICODE)
	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
		const struct f2fs_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		encoding_info = f2fs_sb_read_encoding(sbi->raw_super);
		if (!encoding_info) {
			f2fs_err(sbi,
				 "Encoding requested by superblock is unknown");
			return -EINVAL;
		}

		encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags);
		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			f2fs_err(sbi,
				 "can't mount with superblock charset: %s-%u.%u.%u "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name,
				 unicode_major(encoding_info->version),
				 unicode_minor(encoding_info->version),
				 unicode_rev(encoding_info->version),
				 encoding_flags);
			return PTR_ERR(encoding);
		}
		f2fs_info(sbi, "Using encoding defined by superblock: "
			 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
			 unicode_major(encoding_info->version),
			 unicode_minor(encoding_info->version),
			 unicode_rev(encoding_info->version),
			 encoding_flags);

		sbi->sb->s_encoding = encoding;
		sbi->sb->s_encoding_flags = encoding_flags;
	}
#else
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	return 0;
}
  3904. static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
  3905. {
  3906. /* adjust parameters according to the volume size */
  3907. if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) {
  3908. if (f2fs_block_unit_discard(sbi))
  3909. SM_I(sbi)->dcc_info->discard_granularity =
  3910. MIN_DISCARD_GRANULARITY;
  3911. if (!f2fs_lfs_mode(sbi))
  3912. SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) |
  3913. BIT(F2FS_IPU_HONOR_OPU_WRITE);
  3914. }
  3915. sbi->readdir_ra = true;
  3916. }
/*
 * f2fs_fill_super - read the on-disk superblock/checkpoint and bring up
 * a mount: allocate and initialize sbi, parse options, load checkpoint,
 * scan devices, build segment/node managers, read meta/node/root inodes,
 * run fsync-data recovery, and start background threads.
 *
 * On failure, unwinds through the goto ladder below — the error labels
 * are ordered as the exact reverse of the initialization steps, so each
 * label frees only what was set up before the failing step.  If recovery
 * itself failed (skip_recovery set), the whole mount is retried once via
 * the try_onemore label with recovery disabled.
 *
 * Returns 0 on success or a negative errno.
 */
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool skip_recovery = false, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;
	int retry_cnt = 1;	/* give only one retry after a failed recovery */
#ifdef CONFIG_QUOTA
	bool quota_enabled = false;
#endif

try_onemore:
	/* reset per-attempt state; a retry re-runs everything from scratch */
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* initialize locks within allocated memory */
	init_f2fs_rwsem(&sbi->gc_lock);
	mutex_init(&sbi->writepages);
	init_f2fs_rwsem(&sbi->cp_global_sem);
	init_f2fs_rwsem(&sbi->node_write);
	init_f2fs_rwsem(&sbi->node_change);
	spin_lock_init(&sbi->stat_lock);
	init_f2fs_rwsem(&sbi->cp_rwsem);
	init_f2fs_rwsem(&sbi->quota_sem);
	init_waitqueue_head(&sbi->cp_wait);
	spin_lock_init(&sbi->error_lock);

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}
	mutex_init(&sbi->flush_lock);

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_err(sbi, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_err(sbi, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	INIT_WORK(&sbi->s_error_work, f2fs_record_error_work);
	memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
	memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON);

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sbi))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						 sizeof(raw_super->uuid));

	default_options(sbi, false);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options, false);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_blocks(NULL) <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;

	err = f2fs_setup_casefold(sbi);
	if (err)
		goto free_options;

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	/* count quota files stored in dedicated quota inodes */
	if (f2fs_sb_has_quota_ino(sbi)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &f2fs_verityops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid));
	super_set_sysfs_name_bdev(sb);
	sb->s_iflags |= SB_I_CGROUPWB;

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);

	err = f2fs_init_write_merge_io(sbi);
	if (err)
		goto free_bio_info;

	init_sb_info(sbi);

	err = f2fs_init_iostat(sbi);
	if (err)
		goto free_bio_info;

	err = init_percpu_info(sbi);
	if (err)
		goto free_iostat;

	/* init per sbi slab cache */
	err = f2fs_init_xattr_caches(sbi);
	if (err)
		goto free_percpu;
	err = f2fs_init_page_array_cache(sbi);
	if (err)
		goto free_xattr_cache;

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_err(sbi, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_page_array_cache;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* propagate checkpoint flags into sbi state */
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_NEED_FSCK);

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to find devices");
		goto free_devices;
	}

	err = f2fs_init_post_read_wq(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize post read workqueue");
		goto free_devices;
	}

	/* seed block/node/inode counters from the loaded checkpoint */
	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);

	/* setup checkpoint request control and start checkpoint issue thread */
	f2fs_init_ckpt_req_control(sbi);
	if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
			test_opt(sbi, MERGE_CHECKPOINT)) {
		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
			    "Failed to start F2FS issue_checkpoint_thread (%d)",
			    err);
			goto stop_ckpt_thread;
		}
	}

	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
			 err);
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
			 err);
		goto free_nm;
	}

	/* For write statistics */
	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);

	/* get segno of first zoned block device */
	sbi->first_seq_zone_segno = get_first_seq_zone_segno(sbi);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_err(sbi, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_err(sbi, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	/* sanity-check the root inode before trusting it */
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	generic_set_sb_d_ops(sb);
	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}

	err = f2fs_init_compress_inode(sbi);
	if (err)
		goto free_root_inode;

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_compress_inode;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			/* quota failure is reported but does not abort mount */
			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
	}

	quota_enabled = f2fs_recover_quota_begin(sbi);
#endif
	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
		goto reset_checkpoint;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
			!test_opt(sbi, NORECOVERY)) {
		/*
		 * mount should be failed, when device has readonly mode, and
		 * previous checkpoint was not done by clean system shutdown.
		 */
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				/* check-only pass: err > 0 means data exists */
				err = f2fs_recover_fsync_data(sbi, true);
				if (err > 0) {
					err = -EROFS;
					f2fs_err(sbi, "Need to recover fsync data, but "
						"write access unavailable, please try "
						"mount w/ disable_roll_forward or norecovery");
				}
				if (err < 0)
					goto free_meta;
			}
			f2fs_info(sbi, "write access unavailable, skipping recovery");
			goto reset_checkpoint;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			/* -ENOMEM is transient: retry still attempts recovery */
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
				 err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_err(sbi, "Need to recover fsync data");
			goto free_meta;
		}
	}

#ifdef CONFIG_QUOTA
	f2fs_recover_quota_end(sbi, quota_enabled);
#endif
reset_checkpoint:
	/*
	 * If the f2fs is not readonly and fsync data recovery succeeds,
	 * check zoned block devices' write pointer consistency.
	 */
	if (f2fs_sb_has_blkzoned(sbi) && !f2fs_readonly(sb)) {
		int err2;

		f2fs_notice(sbi, "Checking entire write pointers");
		err2 = f2fs_check_write_pointer(sbi);
		if (err2)
			err = err2;
	}
	if (err)
		goto free_meta;

	err = f2fs_init_inmem_curseg(sbi);
	if (err)
		goto sync_free_meta;

	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
			goto sync_free_meta;
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
		test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}
	kvfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
			  sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
		    cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
	return 0;

	/*
	 * Error unwind: labels below run in reverse order of the setup
	 * steps above; control falls through each label to the next.
	 */
sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	retry_cnt = 0;	/* failure after recovery succeeded: do not retry */

free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_compress_inode:
	f2fs_destroy_compress_inode(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	/* stop discard thread before destroying node manager */
	f2fs_stop_discard_thread(sbi);
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
stop_ckpt_thread:
	f2fs_stop_ckpt_thread(sbi);
	/* flush s_error_work before sbi destroy */
	flush_work(&sbi->s_error_work);
	f2fs_destroy_post_read_wq(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_page_array_cache:
	f2fs_destroy_page_array_cache(sbi);
free_xattr_cache:
	f2fs_destroy_xattr_caches(sbi);
free_percpu:
	destroy_percpu_info(sbi);
free_iostat:
	f2fs_destroy_iostat(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);

#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
	sb->s_encoding = NULL;
#endif
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	kvfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);
	sb->s_fs_info = NULL;

	/* give only one another chance */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
/*
 * Mount entry point: delegate to mount_bdev(), which opens the backing
 * block device and invokes f2fs_fill_super() on a fresh superblock.
 */
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
/*
 * Tear down an f2fs superblock at unmount time.
 *
 * If fill_super succeeded (sb->s_root exists): mark the sbi as closing,
 * stop the GC and discard threads, drop the compress inode page cache,
 * and write a final CP_UMOUNT checkpoint when the fs is dirty or the
 * last checkpoint was not a clean-umount one.  The device list and sbi
 * are released only after kill_block_super() (see comment below).
 */
static void kill_f2fs_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb->s_root) {
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		/*
		 * latter evict_inode() can bypass checking and invalidating
		 * compress inode cache.
		 */
		if (test_opt(sbi, COMPRESS_CACHE))
			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
#endif

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};

			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			f2fs_write_checkpoint(sbi, &cpc);
		}

		/*
		 * NOTE(review): clears SB_RDONLY when the superblock was
		 * recovered on a read-only mount — presumably so the repaired
		 * superblock can be written back; confirm against commit log.
		 */
		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
	/* Release block devices last, after fscrypt_destroy_keyring(). */
	if (sbi) {
		destroy_device_list(sbi);
		kfree(sbi);
		sb->s_fs_info = NULL;
	}
}
/*
 * f2fs filesystem type registration: requires a backing block device
 * and allows idmapped mounts.
 */
static struct file_system_type f2fs_fs_type = {
	.owner = THIS_MODULE,
	.name = "f2fs",
	.mount = f2fs_mount,
	.kill_sb = kill_f2fs_super,
	.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("f2fs");
  4410. static int __init init_inodecache(void)
  4411. {
  4412. f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
  4413. sizeof(struct f2fs_inode_info), 0,
  4414. SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
  4415. return f2fs_inode_cachep ? 0 : -ENOMEM;
  4416. }
/*
 * Free the f2fs inode slab cache after draining any RCU-deferred
 * inode frees still in flight.
 */
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
/*
 * Module init: create every global cache and subsystem in dependency
 * order, registering the filesystem type last so no mount can observe a
 * half-initialized module.  On failure, the goto ladder unwinds in the
 * exact reverse of the initialization order.
 */
static int __init init_f2fs_fs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_recovery_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_recovery_cache;
	err = f2fs_create_garbage_collection_cache();
	if (err)
		goto free_extent_cache;
	err = f2fs_init_sysfs();
	if (err)
		goto free_garbage_collection_cache;
	err = f2fs_init_shrinker();
	if (err)
		goto free_sysfs;
	/* root stats creation has no failure path to unwind */
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	err = f2fs_init_iostat_processing();
	if (err)
		goto free_post_read;
	err = f2fs_init_bio_entry_cache();
	if (err)
		goto free_iostat;
	err = f2fs_init_bioset();
	if (err)
		goto free_bio_entry_cache;
	err = f2fs_init_compress_mempool();
	if (err)
		goto free_bioset;
	err = f2fs_init_compress_cache();
	if (err)
		goto free_compress_mempool;
	err = f2fs_create_casefold_cache();
	if (err)
		goto free_compress_cache;
	/* everything is up: make the filesystem visible to mount(2) */
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_casefold_cache;
	return 0;
free_casefold_cache:
	f2fs_destroy_casefold_cache();
free_compress_cache:
	f2fs_destroy_compress_cache();
free_compress_mempool:
	f2fs_destroy_compress_mempool();
free_bioset:
	f2fs_destroy_bioset();
free_bio_entry_cache:
	f2fs_destroy_bio_entry_cache();
free_iostat:
	f2fs_destroy_iostat_processing();
free_post_read:
	f2fs_destroy_post_read_processing();
free_root_stats:
	f2fs_destroy_root_stats();
	f2fs_exit_shrinker();
free_sysfs:
	f2fs_exit_sysfs();
free_garbage_collection_cache:
	f2fs_destroy_garbage_collection_cache();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_recovery_cache:
	f2fs_destroy_recovery_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
/*
 * Module exit: unregister the filesystem first so no new mounts start,
 * then tear everything down in the exact reverse of init_f2fs_fs().
 */
static void __exit exit_f2fs_fs(void)
{
	unregister_filesystem(&f2fs_fs_type);
	f2fs_destroy_casefold_cache();
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_iostat_processing();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	f2fs_exit_shrinker();
	f2fs_exit_sysfs();
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_recovery_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
}
  4539. module_init(init_f2fs_fs)
  4540. module_exit(exit_f2fs_fs)
  4541. MODULE_AUTHOR("Samsung Electronics's Praesto Team");
  4542. MODULE_DESCRIPTION("Flash Friendly File System");
  4543. MODULE_LICENSE("GPL");
  4544. MODULE_SOFTDEP("pre: crc32");