block-group.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/sizes.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

static inline bool has_unwritten_metadata(struct btrfs_block_group *block_group)
{
	/* The meta_write_pointer is available only on the zoned setup. */
	if (!btrfs_is_zoned(block_group->fs_info))
		return false;

	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
		return false;

	return block_group->start + block_group->alloc_offset >
		block_group->meta_write_pointer;
}

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(const struct btrfs_fs_info *fs_info, u64 flags)
{
	const struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	/* Select the highest-redundancy RAID level. */
	if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
		allowed = BTRFS_BLOCK_GROUP_RAID1C4;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
		allowed = BTRFS_BLOCK_GROUP_RAID1C3;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_DUP)
		allowed = BTRFS_BLOCK_GROUP_DUP;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
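
/*
 * Illustrative sketch, not part of the original file: how the reduction
 * above behaves for a typical input. example_reduce_profile() is a
 * hypothetical helper added here only for demonstration. Assuming two or
 * more rw devices and no balance/restripe in progress, a metadata chunk
 * type carrying both the RAID1 and RAID0 profile bits reduces to the
 * higher-redundancy RAID1 profile.
 */
static u64 __maybe_unused example_reduce_profile(struct btrfs_fs_info *fs_info)
{
	/* Hypothetical extended flags: metadata with RAID1 and RAID0 set. */
	u64 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1 |
		    BTRFS_BLOCK_GROUP_RAID0;

	/* Expected: BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1. */
	return btrfs_reduce_alloc_profile(fs_info, flags);
}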

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		/*
		 * If there was a failure to cleanup a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on reserved > 0 in that
		 * case.
		 */
		if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
			WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		kfree(cache->free_space_ctl);
		btrfs_free_chunk_map(cache->physical_map);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;
	bool leftmost = true;

	ASSERT(block_group->length != 0);

	write_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_root.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			write_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color_cached(&block_group->cache_node,
			       &info->block_group_cache_tree, leftmost);

	write_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	read_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_root.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	read_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	read_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		read_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	read_unlock(&fs_info->block_group_cache_lock);
	return cache;
}
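
/*
 * Illustrative sketch, not part of the original file: the lookup and
 * next helpers above combine into a reference-safe iteration over all
 * block groups, along these lines. example_iterate_block_groups() is a
 * hypothetical helper added only for demonstration.
 */
static void __maybe_unused example_iterate_block_groups(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *bg;

	/* Takes a reference on the first block group at or after offset 0. */
	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
	     bg = btrfs_next_block_group(bg)) {
		/*
		 * Use bg here; btrfs_next_block_group() drops the current
		 * reference and returns the next group with a reference held.
		 * A caller that breaks out early must drop the reference with
		 * btrfs_put_block_group() itself.
		 */
	}
}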

/*
 * Check if we can do a NOCOW write for a given extent.
 *
 * @fs_info:	The filesystem information object.
 * @bytenr:	Logical start address of the extent.
 *
 * Check if we can do a NOCOW write for the given extent, and increment the
 * number of NOCOW writers in the block group that contains the extent, as long
 * as the block group exists and it's currently not in read-only mode.
 *
 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
 * is responsible for calling btrfs_dec_nocow_writers() later.
 *
 * Or NULL if we cannot do a NOCOW write
 */
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool can_nocow = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return NULL;

	spin_lock(&bg->lock);
	if (bg->ro)
		can_nocow = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	if (!can_nocow) {
		btrfs_put_block_group(bg);
		return NULL;
	}

	/* No put on block group, done by btrfs_dec_nocow_writers(). */
	return bg;
}

/*
 * Decrement the number of NOCOW writers in a block group.
 *
 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 * and on the block group returned by that call. Typically this is called after
 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 * relocation.
 *
 * After this call, the caller should not use the block group anymore. If it
 * wants to use it, then it should get a reference on it before calling this
 * function.
 */
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);

	/* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}
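
/*
 * Illustrative sketch, not part of the original file: the expected
 * pairing of the NOCOW writer helpers above in a write path, as
 * described in their comments. example_nocow_write() is a hypothetical
 * helper added only for demonstration.
 */
static int __maybe_unused example_nocow_write(struct btrfs_fs_info *fs_info,
					      u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_inc_nocow_writers(fs_info, bytenr);
	if (!bg)
		return -EAGAIN;	/* Cannot NOCOW, fall back to a COW write. */

	/* ... do the NOCOW write and create the ordered extent here ... */

	/* Drops both the writer count and the lookup reference. */
	btrfs_dec_nocow_writers(bg);
	return 0;
}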

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	int progress;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	/*
	 * We've already failed to allocate from this block group, so even if
	 * there's enough space in the block group it isn't contiguous enough to
	 * allow for an allocation, so wait for at least the next wakeup tick,
	 * or for the thing to be done.
	 */
	progress = atomic_read(&caching_ctl->progress);

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (progress != atomic_read(&caching_ctl->progress) &&
		    (cache->free_space_ctl->free_space >= num_bytes)));

	btrfs_put_caching_control(caching_ctl);
}

static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
				       struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
}

static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
	ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * Add a free space range to the in memory free space cache of a block group.
 * This checks if the range contains super block locations and any such
 * locations are not added to the free space cache.
 *
 * @block_group:      The target block group.
 * @start:            Start offset of the range.
 * @end:              End offset of the range (exclusive).
 * @total_added_ret:  Optional pointer to return the total amount of space
 *                    added to the block group's free space cache.
 *
 * Returns 0 on success or < 0 on error.
 */
int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start,
			     u64 end, u64 *total_added_ret)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size;
	int ret;

	if (total_added_ret)
		*total_added_ret = 0;

	while (start < end) {
		if (!find_first_extent_bit(&info->excluded_extents, start,
					   &extent_start, &extent_end,
					   EXTENT_DIRTY | EXTENT_UPTODATE,
					   NULL))
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			if (ret)
				return ret;
			if (total_added_ret)
				*total_added_ret += size;
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		if (ret)
			return ret;
		if (total_added_ret)
			*total_added_ret += size;
	}

	return 0;
}
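
/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to feed a free range into the cache (load_extent_tree_free()
 * below does exactly this with the gaps between extent items). The
 * example_add_free_gap() helper and its offsets are hypothetical.
 */
static int __maybe_unused example_add_free_gap(struct btrfs_block_group *bg)
{
	u64 added = 0;
	int ret;

	/* Add [start + 1M, start + 4M) as free space; excluded ranges are skipped. */
	ret = btrfs_add_new_free_space(bg, bg->start + SZ_1M,
				       bg->start + SZ_4M, &added);
	if (ret)
		return ret;

	/* "added" may be less than 3M if part of the range was excluded. */
	return 0;
}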

/*
 * Get an arbitrary extent item index / max_index through the block group
 *
 * @block_group:  the block group to sample from
 * @index:        the integral step through the block group to grab from
 * @max_index:    the granularity of the sampling
 * @key:          return value parameter for the item we find
 *
 * Pre-conditions on indices:
 * 0 <= index <= max_index
 * 0 < max_index
 *
 * Returns: 0 on success, 1 if the search didn't yield a useful item, negative
 * error code on error.
 */
static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl,
					  struct btrfs_block_group *block_group,
					  int index, int max_index,
					  struct btrfs_key *found_key)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	u64 search_offset;
	u64 search_end = block_group->start + block_group->length;
	struct btrfs_path *path;
	struct btrfs_key search_key;
	int ret = 0;

	ASSERT(index >= 0);
	ASSERT(index <= max_index);
	ASSERT(max_index > 0);
	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start,
						       BTRFS_SUPER_INFO_OFFSET));

	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	search_offset = index * div_u64(block_group->length, max_index);
	search_key.objectid = block_group->start + search_offset;
	search_key.type = BTRFS_EXTENT_ITEM_KEY;
	search_key.offset = 0;

	btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
		/* Success; sampled an extent item in the block group */
		if (found_key->type == BTRFS_EXTENT_ITEM_KEY &&
		    found_key->objectid >= block_group->start &&
		    found_key->objectid + found_key->offset <= search_end)
			break;

		/* We can't possibly find a valid extent item anymore */
		if (found_key->objectid >= search_end) {
			ret = 1;
			break;
		}
	}

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	btrfs_free_path(path);
	return ret;
}

/*
 * Best effort attempt to compute a block group's size class while caching it.
 *
 * @block_group: the block group we are caching
 *
 * We cannot infer the size class while adding free space extents, because that
 * logic doesn't care about contiguous file extents (it doesn't differentiate
 * between a 100M extent and 100 contiguous 1M extents). So we need to read the
 * file extent items. Reading all of them is quite wasteful, because usually
 * only a handful are enough to give a good answer. Therefore, we just grab 5 of
 * them at even steps through the block group and pick the smallest size class
 * we see. Since size class is best effort, and not guaranteed in general,
 * inaccuracy is acceptable.
 *
 * To be more explicit about why this algorithm makes sense:
 *
 * If we are caching in a block group from disk, then there are three major cases
 * to consider:
 * 1. the block group is well behaved and all extents in it are the same size
 *    class.
 * 2. the block group is mostly one size class with rare exceptions for last
 *    ditch allocations
 * 3. the block group was populated before size classes and can have a totally
 *    arbitrary mix of size classes.
 *
 * In case 1, looking at any extent in the block group will yield the correct
 * result. For the mixed cases, taking the minimum size class seems like a good
 * approximation, since gaps from frees will be usable to the size class. For
 * 2., a small handful of file extents is likely to yield the right answer. For
 * 3, we can either read every file extent, or admit that this is best effort
 * anyway and try to stay fast.
 *
 * Returns: 0 on success, negative error code on error.
 */
static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl,
				       struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_key key;
	int i;
	u64 min_size = block_group->length;
	enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE;
	int ret;

	if (!btrfs_block_group_should_use_size_class(block_group))
		return 0;

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);

	for (i = 0; i < 5; ++i) {
		ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key);
		if (ret < 0)
			goto out;
		if (ret > 0)
			continue;
		min_size = min_t(u64, min_size, key.offset);
		size_class = btrfs_calc_block_group_size_class(min_size);
	}
	if (size_class != BTRFS_BG_SZ_NONE) {
		spin_lock(&block_group->lock);
		block_group->size_class = size_class;
		spin_unlock(&block_group->lock);
	}
out:
	return ret;
}
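
/*
 * Illustrative sketch, not part of the original file: the sampling
 * arithmetic used above. example_sample_offset() is a hypothetical
 * helper; with max_index == 5 the searches start at even length/5 steps
 * from the block group start, so for a hypothetical 1 GiB group index 0
 * maps to +0, index 1 to roughly +204 MiB, and index 4 to roughly
 * +819 MiB.
 */
static u64 __maybe_unused example_sample_offset(const struct btrfs_block_group *bg,
						int index)
{
	return bg->start + index * div_u64(bg->length, 5);
}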

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
	extent_root = btrfs_extent_root(fs_info, last);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			u64 space_added;

			ret = btrfs_add_new_free_space(block_group, last,
						       key.objectid, &space_added);
			if (ret)
				goto out;
			total_found += space_added;
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup) {
					atomic_inc(&caching_ctl->progress);
					wake_up(&caching_ctl->wait);
				}
			}
		}
		path->slots[0]++;
	}

	ret = btrfs_add_new_free_space(block_group, last,
				       block_group->start + block_group->length,
				       NULL);
out:
	btrfs_free_path(path);
	return ret;
}

static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg)
{
	clear_extent_bits(&bg->fs_info->excluded_extents, bg->start,
			  bg->start + bg->length - 1, EXTENT_UPTODATE);
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	load_block_group_size_class(caching_ctl, block_group);
	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		ret = load_free_space_cache(block_group);
		if (ret == 1) {
			ret = 0;
			goto done;
		}

		/*
		 * We failed to load the space cache, set ourselves to
		 * CACHE_STARTED and carry on.
		 */
		spin_lock(&block_group->lock);
		block_group->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&block_group->lock);
		wake_up(&caching_ctl->wait);
	}

	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching. Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
done:
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl = NULL;
	int ret = 0;

	/* Allocator for zoned filesystems does not use the cache at all */
	if (btrfs_is_zoned(fs_info))
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	refcount_set(&caching_ctl->count, 2);
	atomic_set(&caching_ctl->progress, 0);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		kfree(caching_ctl);

		caching_ctl = cache->caching_ctl;
		if (caching_ctl)
			refcount_inc(&caching_ctl->count);
		spin_unlock(&cache->lock);
		goto out;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	write_lock(&fs_info->block_group_cache_lock);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	write_unlock(&fs_info->block_group_cache_lock);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
	if (wait && caching_ctl)
		ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	if (caching_ctl)
		btrfs_put_caching_control(caching_ctl);

	return ret;
}
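
/*
 * Illustrative sketch, not part of the original file: a caller that
 * needs the free space numbers kicks off caching and waits for it to
 * finish. example_ensure_cached() is a hypothetical helper added only
 * for demonstration.
 */
static int __maybe_unused example_ensure_cached(struct btrfs_block_group *bg)
{
	/*
	 * wait == true blocks until the caching thread is done and returns
	 * -EIO if the block group ended up in BTRFS_CACHE_ERROR.
	 */
	return btrfs_cache_block_group(bg, true);
}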
  855. static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
  856. {
  857. u64 extra_flags = chunk_to_extended(flags) &
  858. BTRFS_EXTENDED_PROFILE_MASK;
  859. write_seqlock(&fs_info->profiles_lock);
  860. if (flags & BTRFS_BLOCK_GROUP_DATA)
  861. fs_info->avail_data_alloc_bits &= ~extra_flags;
  862. if (flags & BTRFS_BLOCK_GROUP_METADATA)
  863. fs_info->avail_metadata_alloc_bits &= ~extra_flags;
  864. if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
  865. fs_info->avail_system_alloc_bits &= ~extra_flags;
  866. write_sequnlock(&fs_info->profiles_lock);
  867. }
  868. /*
  869. * Clear incompat bits for the following feature(s):
  870. *
  871. * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
  872. * in the whole filesystem
  873. *
  874. * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
  875. */
  876. static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
  877. {
  878. bool found_raid56 = false;
  879. bool found_raid1c34 = false;
  880. if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
  881. (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
  882. (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
  883. struct list_head *head = &fs_info->space_info;
  884. struct btrfs_space_info *sinfo;
  885. list_for_each_entry_rcu(sinfo, head, list) {
  886. down_read(&sinfo->groups_sem);
  887. if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
  888. found_raid56 = true;
  889. if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
  890. found_raid56 = true;
  891. if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
  892. found_raid1c34 = true;
  893. if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
  894. found_raid1c34 = true;
  895. up_read(&sinfo->groups_sem);
  896. }
  897. if (!found_raid56)
  898. btrfs_clear_fs_incompat(fs_info, RAID56);
  899. if (!found_raid1c34)
  900. btrfs_clear_fs_incompat(fs_info, RAID1C34);
  901. }
  902. }
  903. static struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
  904. {
  905. if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
  906. return fs_info->block_group_root;
  907. return btrfs_extent_root(fs_info, 0);
  908. }
  909. static int remove_block_group_item(struct btrfs_trans_handle *trans,
  910. struct btrfs_path *path,
  911. struct btrfs_block_group *block_group)
  912. {
  913. struct btrfs_fs_info *fs_info = trans->fs_info;
  914. struct btrfs_root *root;
  915. struct btrfs_key key;
  916. int ret;
  917. root = btrfs_block_group_root(fs_info);
  918. key.objectid = block_group->start;
  919. key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
  920. key.offset = block_group->length;
  921. ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
  922. if (ret > 0)
  923. ret = -ENOENT;
  924. if (ret < 0)
  925. return ret;
  926. ret = btrfs_del_item(trans, root, path);
  927. return ret;
  928. }
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_chunk_map *map)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_path *path;
        struct btrfs_block_group *block_group;
        struct btrfs_free_cluster *cluster;
        struct inode *inode;
        struct kobject *kobj = NULL;
        int ret;
        int index;
        int factor;
        struct btrfs_caching_control *caching_ctl = NULL;
        bool remove_map;
        bool remove_rsv = false;

        block_group = btrfs_lookup_block_group(fs_info, map->start);
        if (!block_group)
                return -ENOENT;

        BUG_ON(!block_group->ro);

        trace_btrfs_remove_block_group(block_group);
        /*
         * Free the reserved super bytes from this block group before
         * removing it.
         */
        btrfs_free_excluded_extents(block_group);
        btrfs_free_ref_tree_range(fs_info, block_group->start,
                                  block_group->length);

        index = btrfs_bg_flags_to_raid_index(block_group->flags);
        factor = btrfs_bg_type_to_factor(block_group->flags);

        /* Make sure this block group isn't part of an allocation cluster. */
        cluster = &fs_info->data_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        /*
         * Make sure this block group isn't part of a metadata
         * allocation cluster.
         */
        cluster = &fs_info->meta_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        btrfs_clear_treelog_bg(block_group);
        btrfs_clear_data_reloc_bg(block_group);

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Get the inode first so any iput calls done for the io_list
         * aren't the final iput (no unlinks allowed now).
         */
        inode = lookup_free_space_inode(block_group, path);

        mutex_lock(&trans->transaction->cache_write_mutex);
        /*
         * Make sure our free space cache IO is done before removing the
         * free space inode.
         */
        spin_lock(&trans->transaction->dirty_bgs_lock);
        if (!list_empty(&block_group->io_list)) {
                list_del_init(&block_group->io_list);

                WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

                spin_unlock(&trans->transaction->dirty_bgs_lock);
                btrfs_wait_cache_io(trans, block_group, path);
                btrfs_put_block_group(block_group);
                spin_lock(&trans->transaction->dirty_bgs_lock);
        }

        if (!list_empty(&block_group->dirty_list)) {
                list_del_init(&block_group->dirty_list);
                remove_rsv = true;
                btrfs_put_block_group(block_group);
        }
        spin_unlock(&trans->transaction->dirty_bgs_lock);
        mutex_unlock(&trans->transaction->cache_write_mutex);

        ret = btrfs_remove_free_space_inode(trans, inode, block_group);
        if (ret)
                goto out;

        write_lock(&fs_info->block_group_cache_lock);
        rb_erase_cached(&block_group->cache_node,
                        &fs_info->block_group_cache_tree);
        RB_CLEAR_NODE(&block_group->cache_node);

        /* Once for the block groups rbtree */
        btrfs_put_block_group(block_group);

        write_unlock(&fs_info->block_group_cache_lock);

        down_write(&block_group->space_info->groups_sem);
        /*
         * We must use list_del_init so people can check to see if they
         * are still on the list after taking the semaphore.
         */
        list_del_init(&block_group->list);
        if (list_empty(&block_group->space_info->block_groups[index])) {
                kobj = block_group->space_info->block_group_kobjs[index];
                block_group->space_info->block_group_kobjs[index] = NULL;
                clear_avail_alloc_bits(fs_info, block_group->flags);
        }
        up_write(&block_group->space_info->groups_sem);
        clear_incompat_bg_bits(fs_info, block_group->flags);
        if (kobj) {
                kobject_del(kobj);
                kobject_put(kobj);
        }

        if (block_group->cached == BTRFS_CACHE_STARTED)
                btrfs_wait_block_group_cache_done(block_group);

        write_lock(&fs_info->block_group_cache_lock);
        caching_ctl = btrfs_get_caching_control(block_group);
        if (!caching_ctl) {
                struct btrfs_caching_control *ctl;

                list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
                        if (ctl->block_group == block_group) {
                                caching_ctl = ctl;
                                refcount_inc(&caching_ctl->count);
                                break;
                        }
                }
        }
        if (caching_ctl)
                list_del_init(&caching_ctl->list);
        write_unlock(&fs_info->block_group_cache_lock);

        if (caching_ctl) {
                /* Once for the caching bgs list and once for us. */
                btrfs_put_caching_control(caching_ctl);
                btrfs_put_caching_control(caching_ctl);
        }

        spin_lock(&trans->transaction->dirty_bgs_lock);
        WARN_ON(!list_empty(&block_group->dirty_list));
        WARN_ON(!list_empty(&block_group->io_list));
        spin_unlock(&trans->transaction->dirty_bgs_lock);

        btrfs_remove_free_space_cache(block_group);

        spin_lock(&block_group->space_info->lock);
        list_del_init(&block_group->ro_list);

        if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
                WARN_ON(block_group->space_info->total_bytes
                        < block_group->length);
                WARN_ON(block_group->space_info->bytes_readonly
                        < block_group->length - block_group->zone_unusable);
                WARN_ON(block_group->space_info->bytes_zone_unusable
                        < block_group->zone_unusable);
                WARN_ON(block_group->space_info->disk_total
                        < block_group->length * factor);
        }
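
        /*
         * Adjust the space_info counters. As a sketch of the arithmetic:
         * @factor reflects on-disk duplication as returned by
         * btrfs_bg_type_to_factor(), e.g. 2 for RAID1/DUP profiles, so
         * disk_total drops by twice the logical length for those groups.
         */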
        block_group->space_info->total_bytes -= block_group->length;
        block_group->space_info->bytes_readonly -=
                (block_group->length - block_group->zone_unusable);
        btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
                                                    -block_group->zone_unusable);
        block_group->space_info->disk_total -= block_group->length * factor;

        spin_unlock(&block_group->space_info->lock);

        /*
         * Remove the free space for the block group from the free space tree
         * and the block group's item from the extent tree before marking the
         * block group as removed. This is to prevent races with tasks that
         * freeze and unfreeze a block group, this task and another task
         * allocating a new block group - the unfreeze task ends up removing
         * the block group's extent map before the task calling this function
         * deletes the block group item from the extent tree, allowing for
         * another task to attempt to create another block group with the same
         * item key (and failing with -EEXIST and a transaction abort).
         */
        ret = remove_block_group_free_space(trans, block_group);
        if (ret)
                goto out;

        ret = remove_block_group_item(trans, path, block_group);
        if (ret < 0)
                goto out;

        spin_lock(&block_group->lock);
        /*
         * Hitting this WARN means we removed a block group with an unwritten
         * region. It will cause "unable to find chunk map for logical" errors.
         */
        if (WARN_ON(has_unwritten_metadata(block_group)))
                btrfs_warn(fs_info,
                           "block group %llu is removed before metadata write out",
                           block_group->start);

        set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);

        /*
         * At this point trimming or scrub can't start on this block group,
         * because we removed the block group from the rbtree
         * fs_info->block_group_cache_tree so no one can find it anymore and
         * even if someone already got this block group before we removed it
         * from the rbtree, they have already incremented block_group->frozen -
         * if they didn't, for the trimming case they won't find any free space
         * entries because we already removed them all when we called
         * btrfs_remove_free_space_cache().
         *
         * And we must not remove the chunk map from the fs_info->mapping_tree
         * to prevent the same logical address range and physical device space
         * ranges from being reused for a new block group. This is needed to
         * avoid races with trimming and scrub.
         *
         * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
         * completely transactionless, so while it is trimming a range the
         * currently running transaction might finish and a new one start,
         * allowing for new block groups to be created that can reuse the same
         * physical device locations unless we take this special care.
         *
         * There may also be an implicit trim operation if the file system
         * is mounted with -odiscard. The same protections must remain
         * in place until the extents have been discarded completely when
         * the transaction commit has completed.
         */
        remove_map = (atomic_read(&block_group->frozen) == 0);
        spin_unlock(&block_group->lock);

        if (remove_map)
                btrfs_remove_chunk_map(fs_info, map);

out:
        /* Once for the lookup reference */
        btrfs_put_block_group(block_group);
        if (remove_rsv)
                btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
        btrfs_free_path(path);
        return ret;
}
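
/*
 * Start a transaction with enough metadata units reserved to remove the
 * block group at @chunk_offset. For example, with the "3 + N stripes" rule
 * documented below, a two-stripe chunk (e.g. RAID1 across two devices)
 * reserves 3 + 2 = 5 units.
 */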
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
                struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
        struct btrfs_root *root = btrfs_block_group_root(fs_info);
        struct btrfs_chunk_map *map;
        unsigned int num_items;

        map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
        ASSERT(map != NULL);
        ASSERT(map->start == chunk_offset);

        /*
         * We need to reserve 3 + N units from the metadata space info in order
         * to remove a block group (done at btrfs_remove_chunk() and at
         * btrfs_remove_block_group()), which are used for:
         *
         * 1 unit for adding the free space inode's orphan (located in the tree
         * of tree roots).
         * 1 unit for deleting the block group item (located in the extent
         * tree).
         * 1 unit for deleting the free space item (located in tree of tree
         * roots).
         * N units for deleting N device extent items corresponding to each
         * stripe (located in the device tree).
         *
         * In order to remove a block group we also need to reserve units in the
         * system space info in order to update the chunk tree (update one or
         * more device items and remove one chunk item), but this is done at
         * btrfs_remove_chunk() through a call to check_system_chunk().
         */
        num_items = 3 + map->num_stripes;
        btrfs_free_chunk_map(map);

        return btrfs_start_transaction_fallback_global_rsv(root, num_items);
}

/*
 * Mark block group @cache read-only, so later writes won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
        struct btrfs_space_info *sinfo = cache->space_info;
        u64 num_bytes;
        int ret = -ENOSPC;

        spin_lock(&sinfo->lock);
        spin_lock(&cache->lock);

        if (cache->swap_extents) {
                ret = -ETXTBSY;
                goto out;
        }

        if (cache->ro) {
                cache->ro++;
                ret = 0;
                goto out;
        }

        num_bytes = cache->length - cache->reserved - cache->pinned -
                    cache->bytes_super - cache->zone_unusable - cache->used;
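        /*
         * num_bytes is thus the space still allocatable from this group. For
         * instance (illustrative numbers only): a 1 GiB group with 600 MiB
         * used and 100 MiB reserved/pinned/super leaves num_bytes = 324 MiB
         * that would stop being allocatable once the group goes read-only.
         */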

        /*
         * Data never overcommits, even in mixed mode, so do just the straight
         * check of left over space in how much we have allocated.
         */
        if (force) {
                ret = 0;
        } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
                u64 sinfo_used = btrfs_space_info_used(sinfo, true);

                /*
                 * Here we make sure if we mark this bg RO, we still have enough
                 * free space as buffer.
                 */
                if (sinfo_used + num_bytes <= sinfo->total_bytes)
                        ret = 0;
        } else {
                /*
                 * We overcommit metadata, so we need to do the
                 * btrfs_can_overcommit check here, and we need to pass in
                 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
                 * leeway to allow us to mark this block group as read only.
                 */
                if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
                                         BTRFS_RESERVE_NO_FLUSH))
                        ret = 0;
        }

        if (!ret) {
                sinfo->bytes_readonly += num_bytes;
                if (btrfs_is_zoned(cache->fs_info)) {
                        /* Migrate zone_unusable bytes to readonly */
                        sinfo->bytes_readonly += cache->zone_unusable;
                        btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
                                                                    -cache->zone_unusable);
                        cache->zone_unusable = 0;
                }
                cache->ro++;
                list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
        }
out:
        spin_unlock(&cache->lock);
        spin_unlock(&sinfo->lock);
        if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
                btrfs_info(cache->fs_info,
                           "unable to make block group %llu ro", cache->start);
                btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
        }
        return ret;
}
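
/*
 * Clear any pinned-extent records covering @bg, both in the current
 * transaction and, if one is still around, in the previous transaction.
 * Returns true if all ranges were cleared, false on error.
 */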
static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
                                 const struct btrfs_block_group *bg)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_transaction *prev_trans = NULL;
        const u64 start = bg->start;
        const u64 end = start + bg->length - 1;
        int ret;

        spin_lock(&fs_info->trans_lock);
        if (trans->transaction->list.prev != &fs_info->trans_list) {
                prev_trans = list_last_entry(&trans->transaction->list,
                                             struct btrfs_transaction, list);
                refcount_inc(&prev_trans->use_count);
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * Hold the unused_bg_unpin_mutex lock to avoid racing with
         * btrfs_finish_extent_commit(). If we are at transaction N, another
         * task might be running finish_extent_commit() for the previous
         * transaction N - 1, and have seen a range belonging to the block
         * group in pinned_extents before we were able to clear the whole block
         * group range from pinned_extents. This means that task can lookup for
         * the block group after we unpinned it from pinned_extents and removed
         * it, leading to an error at unpin_extent_range().
         */
        mutex_lock(&fs_info->unused_bg_unpin_mutex);
        if (prev_trans) {
                ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
                                        EXTENT_DIRTY);
                if (ret)
                        goto out;
        }

        ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
                                EXTENT_DIRTY);
out:
        mutex_unlock(&fs_info->unused_bg_unpin_mutex);
        if (prev_trans)
                btrfs_put_transaction(prev_trans);

        return ret == 0;
}

/*
 * Link the block_group to a list via bg_list.
 *
 * @bg:   The block_group to link to the list.
 * @list: The list to link it to.
 *
 * Use this rather than list_add_tail() directly to ensure proper respect
 * to locking and refcounting.
 *
 * Returns: true if the bg was linked with a refcount bump and false otherwise.
 */
static bool btrfs_link_bg_list(struct btrfs_block_group *bg, struct list_head *list)
{
        struct btrfs_fs_info *fs_info = bg->fs_info;
        bool added = false;

        spin_lock(&fs_info->unused_bgs_lock);
        if (list_empty(&bg->bg_list)) {
                btrfs_get_block_group(bg);
                list_add_tail(&bg->bg_list, list);
                added = true;
        }
        spin_unlock(&fs_info->unused_bgs_lock);
        return added;
}
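
/*
 * For example, btrfs_link_bg_list(bg, &fs_info->reclaim_bgs) is a no-op that
 * returns false when @bg already sits on some bg_list, so callers never
 * double-add a group or take a second reference by mistake.
 */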

/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
        LIST_HEAD(retry_list);
        struct btrfs_block_group *block_group;
        struct btrfs_space_info *space_info;
        struct btrfs_trans_handle *trans;
        const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
        int ret = 0;

        if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
                return;

        if (btrfs_fs_closing(fs_info))
                return;

        /*
         * Long running balances can keep us blocked here for eternity, so
         * simply skip deletion if we're unable to get the mutex.
         */
        if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
                return;

        spin_lock(&fs_info->unused_bgs_lock);
        while (!list_empty(&fs_info->unused_bgs)) {
                u64 used;
                int trimming;

                block_group = list_first_entry(&fs_info->unused_bgs,
                                               struct btrfs_block_group,
                                               bg_list);
                list_del_init(&block_group->bg_list);

                space_info = block_group->space_info;

                if (ret || btrfs_mixed_space_info(space_info)) {
                        btrfs_put_block_group(block_group);
                        continue;
                }
                spin_unlock(&fs_info->unused_bgs_lock);

                btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);

                /* Don't want to race with allocators so take the groups_sem */
                down_write(&space_info->groups_sem);

                /*
                 * Async discard moves the final block group discard to be prior
                 * to the unused_bgs code path. Therefore, if it's not fully
                 * trimmed, punt it back to the async discard lists.
                 */
                if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
                    !btrfs_is_free_space_trimmed(block_group)) {
                        trace_btrfs_skip_unused_block_group(block_group);
                        up_write(&space_info->groups_sem);
                        /* Requeue if we failed because of async discard */
                        btrfs_discard_queue_work(&fs_info->discard_ctl,
                                                 block_group);
                        goto next;
                }

                spin_lock(&space_info->lock);
                spin_lock(&block_group->lock);
                if (btrfs_is_block_group_used(block_group) || block_group->ro ||
                    list_is_singular(&block_group->list)) {
                        /*
                         * We want to bail if we made new allocations or have
                         * outstanding allocations in this block group. We do
                         * the ro check in case balance is currently acting on
                         * this block group.
                         *
                         * Also bail out if this is the only block group for its
                         * type, because otherwise we would lose profile
                         * information from fs_info->avail_*_alloc_bits and the
                         * next block group of this type would be created with a
                         * "single" profile (even if we're in a raid fs) because
                         * fs_info->avail_*_alloc_bits would be 0.
                         */
                        trace_btrfs_skip_unused_block_group(block_group);
                        spin_unlock(&block_group->lock);
                        spin_unlock(&space_info->lock);
                        up_write(&space_info->groups_sem);
                        goto next;
                }

                /*
                 * The block group may be unused but there may be space reserved
                 * accounting with the existence of that block group, that is,
                 * space_info->bytes_may_use was incremented by a task but no
                 * space was yet allocated from the block group by the task.
                 * That space may or may not be allocated, as we are generally
                 * pessimistic about space reservation for metadata as well as
                 * for data when using compression (as we reserve space based on
                 * the worst case, when data can't be compressed, and before
                 * actually attempting compression, before starting writeback).
                 *
                 * So check if the total space of the space_info minus the size
                 * of this block group is less than the used space of the
                 * space_info - if that's the case, then it means we have tasks
                 * that might be relying on the block group in order to allocate
                 * extents, and add back the block group to the unused list when
                 * we finish, so that we retry later in case no tasks ended up
                 * needing to allocate extents from the block group.
                 */
                used = btrfs_space_info_used(space_info, true);
                if ((space_info->total_bytes - block_group->length < used &&
                     block_group->zone_unusable < block_group->length) ||
                    has_unwritten_metadata(block_group)) {
                        /*
                         * Add a reference for the list, compensate for the ref
                         * drop under the "next" label for the
                         * fs_info->unused_bgs list.
                         */
                        btrfs_link_bg_list(block_group, &retry_list);

                        trace_btrfs_skip_unused_block_group(block_group);
                        spin_unlock(&block_group->lock);
                        spin_unlock(&space_info->lock);
                        up_write(&space_info->groups_sem);
                        goto next;
                }

                spin_unlock(&block_group->lock);
                spin_unlock(&space_info->lock);

                /* We don't want to force the issue, only flip if it's ok. */
                ret = inc_block_group_ro(block_group, 0);
                up_write(&space_info->groups_sem);
                if (ret < 0) {
                        ret = 0;
                        goto next;
                }

                ret = btrfs_zone_finish(block_group);
                if (ret < 0) {
                        btrfs_dec_block_group_ro(block_group);
                        if (ret == -EAGAIN) {
                                btrfs_link_bg_list(block_group, &retry_list);
                                ret = 0;
                        }
                        goto next;
                }

                /*
                 * Want to do this before we do anything else so we can recover
                 * properly if we fail to join the transaction.
                 */
                trans = btrfs_start_trans_remove_block_group(fs_info,
                                                             block_group->start);
                if (IS_ERR(trans)) {
                        btrfs_dec_block_group_ro(block_group);
                        ret = PTR_ERR(trans);
                        goto next;
                }

                /*
                 * We could have pending pinned extents for this block group,
                 * just delete them, we don't care about them anymore.
                 */
                if (!clean_pinned_extents(trans, block_group)) {
                        btrfs_dec_block_group_ro(block_group);
                        goto end_trans;
                }

                /*
                 * At this point, the block_group is read only and should fail
                 * new allocations. However, btrfs_finish_extent_commit() can
                 * cause this block_group to be placed back on the discard
                 * lists because now the block_group isn't fully discarded.
                 * Bail here and try again later after discarding everything.
                 */
                spin_lock(&fs_info->discard_ctl.lock);
                if (!list_empty(&block_group->discard_list)) {
                        spin_unlock(&fs_info->discard_ctl.lock);
                        btrfs_dec_block_group_ro(block_group);
                        btrfs_discard_queue_work(&fs_info->discard_ctl,
                                                 block_group);
                        goto end_trans;
                }
                spin_unlock(&fs_info->discard_ctl.lock);

                /* Reset pinned so btrfs_put_block_group() doesn't complain */
                spin_lock(&space_info->lock);
                spin_lock(&block_group->lock);

                btrfs_space_info_update_bytes_pinned(fs_info, space_info,
                                                     -block_group->pinned);
                space_info->bytes_readonly += block_group->pinned;
                block_group->pinned = 0;

                spin_unlock(&block_group->lock);
                spin_unlock(&space_info->lock);

                /*
                 * The normal path here is that an unused block group is passed
                 * in and trimming is then handled in the transaction commit
                 * path. Async discard interposes before this to do the trimming
                 * before coming down the unused block group path as trimming
                 * will no longer be done later in the transaction commit path.
                 */
                if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
                        goto flip_async;

                /*
                 * DISCARD can flip during remount. On zoned filesystems, we
                 * need to reset sequential-required zones.
                 */
                trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) ||
                           btrfs_is_zoned(fs_info);

                /* Implicit trim during transaction commit. */
                if (trimming)
                        btrfs_freeze_block_group(block_group);

                /*
                 * btrfs_remove_chunk() will abort the transaction if things go
                 * horribly wrong.
                 */
                ret = btrfs_remove_chunk(trans, block_group->start);

                if (ret) {
                        if (trimming)
                                btrfs_unfreeze_block_group(block_group);
                        goto end_trans;
                }

                /*
                 * If we're not mounted with -odiscard, we can just forget
                 * about this block group. Otherwise we'll need to wait
                 * until transaction commit to do the actual discard.
                 */
                if (trimming) {
                        spin_lock(&fs_info->unused_bgs_lock);
                        /*
                         * A concurrent scrub might have added us to the list
                         * fs_info->unused_bgs, so use a list_move operation
                         * to add the block group to the deleted_bgs list.
                         */
                        list_move(&block_group->bg_list,
                                  &trans->transaction->deleted_bgs);
                        spin_unlock(&fs_info->unused_bgs_lock);
                        btrfs_get_block_group(block_group);
                }
end_trans:
                btrfs_end_transaction(trans);
next:
                btrfs_put_block_group(block_group);
                spin_lock(&fs_info->unused_bgs_lock);
        }
        list_splice_tail(&retry_list, &fs_info->unused_bgs);
        spin_unlock(&fs_info->unused_bgs_lock);
        mutex_unlock(&fs_info->reclaim_bgs_lock);
        return;

flip_async:
        btrfs_end_transaction(trans);
        spin_lock(&fs_info->unused_bgs_lock);
        list_splice_tail(&retry_list, &fs_info->unused_bgs);
        spin_unlock(&fs_info->unused_bgs_lock);
        mutex_unlock(&fs_info->reclaim_bgs_lock);
        btrfs_put_block_group(block_group);
        btrfs_discard_punt_unused_bgs_list(fs_info);
}
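
/*
 * Queue @bg on fs_info->unused_bgs so the cleaner can consider deleting it.
 * A reference is taken when the group is newly linked; a group that is
 * already queued elsewhere (e.g. on reclaim_bgs) is moved to unused_bgs
 * instead, unless it is still marked as newly created.
 */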
void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
{
        struct btrfs_fs_info *fs_info = bg->fs_info;

        spin_lock(&fs_info->unused_bgs_lock);
        if (list_empty(&bg->bg_list)) {
                btrfs_get_block_group(bg);
                trace_btrfs_add_unused_block_group(bg);
                list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
        } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
                /* Pull out the block group from the reclaim_bgs list. */
                trace_btrfs_add_unused_block_group(bg);
                list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
        }
        spin_unlock(&fs_info->unused_bgs_lock);
}

/*
 * We want block groups with a low number of used bytes to be in the beginning
 * of the list, so they will get reclaimed first.
 */
static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
                           const struct list_head *b)
{
        const struct btrfs_block_group *bg1, *bg2;

        bg1 = list_entry(a, struct btrfs_block_group, bg_list);
        bg2 = list_entry(b, struct btrfs_block_group, bg_list);

        return bg1->used > bg2->used;
}
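
/*
 * Used as the comparison callback for list_sort(): returning a positive
 * value when bg1->used > bg2->used yields an ascending sort, so the
 * least-used groups end up at the head of reclaim_bgs.
 */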
static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info)
{
        if (btrfs_is_zoned(fs_info))
                return btrfs_zoned_should_reclaim(fs_info);
        return true;
}

static bool should_reclaim_block_group(const struct btrfs_block_group *bg, u64 bytes_freed)
{
        const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info);
        u64 thresh_bytes = mult_perc(bg->length, thresh_pct);
        const u64 new_val = bg->used;
        const u64 old_val = new_val + bytes_freed;

        if (thresh_bytes == 0)
                return false;

        /*
         * If we were below the threshold before don't reclaim, we are likely a
         * brand new block group and we don't want to relocate new block groups.
         */
        if (old_val < thresh_bytes)
                return false;
        if (new_val >= thresh_bytes)
                return false;
        return true;
}
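
/*
 * Worked example (illustrative numbers): for a 1 GiB block group with a 30%
 * reclaim threshold, thresh_bytes is ~307 MiB. A free that takes bg->used
 * from 400 MiB down to 200 MiB crosses the threshold (old_val >= thresh,
 * new_val < thresh), making the group a reclaim candidate, while a group
 * that was created already below ~307 MiB never qualifies.
 */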

void btrfs_reclaim_bgs_work(struct work_struct *work)
{
        struct btrfs_fs_info *fs_info =
                container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
        struct btrfs_block_group *bg;
        struct btrfs_space_info *space_info;
        LIST_HEAD(retry_list);

        if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
                return;

        if (btrfs_fs_closing(fs_info))
                return;

        if (!btrfs_should_reclaim(fs_info))
                return;

        sb_start_write(fs_info->sb);

        if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
                sb_end_write(fs_info->sb);
                return;
        }

        /*
         * Long running balances can keep us blocked here for eternity, so
         * simply skip reclaim if we're unable to get the mutex.
         */
        if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
                btrfs_exclop_finish(fs_info);
                sb_end_write(fs_info->sb);
                return;
        }

        spin_lock(&fs_info->unused_bgs_lock);
        /*
         * Sort happens under lock because we can't simply splice it and sort.
         * The block groups might still be in use and reachable via bg_list,
         * and their presence in the reclaim_bgs list must be preserved.
         */
        list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
        while (!list_empty(&fs_info->reclaim_bgs)) {
                u64 zone_unusable;
                u64 reclaimed;
                int ret = 0;

                bg = list_first_entry(&fs_info->reclaim_bgs,
                                      struct btrfs_block_group,
                                      bg_list);
                list_del_init(&bg->bg_list);

                space_info = bg->space_info;
                spin_unlock(&fs_info->unused_bgs_lock);

                /* Don't race with allocators so take the groups_sem */
                down_write(&space_info->groups_sem);

                spin_lock(&space_info->lock);
                spin_lock(&bg->lock);
                if (bg->reserved || bg->pinned || bg->ro) {
                        /*
                         * We want to bail if we made new allocations or have
                         * outstanding allocations in this block group. We do
                         * the ro check in case balance is currently acting on
                         * this block group.
                         */
                        spin_unlock(&bg->lock);
                        spin_unlock(&space_info->lock);
                        up_write(&space_info->groups_sem);
                        goto next;
                }
                if (bg->used == 0) {
                        /*
                         * It is possible that we trigger relocation on a block
                         * group as its extents are deleted and it first goes
                         * below the threshold, then shortly after goes empty.
                         *
                         * In this case, relocating it does delete it, but has
                         * some overhead in relocation specific metadata, looking
                         * for the non-existent extents and running some extra
                         * transactions, which we can avoid by using one of the
                         * other mechanisms for dealing with empty block groups.
                         */
                        if (!btrfs_test_opt(fs_info, DISCARD_ASYNC))
                                btrfs_mark_bg_unused(bg);
                        spin_unlock(&bg->lock);
                        spin_unlock(&space_info->lock);
                        up_write(&space_info->groups_sem);
                        goto next;
                }
                /*
                 * The block group might no longer meet the reclaim condition by
                 * the time we get around to reclaiming it, so to avoid
                 * reclaiming overly full block_groups, skip reclaiming them.
                 *
                 * Since the decision making process also depends on the amount
                 * being freed, pass in a fake giant value to skip that extra
                 * check, which is more meaningful when adding to the list in
                 * the first place.
                 */
                if (!should_reclaim_block_group(bg, bg->length)) {
                        spin_unlock(&bg->lock);
                        spin_unlock(&space_info->lock);
                        up_write(&space_info->groups_sem);
                        goto next;
                }

                /*
                 * Cache the zone_unusable value before turning the block group
                 * to read only. As soon as the block group is read only its
                 * zone_unusable value gets moved to the block group's read-only
                 * bytes and isn't available for calculations anymore. We also
                 * cache it before unlocking the block group, to prevent races
                 * (reports from KCSAN and such tools) with tasks updating it.
                 */
                zone_unusable = bg->zone_unusable;
                spin_unlock(&bg->lock);
                spin_unlock(&space_info->lock);

                /*
                 * Get out fast, in case we're read-only or unmounting the
                 * filesystem. It is OK to drop block groups from the list even
                 * for the read-only case. As we did sb_start_write(),
                 * "mount -o remount,ro" won't happen and read-only filesystem
                 * means it is forced read-only due to a fatal error. So, it
                 * never gets back to read-write to let us reclaim again.
                 */
                if (btrfs_need_cleaner_sleep(fs_info)) {
                        up_write(&space_info->groups_sem);
                        goto next;
                }

                ret = inc_block_group_ro(bg, 0);
                up_write(&space_info->groups_sem);
                if (ret < 0)
                        goto next;

                btrfs_info(fs_info,
                           "reclaiming chunk %llu with %llu%% used %llu%% unusable",
                           bg->start,
                           div64_u64(bg->used * 100, bg->length),
                           div64_u64(zone_unusable * 100, bg->length));
                trace_btrfs_reclaim_block_group(bg);
                reclaimed = bg->used;
                ret = btrfs_relocate_chunk(fs_info, bg->start);
                if (ret) {
                        btrfs_dec_block_group_ro(bg);
                        btrfs_err(fs_info, "error relocating chunk %llu",
                                  bg->start);
                        reclaimed = 0;
                        spin_lock(&space_info->lock);
                        space_info->reclaim_errors++;
                        if (READ_ONCE(space_info->periodic_reclaim))
                                space_info->periodic_reclaim_ready = false;
                        spin_unlock(&space_info->lock);
                }
                spin_lock(&space_info->lock);
                space_info->reclaim_count++;
                space_info->reclaim_bytes += reclaimed;
                spin_unlock(&space_info->lock);

next:
                if (ret && !READ_ONCE(space_info->periodic_reclaim))
                        btrfs_link_bg_list(bg, &retry_list);
                btrfs_put_block_group(bg);

                mutex_unlock(&fs_info->reclaim_bgs_lock);
                /*
                 * Reclaiming all the block groups in the list can take really
                 * long. Prioritize cleaning up unused block groups.
                 */
                btrfs_delete_unused_bgs(fs_info);
                /*
                 * If we are interrupted by a balance, we can just bail out. The
                 * cleaner thread will restart again if necessary.
                 */
                if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
                        goto end;
                spin_lock(&fs_info->unused_bgs_lock);
        }
        spin_unlock(&fs_info->unused_bgs_lock);
        mutex_unlock(&fs_info->reclaim_bgs_lock);
end:
        spin_lock(&fs_info->unused_bgs_lock);
        list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
        spin_unlock(&fs_info->unused_bgs_lock);
        btrfs_exclop_finish(fs_info);
        sb_end_write(fs_info->sb);
}

void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
{
        btrfs_reclaim_sweep(fs_info);
        spin_lock(&fs_info->unused_bgs_lock);
        if (!list_empty(&fs_info->reclaim_bgs))
                queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
        spin_unlock(&fs_info->unused_bgs_lock);
}

void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
{
        struct btrfs_fs_info *fs_info = bg->fs_info;

        if (btrfs_link_bg_list(bg, &fs_info->reclaim_bgs))
                trace_btrfs_add_reclaim_block_group(bg);
}

static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key,
                           const struct btrfs_path *path)
{
        struct btrfs_chunk_map *map;
        struct btrfs_block_group_item bg;
        struct extent_buffer *leaf;
        int slot;
        u64 flags;
        int ret = 0;

        slot = path->slots[0];
        leaf = path->nodes[0];

        map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset);
        if (!map) {
                btrfs_err(fs_info,
                          "logical %llu len %llu found bg but no related chunk",
                          key->objectid, key->offset);
                return -ENOENT;
        }

        if (map->start != key->objectid || map->chunk_len != key->offset) {
                btrfs_err(fs_info,
                          "block group %llu len %llu mismatch with chunk %llu len %llu",
                          key->objectid, key->offset, map->start, map->chunk_len);
                ret = -EUCLEAN;
                goto out_free_map;
        }

        read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
                           sizeof(bg));
        flags = btrfs_stack_block_group_flags(&bg) &
                BTRFS_BLOCK_GROUP_TYPE_MASK;

        if (flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
                btrfs_err(fs_info,
                          "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
                          key->objectid, key->offset, flags,
                          (BTRFS_BLOCK_GROUP_TYPE_MASK & map->type));
                ret = -EUCLEAN;
        }

out_free_map:
        btrfs_free_chunk_map(map);
        return ret;
}
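
/*
 * Find the first BLOCK_GROUP_ITEM at or after @key->objectid and cross-check
 * it against its chunk map via read_bg_from_eb(). Returns 0 when a valid item
 * was found, a positive value when the tree is exhausted, or a negative errno.
 */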
static int find_first_block_group(struct btrfs_fs_info *fs_info,
                                  struct btrfs_path *path,
                                  const struct btrfs_key *key)
{
        struct btrfs_root *root = btrfs_block_group_root(fs_info);
        int ret;
        struct btrfs_key found_key;

        btrfs_for_each_slot(root, key, &found_key, path, ret) {
                if (found_key.objectid >= key->objectid &&
                    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        return read_bg_from_eb(fs_info, &found_key, path);
                }
        }
        return ret;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 extra_flags = chunk_to_extended(flags) &
                          BTRFS_EXTENDED_PROFILE_MASK;

        write_seqlock(&fs_info->profiles_lock);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits |= extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                fs_info->avail_metadata_alloc_bits |= extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                fs_info->avail_system_alloc_bits |= extra_flags;
        write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Map a physical disk address to a list of logical addresses.
 *
 * @fs_info:       the filesystem
 * @chunk_start:   logical address of block group
 * @physical:      physical address to map to logical addresses
 * @logical:       return array of logical addresses which map to @physical
 * @naddrs:        length of @logical
 * @stripe_len:    size of IO stripe for the given block group
 *
 * Maps a particular @physical disk address to a list of @logical addresses.
 * Used primarily to exclude those portions of a block group that contain super
 * block copies.
 */
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
                     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
{
        struct btrfs_chunk_map *map;
        u64 *buf;
        u64 bytenr;
        u64 data_stripe_length;
        u64 io_stripe_size;
        int i, nr = 0;
        int ret = 0;

        map = btrfs_get_chunk_map(fs_info, chunk_start, 1);
        if (IS_ERR(map))
                return -EIO;

        data_stripe_length = map->stripe_size;
        io_stripe_size = BTRFS_STRIPE_LEN;
        chunk_start = map->start;

        /* For RAID5/6 adjust to a full IO stripe length */
        if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
                io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map));

        buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
        if (!buf) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < map->num_stripes; i++) {
                bool already_inserted = false;
                u32 stripe_nr;
                u32 offset;
                int j;

                if (!in_range(physical, map->stripes[i].physical,
                              data_stripe_length))
                        continue;

                stripe_nr = (physical - map->stripes[i].physical) >>
                            BTRFS_STRIPE_LEN_SHIFT;
                offset = (physical - map->stripes[i].physical) &
                         BTRFS_STRIPE_LEN_MASK;

                if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
                                 BTRFS_BLOCK_GROUP_RAID10))
                        stripe_nr = div_u64(stripe_nr * map->num_stripes + i,
                                            map->sub_stripes);
                /*
                 * The remaining case would be for RAID56, multiply by
                 * nr_data_stripes(). Alternatively, just use rmap_len below
                 * instead of map->stripe_len
                 */
                bytenr = chunk_start + stripe_nr * io_stripe_size + offset;

                /* Ensure we don't add duplicate addresses */
                for (j = 0; j < nr; j++) {
                        if (buf[j] == bytenr) {
                                already_inserted = true;
                                break;
                        }
                }

                if (!already_inserted)
                        buf[nr++] = bytenr;
        }

        *logical = buf;
        *naddrs = nr;
        *stripe_len = io_stripe_size;
out:
        btrfs_free_chunk_map(map);
        return ret;
}
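
/*
 * Worked example (illustrative numbers): on a two-device RAID0 chunk with
 * 64K stripes, a physical address 192K into the second device's stripe range
 * gives a device stripe_nr of 3, which the RAID0 adjustment above maps to
 * logical stripe 3 * 2 + 1 = 7, i.e. bytenr = chunk_start + 7 * 64K.
 */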

static int exclude_super_stripes(struct btrfs_block_group *cache)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        const bool zoned = btrfs_is_zoned(fs_info);
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
                cache->bytes_super += stripe_len;
                ret = set_extent_bit(&fs_info->excluded_extents, cache->start,
                                     cache->start + stripe_len - 1,
                                     EXTENT_UPTODATE, NULL);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(fs_info, cache->start,
                                       bytenr, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                /* Shouldn't have super stripes in sequential zones */
                if (zoned && nr) {
                        kfree(logical);
                        btrfs_err(fs_info,
                                  "zoned: block group %llu must not contain super block",
                                  cache->start);
                        return -EUCLEAN;
                }

                while (nr--) {
                        u64 len = min_t(u64, stripe_len,
                                        cache->start + cache->length - logical[nr]);

                        cache->bytes_super += len;
                        ret = set_extent_bit(&fs_info->excluded_extents, logical[nr],
                                             logical[nr] + len - 1,
                                             EXTENT_UPTODATE, NULL);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_block_group *btrfs_create_block_group_cache(
                struct btrfs_fs_info *fs_info, u64 start)
{
        struct btrfs_block_group *cache;

        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return NULL;

        cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
                                        GFP_NOFS);
        if (!cache->free_space_ctl) {
                kfree(cache);
                return NULL;
        }

        cache->start = start;

        cache->fs_info = fs_info;
        cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);

        cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;

        refcount_set(&cache->refs, 1);
        spin_lock_init(&cache->lock);
        init_rwsem(&cache->data_rwsem);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
        INIT_LIST_HEAD(&cache->bg_list);
        INIT_LIST_HEAD(&cache->ro_list);
        INIT_LIST_HEAD(&cache->discard_list);
        INIT_LIST_HEAD(&cache->dirty_list);
        INIT_LIST_HEAD(&cache->io_list);
        INIT_LIST_HEAD(&cache->active_bg_list);
        btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
        atomic_set(&cache->frozen, 0);
        mutex_init(&cache->free_space_lock);

        return cache;
}
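
/*
 * The returned group starts with a refcount of 1 owned by the caller, who is
 * expected to drop it with btrfs_put_block_group() once the group has been
 * handed off (or on any error path).
 */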

/*
 * Iterate all chunks and verify that each of them has the corresponding block
 * group.
 */
static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
{
        u64 start = 0;
        int ret = 0;

        while (1) {
                struct btrfs_chunk_map *map;
                struct btrfs_block_group *bg;

                /*
                 * btrfs_find_chunk_map() will return the first chunk map
                 * intersecting the range, so setting @length to 1 is enough to
                 * get the first chunk.
                 */
                map = btrfs_find_chunk_map(fs_info, start, 1);
                if (!map)
                        break;

                bg = btrfs_lookup_block_group(fs_info, map->start);
                if (!bg) {
                        btrfs_err(fs_info,
        "chunk start=%llu len=%llu doesn't have corresponding block group",
                                  map->start, map->chunk_len);
                        ret = -EUCLEAN;
                        btrfs_free_chunk_map(map);
                        break;
                }
                if (bg->start != map->start || bg->length != map->chunk_len ||
                    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
                    (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
                        btrfs_err(fs_info,
        "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
                                  map->start, map->chunk_len,
                                  map->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
                                  bg->start, bg->length,
                                  bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
                        ret = -EUCLEAN;
                        btrfs_free_chunk_map(map);
                        btrfs_put_block_group(bg);
                        break;
                }
                start = map->start + map->chunk_len;
                btrfs_free_chunk_map(map);
                btrfs_put_block_group(bg);
        }
        return ret;
}

static int read_one_block_group(struct btrfs_fs_info *info,
                                struct btrfs_block_group_item *bgi,
                                const struct btrfs_key *key,
                                int need_clear)
{
        struct btrfs_block_group *cache;
        const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
        int ret;

        ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);

        cache = btrfs_create_block_group_cache(info, key->objectid);
        if (!cache)
                return -ENOMEM;

        cache->length = key->offset;
        cache->used = btrfs_stack_block_group_used(bgi);
        cache->commit_used = cache->used;
        cache->flags = btrfs_stack_block_group_flags(bgi);
        cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi);

        set_free_space_tree_thresholds(cache);

        if (need_clear) {
                /*
                 * When we mount with old space cache, we need to
                 * set BTRFS_DC_CLEAR and set dirty flag.
                 *
                 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
                 *    truncate the old free space cache inode and
                 *    setup a new one.
                 * b) Setting 'dirty flag' makes sure that we flush
                 *    the new space cache info onto disk.
                 */
                if (btrfs_test_opt(info, SPACE_CACHE))
                        cache->disk_cache_state = BTRFS_DC_CLEAR;
        }
        if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
            (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
                btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
                          cache->start);
                ret = -EINVAL;
                goto error;
        }

        ret = btrfs_load_block_group_zone_info(cache, false);
        if (ret) {
                btrfs_err(info, "zoned: failed to load zone info of bg %llu",
                          cache->start);
                goto error;
        }

        /*
         * We need to exclude the super stripes now so that the space info has
         * super bytes accounted for, otherwise we'll think we have more space
         * than we actually do.
         */
        ret = exclude_super_stripes(cache);
        if (ret) {
                /* We may have excluded something, so call this just in case. */
                btrfs_free_excluded_extents(cache);
                goto error;
        }

        /*
         * For zoned filesystem, space after the allocation offset is the only
         * free space for a block group. So, we don't need any caching work.
         * btrfs_calc_zone_unusable() will set the amount of free space and
         * zone_unusable space.
         *
         * For regular filesystem, check for two cases, either we are full, and
         * therefore don't need to bother with the caching work since we won't
         * find any space, or we are empty, and we can just add all the space
         * in and be done with it. This saves us _a_lot_ of time, particularly
         * in the full case.
         */
        if (btrfs_is_zoned(info)) {
                btrfs_calc_zone_unusable(cache);
                /* Should not have any excluded extents. Just in case, though. */
                btrfs_free_excluded_extents(cache);
        } else if (cache->length == cache->used) {
                cache->cached = BTRFS_CACHE_FINISHED;
                btrfs_free_excluded_extents(cache);
        } else if (cache->used == 0) {
                cache->cached = BTRFS_CACHE_FINISHED;
                ret = btrfs_add_new_free_space(cache, cache->start,
                                               cache->start + cache->length, NULL);
                btrfs_free_excluded_extents(cache);
                if (ret)
                        goto error;
        }

        ret = btrfs_add_block_group_cache(info, cache);
        if (ret) {
                btrfs_remove_free_space_cache(cache);
                goto error;
        }
        trace_btrfs_add_block_group(info, cache, 0);
        btrfs_add_bg_to_space_info(info, cache);

        set_avail_alloc_bits(info, cache->flags);
        if (btrfs_chunk_writeable(info, cache->start)) {
                if (cache->used == 0) {
                        ASSERT(list_empty(&cache->bg_list));
                        if (btrfs_test_opt(info, DISCARD_ASYNC))
                                btrfs_discard_queue_work(&info->discard_ctl, cache);
                        else
                                btrfs_mark_bg_unused(cache);
                }
        } else {
                inc_block_group_ro(cache, 1);
        }

        return 0;
error:
        btrfs_put_block_group(cache);
        return ret;
}

static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
{
        struct rb_node *node;
        int ret = 0;

        for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
                struct btrfs_chunk_map *map;
                struct btrfs_block_group *bg;

                map = rb_entry(node, struct btrfs_chunk_map, rb_node);
                bg = btrfs_create_block_group_cache(fs_info, map->start);
                if (!bg) {
                        ret = -ENOMEM;
                        break;
                }

                /* Fill dummy cache as FULL */
                bg->length = map->chunk_len;
                bg->flags = map->type;
                bg->cached = BTRFS_CACHE_FINISHED;
                bg->used = map->chunk_len;

                ret = btrfs_add_block_group_cache(fs_info, bg);
                /*
                 * We may have some valid block group cache added already, in
                 * that case we skip to the next one.
                 */
                if (ret == -EEXIST) {
                        ret = 0;
                        btrfs_put_block_group(bg);
                        continue;
                }

                if (ret) {
                        btrfs_remove_free_space_cache(bg);
                        btrfs_put_block_group(bg);
                        break;
                }

                btrfs_add_bg_to_space_info(fs_info, bg);

                set_avail_alloc_bits(fs_info, bg->flags);
        }
        if (!ret)
                btrfs_init_global_block_rsv(fs_info);
        return ret;
}

int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
        struct btrfs_root *root = btrfs_block_group_root(info);
        struct btrfs_path *path;
        int ret;
        struct btrfs_block_group *cache;
        struct btrfs_space_info *space_info;
        struct btrfs_key key;
        int need_clear = 0;
        u64 cache_gen;

        /*
         * Either no extent root (with ibadroots rescue option) or we have
         * unsupported RO options. The fs can never be mounted read-write, so no
         * need to waste time searching block group items.
         *
         * This also allows new extent tree related changes to be RO compat,
         * no need for a full incompat flag.
         */
        if (!root || (btrfs_super_compat_ro_flags(info->super_copy) &
                      ~BTRFS_FEATURE_COMPAT_RO_SUPP))
                return fill_dummy_bgs(info);

        key.objectid = 0;
        key.offset = 0;
        key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        cache_gen = btrfs_super_cache_generation(info->super_copy);
        if (btrfs_test_opt(info, SPACE_CACHE) &&
            btrfs_super_generation(info->super_copy) != cache_gen)
                need_clear = 1;
        if (btrfs_test_opt(info, CLEAR_CACHE))
                need_clear = 1;

        while (1) {
                struct btrfs_block_group_item bgi;
                struct extent_buffer *leaf;
                int slot;

                ret = find_first_block_group(info, path, &key);
                if (ret > 0)
                        break;
                if (ret != 0)
                        goto error;

                leaf = path->nodes[0];
                slot = path->slots[0];

                read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
                                   sizeof(bgi));

                btrfs_item_key_to_cpu(leaf, &key, slot);
                btrfs_release_path(path);
                ret = read_one_block_group(info, &bgi, &key, need_clear);
                if (ret < 0)
                        goto error;
                key.objectid += key.offset;
                key.offset = 0;
        }
        btrfs_release_path(path);

        list_for_each_entry(space_info, &info->space_info, list) {
                int i;

                for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
                        if (list_empty(&space_info->block_groups[i]))
                                continue;
                        cache = list_first_entry(&space_info->block_groups[i],
                                                 struct btrfs_block_group,
                                                 list);
                        btrfs_sysfs_add_block_group_type(cache);
                }

                if (!(btrfs_get_alloc_profile(info, space_info->flags) &
                      (BTRFS_BLOCK_GROUP_RAID10 |
                       BTRFS_BLOCK_GROUP_RAID1_MASK |
                       BTRFS_BLOCK_GROUP_RAID56_MASK |
                       BTRFS_BLOCK_GROUP_DUP)))
                        continue;
                /*
                 * Avoid allocating from un-mirrored block group if there are
                 * mirrored block groups.
                 */
                list_for_each_entry(cache,
                                &space_info->block_groups[BTRFS_RAID_RAID0],
                                list)
                        inc_block_group_ro(cache, 1);
                list_for_each_entry(cache,
                                &space_info->block_groups[BTRFS_RAID_SINGLE],
                                list)
                        inc_block_group_ro(cache, 1);
        }

        btrfs_init_global_block_rsv(info);
        ret = check_chunk_block_group_mappings(info);
error:
        btrfs_free_path(path);
        /*
         * We've hit some error while reading the extent tree, and have
         * rescue=ibadroots mount option.
         * Try to fill the tree using dummy block groups so that the user can
         * continue to mount and grab their data.
         */
        if (ret && btrfs_test_opt(info, IGNOREBADROOTS))
                ret = fill_dummy_bgs(info);
        return ret;
}

/*
 * This function, insert_block_group_item(), belongs to the phase 2 of chunk
 * allocation.
 *
 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
 * phases.
 */
static int insert_block_group_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_block_group *block_group)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_group_item bgi;
        struct btrfs_root *root = btrfs_block_group_root(fs_info);
        struct btrfs_key key;
        u64 old_commit_used;
        int ret;

        spin_lock(&block_group->lock);
        btrfs_set_stack_block_group_used(&bgi, block_group->used);
        btrfs_set_stack_block_group_chunk_objectid(&bgi,
                                                   block_group->global_root_id);
        btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
        old_commit_used = block_group->commit_used;
        block_group->commit_used = block_group->used;
        key.objectid = block_group->start;
        key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        key.offset = block_group->length;
        spin_unlock(&block_group->lock);

        ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
        if (ret < 0) {
                spin_lock(&block_group->lock);
                block_group->commit_used = old_commit_used;
                spin_unlock(&block_group->lock);
        }

        return ret;
}
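
/*
 * Note that commit_used is restored on failure: it tracks the used value last
 * written to the block group item, so rolling it back keeps the item looking
 * out of date and a later commit will retry the update.
 */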

static int insert_dev_extent(struct btrfs_trans_handle *trans,
                             const struct btrfs_device *device, u64 chunk_offset,
                             u64 start, u64 num_bytes)
{
        struct btrfs_fs_info *fs_info = device->fs_info;
        struct btrfs_root *root = fs_info->dev_root;
        struct btrfs_path *path;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        int ret;

        WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
        WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.type = BTRFS_DEV_EXTENT_KEY;
        key.offset = start;
        ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent,
                                            BTRFS_FIRST_CHUNK_TREE_OBJECTID);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);

        btrfs_mark_buffer_dirty(trans, leaf);
out:
        btrfs_free_path(path);
        return ret;
}

/*
 * This function belongs to phase 2.
 *
 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
 * phases.
 */
static int insert_dev_extents(struct btrfs_trans_handle *trans,
                              u64 chunk_offset, u64 chunk_size)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_device *device;
        struct btrfs_chunk_map *map;
        u64 dev_offset;
        int i;
        int ret = 0;

        map = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
        if (IS_ERR(map))
                return PTR_ERR(map);

        /*
         * Take the device list mutex to prevent races with the final phase of
         * a device replace operation that replaces the device object associated
         * with the map's stripes, because the device object's id can change
         * at any time during that final phase of the device replace operation
         * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
         * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
         * resulting in persisting a device extent item with such ID.
         */
        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        for (i = 0; i < map->num_stripes; i++) {
                device = map->stripes[i].dev;
                dev_offset = map->stripes[i].physical;

                ret = insert_dev_extent(trans, device, chunk_offset, dev_offset,
                                        map->stripe_size);
                if (ret)
                        break;
        }
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);

        btrfs_free_chunk_map(map);
        return ret;
}

/*
 * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of
 * chunk allocation.
 *
 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
 * phases.
 */
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_group *block_group;
        int ret = 0;

        while (!list_empty(&trans->new_bgs)) {
                int index;

                block_group = list_first_entry(&trans->new_bgs,
                                               struct btrfs_block_group,
                                               bg_list);
                if (ret)
                        goto next;

                index = btrfs_bg_flags_to_raid_index(block_group->flags);

                ret = insert_block_group_item(trans, block_group);
                if (ret)
                        btrfs_abort_transaction(trans, ret);
                if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
                              &block_group->runtime_flags)) {
                        mutex_lock(&fs_info->chunk_mutex);
                        ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
                        mutex_unlock(&fs_info->chunk_mutex);
                        if (ret)
                                btrfs_abort_transaction(trans, ret);
                }
                ret = insert_dev_extents(trans, block_group->start,
                                         block_group->length);
                if (ret)
                        btrfs_abort_transaction(trans, ret);
                add_block_group_free_space(trans, block_group);

                /*
                 * If we restriped during balance, we may have added a new raid
                 * type, so now add the sysfs entries when it is safe to do so.
                 * We don't have to worry about locking here as it's handled in
                 * btrfs_sysfs_add_block_group_type.
                 */
                if (block_group->space_info->block_group_kobjs[index] == NULL)
                        btrfs_sysfs_add_block_group_type(block_group);

                /* Already aborted the transaction if it failed. */
next:
                btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);

                spin_lock(&fs_info->unused_bgs_lock);
                list_del_init(&block_group->bg_list);
                clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
                btrfs_put_block_group(block_group);
                spin_unlock(&fs_info->unused_bgs_lock);

                /*
                 * If the block group is still unused, add it to the list of
                 * unused block groups. The block group may have been created in
                 * order to satisfy a space reservation, in which case the
                 * extent allocation only happens later. But often we don't
                 * actually need to allocate space that we previously reserved,
                 * so the block group may become unused for a long time. For
                 * example for metadata we generally reserve space for a worst
                 * possible scenario, but then don't end up allocating all that
                 * space or none at all (due to no need to COW, extent buffers
                 * were already COWed in the current transaction and still
                 * unwritten, tree heights lower than the maximum possible
                 * height, etc). For data we generally reserve the exact amount
                 * of space we are going to allocate later, the exception is
                 * when using compression, as we must reserve space based on the
                 * uncompressed data size, because the compression is only done
                 * when writeback triggered and we don't know how much space we
                 * are actually going to need, so we reserve the uncompressed
                 * size because the data may be uncompressible in the worst case.
                 */
                if (ret == 0) {
                        bool used;

                        spin_lock(&block_group->lock);
                        used = btrfs_is_block_group_used(block_group);
                        spin_unlock(&block_group->lock);

                        if (!used)
                                btrfs_mark_bg_unused(block_group);
                }
        }
        btrfs_trans_release_chunk_metadata(trans);
}
/*
 * For extent tree v2 we use the block_group_item->chunk_offset to point at our
 * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID.
 */
static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 offset)
{
	u64 div = SZ_1G;
	u64 index;

	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
		return BTRFS_FIRST_CHUNK_TREE_OBJECTID;

	/* If we have a smaller filesystem, base the index on 128MiB. */
	if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL))
		div = SZ_128M;

	offset = div64_u64(offset, div);
	div64_u64_rem(offset, fs_info->nr_global_roots, &index);
	return index;
}
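/*
 * Worked example (editorial, illustrative values): on an EXTENT_TREE_V2
 * filesystem larger than 10GiB with nr_global_roots == 4, a block group at
 * offset 7GiB maps to global root (7GiB / SZ_1G) % 4 == 3. On a 5GiB
 * filesystem the divisor drops to SZ_128M, so offset 1GiB maps to
 * (1GiB / 128MiB) % 4 == 8 % 4 == 0.
 */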
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 u64 type,
						 u64 chunk_offset, u64 size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	int ret;

	btrfs_set_log_full_commit(trans);

	cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	/*
	 * Mark it as new before adding it to the rbtree of block groups or any
	 * list, so that no other task finds it and calls btrfs_mark_bg_unused()
	 * before the new flag is set.
	 */
	set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);

	cache->length = size;
	set_free_space_tree_thresholds(cache);
	cache->flags = type;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->global_root_id = calculate_global_root_id(fs_info, cache->start);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);

	ret = btrfs_load_block_group_zone_info(cache, true);
	if (ret) {
		btrfs_put_block_group(cache);
		return ERR_PTR(ret);
	}

	ret = exclude_super_stripes(cache);
	if (ret) {
		/* We may have excluded something, so call this just in case */
		btrfs_free_excluded_extents(cache);
		btrfs_put_block_group(cache);
		return ERR_PTR(ret);
	}

	ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
	btrfs_free_excluded_extents(cache);
	if (ret) {
		btrfs_put_block_group(cache);
		return ERR_PTR(ret);
	}

	/*
	 * Ensure the corresponding space_info object is created and
	 * assigned to our block group. We want our bg to be added to the rbtree
	 * with its ->space_info set.
	 */
	cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
	ASSERT(cache->space_info);

	ret = btrfs_add_block_group_cache(fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ERR_PTR(ret);
	}

	/*
	 * Now that our block group has its ->space_info set and is inserted in
	 * the rbtree, update the space info's counters.
	 */
	trace_btrfs_add_block_group(fs_info, cache, 1);
	btrfs_add_bg_to_space_info(fs_info, cache);
	btrfs_update_global_block_rsv(fs_info);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(cache)) {
		cache->space_info->bytes_used += size >> 1;
		fragment_free_space(cache);
	}
#endif

	btrfs_link_bg_list(cache, &trans->new_bgs);
	btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info);

	set_avail_alloc_bits(fs_info, type);
	return cache;
}
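/*
 * Editorial note: btrfs_make_block_group() only builds the in-memory block
 * group and links it into trans->new_bgs (via btrfs_link_bg_list() above);
 * the on-disk items are written later, in phase 2, when
 * btrfs_create_pending_block_groups() drains that list.
 */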
/*
 * Mark one block group RO, can be called several times for the same block
 * group.
 *
 * @cache:		the destination block group
 * @do_chunk_alloc:	whether we need to do chunk pre-allocation, this is to
 *			ensure we still have some free space after marking this
 *			block group RO.
 */
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = btrfs_block_group_root(fs_info);
	u64 alloc_flags;
	int ret;
	bool dirty_bg_running;

	/*
	 * This can only happen when we are doing read-only scrub on read-only
	 * mount.
	 * In that case we should not start a new transaction on read-only fs.
	 * Thus here we skip all chunk allocations.
	 */
	if (sb_rdonly(fs_info->sb)) {
		mutex_lock(&fs_info->ro_block_group_mutex);
		ret = inc_block_group_ro(cache, 0);
		mutex_unlock(&fs_info->ro_block_group_mutex);
		return ret;
	}

	do {
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		dirty_bg_running = false;

		/*
		 * We're not allowed to set block groups readonly after the dirty
		 * block group cache has started writing. If it already started,
		 * back off and let this transaction commit.
		 */
		mutex_lock(&fs_info->ro_block_group_mutex);
		if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
			u64 transid = trans->transid;

			mutex_unlock(&fs_info->ro_block_group_mutex);
			btrfs_end_transaction(trans);

			ret = btrfs_wait_for_commit(fs_info, transid);
			if (ret)
				return ret;
			dirty_bg_running = true;
		}
	} while (dirty_bg_running);

	if (do_chunk_alloc) {
		/*
		 * If we are changing raid levels, try to allocate a
		 * corresponding block group with the new raid level.
		 */
		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
		if (alloc_flags != cache->flags) {
			ret = btrfs_chunk_alloc(trans, alloc_flags,
						CHUNK_ALLOC_FORCE);
			/*
			 * ENOSPC is allowed here, we may have enough space
			 * already allocated at the new raid level to carry on.
			 */
			if (ret == -ENOSPC)
				ret = 0;
			if (ret < 0)
				goto out;
		}
	}

	ret = inc_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	if (ret == -ETXTBSY)
		goto unlock_out;

	/*
	 * Skip chunk allocation if the bg is SYSTEM, this is to avoid a system
	 * chunk allocation storm exhausting the system chunk array. Otherwise
	 * we still want to try our best to mark the block group read-only.
	 */
	if (!do_chunk_alloc && ret == -ENOSPC &&
	    (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
		goto unlock_out;

	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	/*
	 * We have allocated a new chunk. We also need to activate that chunk to
	 * grant metadata tickets for zoned filesystems.
	 */
	ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true);
	if (ret < 0)
		goto out;

	ret = inc_block_group_ro(cache, 0);
	if (ret == -ETXTBSY)
		goto unlock_out;
out:
	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
		mutex_lock(&fs_info->chunk_mutex);
		check_system_chunk(trans, alloc_flags);
		mutex_unlock(&fs_info->chunk_mutex);
	}
unlock_out:
	mutex_unlock(&fs_info->ro_block_group_mutex);

	btrfs_end_transaction(trans);
	return ret;
}
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	if (!--cache->ro) {
		if (btrfs_is_zoned(cache->fs_info)) {
			/* Migrate zone_unusable bytes back */
			cache->zone_unusable =
				(cache->alloc_offset - cache->used - cache->pinned -
				 cache->reserved) +
				(cache->length - cache->zone_capacity);
			btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
								    cache->zone_unusable);
			sinfo->bytes_readonly -= cache->zone_unusable;
		}
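		/*
		 * Worked example (editorial, illustrative values): a zoned
		 * block group with length 256MiB, zone_capacity 224MiB,
		 * alloc_offset 100MiB, used 80MiB and no pinned or reserved
		 * bytes gets zone_unusable = (100 - 80) + (256 - 224)
		 * = 52MiB migrated back out of bytes_readonly above.
		 */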
		num_bytes = cache->length - cache->reserved -
			    cache->pinned - cache->bytes_super -
			    cache->zone_unusable - cache->used;
		sinfo->bytes_readonly -= num_bytes;
		list_del_init(&cache->ro_list);
	}
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}
static int update_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_root *root = btrfs_block_group_root(fs_info);
	unsigned long bi;
	struct extent_buffer *leaf;
	struct btrfs_block_group_item bgi;
	struct btrfs_key key;
	u64 old_commit_used;
	u64 used;

	/*
	 * Block group item updates can be triggered outside of the commit
	 * transaction critical section, thus we need a consistent view of used
	 * bytes. We cannot use cache->used directly outside of the spin lock,
	 * as it may be changed.
	 */
	spin_lock(&cache->lock);
	old_commit_used = cache->commit_used;
	used = cache->used;
	/* No change in used bytes, can safely skip it. */
	if (cache->commit_used == used) {
		spin_unlock(&cache->lock);
		return 0;
	}
	cache->commit_used = used;
	spin_unlock(&cache->lock);

	key.objectid = cache->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = cache->length;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	btrfs_set_stack_block_group_used(&bgi, used);
	btrfs_set_stack_block_group_chunk_objectid(&bgi,
						   cache->global_root_id);
	btrfs_set_stack_block_group_flags(&bgi, cache->flags);
	write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
	btrfs_mark_buffer_dirty(trans, leaf);
fail:
	btrfs_release_path(path);
	/*
	 * We didn't update the block group item, need to revert commit_used
	 * unless the block group item didn't exist yet - this is to prevent a
	 * race with a concurrent insertion of the block group item, with
	 * insert_block_group_item(), that happened just after we attempted to
	 * update. In that case we would reset commit_used to 0 just after the
	 * insertion set it to a value greater than 0 - if the block group later
	 * ends up with 0 used bytes, we would incorrectly skip its update.
	 */
	if (ret < 0 && ret != -ENOENT) {
		spin_lock(&cache->lock);
		cache->commit_used = old_commit_used;
		spin_unlock(&cache->lock);
	}
	return ret;
}
static int cache_save_setup(struct btrfs_block_group *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct inode *inode = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	u64 cache_size = 0;
	int retries = 0;
	int ret = 0;

	if (!btrfs_test_opt(fs_info, SPACE_CACHE))
		return 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching the
	 * block group.
	 */
	if (block_group->length < (100 * SZ_1M)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

	if (TRANS_ABORTED(trans))
		return 0;
again:
	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up next
	 * time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, BTRFS_I(inode));
	if (ret) {
		/*
		 * So theoretically we could recover from this, simply set the
		 * super cache generation to 0 so we know to invalidate the
		 * cache, but then we'd have to keep track of the block groups
		 * that fail this way so we know we _have_ to reset this cache
		 * before the next commit or risk reading stale cache. So to
		 * limit our exposure to horrible edge cases let's just abort
		 * the transaction, this only happens in really bad situations
		 * anyway.
		 */
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}
	WARN_ON(ret);

	/* We've already set up this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	if (i_size_read(inode) > 0) {
		ret = btrfs_check_trunc_cache_free_space(fs_info,
					&fs_info->global_block_rsv);
		if (ret)
			goto out_put;

		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
		/*
		 * Don't bother trying to write stuff out _if_
		 * a) we're not cached,
		 * b) we're with the nospace_cache mount option,
		 * c) we're with the v2 space cache (FREE_SPACE_TREE).
		 */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	/*
	 * We hit an ENOSPC when setting up the cache in this transaction, just
	 * skip doing the setup, we've already cleared the cache so we're safe.
	 */
	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
		ret = -ENOSPC;
		goto out_put;
	}

	/*
	 * Try to preallocate enough space based on how big the block group is.
	 * Keep in mind this has to include any pinned space which could end up
	 * taking up quite a bit since it's not folded into the other space
	 * cache.
	 */
	cache_size = div_u64(block_group->length, SZ_256M);
	if (!cache_size)
		cache_size = 1;
	cache_size *= 16;
	cache_size *= fs_info->sectorsize;
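	/*
	 * Worked example (editorial): for a 1GiB block group with a 4KiB
	 * sector size this is 1GiB / 256MiB = 4, then 4 * 16 = 64, then
	 * 64 * 4096 = 256KiB preallocated for the free space cache file.
	 */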
	ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0,
					  cache_size, false);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size,
					      cache_size, cache_size,
					      &alloc_hint);
	/*
	 * Our cache requires contiguous chunks so that we don't modify a bunch
	 * of metadata or split extents when writing the cache out, which means
	 * we can enospc if we are heavily fragmented in addition to just normal
	 * out of space conditions. So if we hit this just skip setting up any
	 * other block groups for this transaction, maybe we'll unpin enough
	 * space the next time around.
	 */
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	else if (ret == -ENOSPC)
		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	extent_changeset_free(data_reserved);
	return ret;
}
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache, *tmp;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_path *path;

	if (list_empty(&cur_trans->dirty_bgs) ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Could add new block groups, use _safe just in case */
	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
				 dirty_list) {
		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
			cache_save_setup(cache, trans, path);
	}

	btrfs_free_path(path);
	return 0;
}
/*
 * Transaction commit does final block group cache writeback during a critical
 * section where nothing is allowed to change the FS. This is required in
 * order for the cache to actually match the block group, but can introduce a
 * lot of latency into the commit.
 *
 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
 * There's a chance we'll have to redo some of it if the block group changes
 * again during the commit, but it greatly reduces the commit latency by
 * getting rid of the easy block groups while we're still allowing others to
 * join the commit.
 */
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path = NULL;
	LIST_HEAD(dirty);
	struct list_head *io = &cur_trans->io_bgs;
	int loops = 0;

	spin_lock(&cur_trans->dirty_bgs_lock);
	if (list_empty(&cur_trans->dirty_bgs)) {
		spin_unlock(&cur_trans->dirty_bgs_lock);
		return 0;
	}
	list_splice_init(&cur_trans->dirty_bgs, &dirty);
	spin_unlock(&cur_trans->dirty_bgs_lock);

again:
	/* Make sure all the block groups on our dirty list actually exist */
	btrfs_create_pending_block_groups(trans);

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * cache_write_mutex is here only to save us from balance or automatic
	 * removal of empty block groups deleting this block group while we are
	 * writing out the cache.
	 */
	mutex_lock(&trans->transaction->cache_write_mutex);
	while (!list_empty(&dirty)) {
		bool drop_reserve = true;

		cache = list_first_entry(&dirty, struct btrfs_block_group,
					 dirty_list);
		/*
		 * This can happen if something re-dirties a block group that
		 * is already under IO. Just wait for it to finish and then do
		 * it all again.
		 */
		if (!list_empty(&cache->io_list)) {
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
		}

		/*
		 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
		 * it should update the cache_state. Don't delete until after
		 * we wait.
		 *
		 * Since we're not running in the commit critical section
		 * we need the dirty_bgs_lock to protect from update_block_group.
		 */
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);

		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				should_put = 0;

				/*
				 * The cache_write_mutex is protecting the
				 * io_list, also refer to the definition of
				 * btrfs_transaction::io_bgs for more details.
				 */
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * If we failed to write the cache, the
				 * generation will be bad and life goes on.
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = update_block_group_item(trans, path, cache);
			/*
			 * Our block group might still be attached to the list
			 * of new block groups in the transaction handle of some
			 * other task (struct btrfs_trans_handle->new_bgs). This
			 * means its block group item isn't yet in the extent
			 * tree. If this happens ignore the error, as we will
			 * try again later in the critical section of the
			 * transaction commit.
			 */
			if (ret == -ENOENT) {
				ret = 0;
				spin_lock(&cur_trans->dirty_bgs_lock);
				if (list_empty(&cache->dirty_list)) {
					list_add_tail(&cache->dirty_list,
						      &cur_trans->dirty_bgs);
					btrfs_get_block_group(cache);
					drop_reserve = false;
				}
				spin_unlock(&cur_trans->dirty_bgs_lock);
			} else if (ret) {
				btrfs_abort_transaction(trans, ret);
			}
		}

		/* If it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		if (drop_reserve)
			btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
		/*
		 * Avoid blocking other tasks for too long. It might even save
		 * us from writing caches for block groups that are going to be
		 * removed.
		 */
		mutex_unlock(&trans->transaction->cache_write_mutex);
		if (ret)
			goto out;
		mutex_lock(&trans->transaction->cache_write_mutex);
	}
	mutex_unlock(&trans->transaction->cache_write_mutex);

	/*
	 * Go through delayed refs for all the stuff we've just kicked off
	 * and then loop back (just once).
	 */
	if (!ret)
		ret = btrfs_run_delayed_refs(trans, 0);
	if (!ret && loops == 0) {
		loops++;
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&cur_trans->dirty_bgs, &dirty);
		/*
		 * dirty_bgs_lock protects us from concurrent block group
		 * deletes too (not just cache_write_mutex).
		 */
		if (!list_empty(&dirty)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			goto again;
		}
		spin_unlock(&cur_trans->dirty_bgs_lock);
	}
out:
	if (ret < 0) {
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&dirty, &cur_trans->dirty_bgs);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	}

	btrfs_free_path(path);
	return ret;
}
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path;
	struct list_head *io = &cur_trans->io_bgs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Even though we are in the critical section of the transaction commit,
	 * we can still have concurrent tasks adding elements to this
	 * transaction's list of dirty block groups. These tasks correspond to
	 * endio free space workers started when writeback finishes for a
	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
	 * allocate new block groups as a result of COWing nodes of the root
	 * tree when updating the free space inode. The writeback for the space
	 * caches is triggered by an earlier call to
	 * btrfs_start_dirty_block_groups() and iterations of the following
	 * loop.
	 * Also we want to do the cache_save_setup first and then run the
	 * delayed refs to make sure we have the best chance at doing this all
	 * in one shot.
	 */
	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group,
					 dirty_list);

		/*
		 * This can happen if cache_save_setup re-dirties a block group
		 * that is already under IO. Just wait for it to finish and
		 * then do it all again.
		 */
		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		/*
		 * Don't remove from the dirty list until after we've waited on
		 * any pending IO.
		 */
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (!ret)
			ret = btrfs_run_delayed_refs(trans, U64_MAX);

		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				should_put = 0;
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * If we failed to write the cache, the
				 * generation will be bad and life goes on.
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = update_block_group_item(trans, path, cache);
			/*
			 * One of the free space endio workers might have
			 * created a new block group while updating a free space
			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
			 * and hasn't released its transaction handle yet, in
			 * which case the new block group is still attached to
			 * its transaction handle and its creation has not
			 * finished yet (no block group item in the extent tree
			 * yet, etc). If this is the case, wait for all free
			 * space endio workers to finish and retry. This is a
			 * very rare case so no need for a more efficient and
			 * complex approach.
			 */
			if (ret == -ENOENT) {
				wait_event(cur_trans->writer_wait,
				   atomic_read(&cur_trans->num_writers) == 1);
				ret = update_block_group_item(trans, path, cache);
			}
			if (ret)
				btrfs_abort_transaction(trans, ret);
		}

		/* If it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of io_bgs member for details why it's safe
	 * to use it without any locking.
	 */
	while (!list_empty(io)) {
		cache = list_first_entry(io, struct btrfs_block_group,
					 io_list);
		list_del_init(&cache->io_list);
		btrfs_wait_cache_io(trans, cache, path);
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return ret;
}
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, bool alloc)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_group *cache;
	u64 old_val;
	bool reclaim = false;
	bool bg_already_dirty = true;
	int factor;

	/* Block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	cache = btrfs_lookup_block_group(info, bytenr);
	if (!cache)
		return -ENOENT;

	/* An extent can not span multiple block groups. */
	ASSERT(bytenr + num_bytes <= cache->start + cache->length);

	space_info = cache->space_info;
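	/*
	 * Editorial note: the factor below is the on-disk redundancy
	 * multiplier for the block group's profile, so disk_used moves by
	 * num_bytes * factor. For example, profiles keeping two copies of
	 * the data (such as RAID1 or DUP) are expected to yield a factor
	 * of 2, while SINGLE yields 1.
	 */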
	factor = btrfs_bg_type_to_factor(cache->flags);

	/*
	 * If this block group has free space cache written out, we need to make
	 * sure to load it if we are removing space. This is because we need
	 * the unpinning stage to actually add the space back to the block group,
	 * otherwise we will leak space.
	 */
	if (!alloc && !btrfs_block_group_done(cache))
		btrfs_cache_block_group(cache, true);

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);

	if (btrfs_test_opt(info, SPACE_CACHE) &&
	    cache->disk_cache_state < BTRFS_DC_CLEAR)
		cache->disk_cache_state = BTRFS_DC_CLEAR;

	old_val = cache->used;
	if (alloc) {
		old_val += num_bytes;
		cache->used = old_val;
		cache->reserved -= num_bytes;
		cache->reclaim_mark = 0;
		space_info->bytes_reserved -= num_bytes;
		space_info->bytes_used += num_bytes;
		space_info->disk_used += num_bytes * factor;
		if (READ_ONCE(space_info->periodic_reclaim))
			btrfs_space_info_update_reclaimable(space_info, -num_bytes);
		spin_unlock(&cache->lock);
		spin_unlock(&space_info->lock);
	} else {
		old_val -= num_bytes;
		cache->used = old_val;
		cache->pinned += num_bytes;
		btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
		space_info->bytes_used -= num_bytes;
		space_info->disk_used -= num_bytes * factor;
		if (READ_ONCE(space_info->periodic_reclaim))
			btrfs_space_info_update_reclaimable(space_info, num_bytes);
		else
			reclaim = should_reclaim_block_group(cache, num_bytes);

		spin_unlock(&cache->lock);
		spin_unlock(&space_info->lock);

		set_extent_bit(&trans->transaction->pinned_extents, bytenr,
			       bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (list_empty(&cache->dirty_list)) {
		list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs);
		bg_already_dirty = false;
		btrfs_get_block_group(cache);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	/*
	 * No longer have used bytes in this block group, queue it for deletion.
	 * We do this after adding the block group to the dirty list to avoid
	 * races between cleaner kthread and space cache writeout.
	 */
	if (!alloc && old_val == 0) {
		if (!btrfs_test_opt(info, DISCARD_ASYNC))
			btrfs_mark_bg_unused(cache);
	} else if (!alloc && reclaim) {
		btrfs_mark_bg_to_reclaim(cache);
	}

	btrfs_put_block_group(cache);

	/* Modified block groups are accounted for in the delayed_refs_rsv. */
	if (!bg_already_dirty)
		btrfs_inc_delayed_refs_rsv_bg_updates(info);

	return 0;
}
/*
 * Update the block_group and space info counters.
 *
 * @cache:	The cache we are manipulating
 * @ram_bytes:	The number of bytes of file content; the same as @num_bytes
 *		except on the compression path
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space. If this is a
 * reservation and the block group has become read only we cannot make the
 * reservation and return -EAGAIN, otherwise this function always succeeds.
 */
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc,
			     bool force_wrong_size_class)
{
	struct btrfs_space_info *space_info = cache->space_info;
	enum btrfs_block_group_size_class size_class;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		ret = -EAGAIN;
		goto out;
	}

	if (btrfs_block_group_should_use_size_class(cache)) {
		size_class = btrfs_calc_block_group_size_class(num_bytes);
		ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class);
		if (ret)
			goto out;
	}
	cache->reserved += num_bytes;
	space_info->bytes_reserved += num_bytes;
	trace_btrfs_space_reservation(cache->fs_info, "space_info",
				      space_info->flags, num_bytes, 1);
	btrfs_space_info_update_bytes_may_use(cache->fs_info,
					      space_info, -ram_bytes);
	if (delalloc)
		cache->delalloc_bytes += num_bytes;

	/*
	 * Compression can use less space than we reserved, so wake tickets if
	 * that happens.
	 */
	if (num_bytes < ram_bytes)
		btrfs_try_granting_tickets(cache->fs_info, space_info);
out:
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
/*
 * Update the block_group and space info counters.
 *
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by somebody who is freeing space that was never actually used
 * on disk. For example, if you reserve some space for a new leaf in transaction
 * A and before transaction A commits you free that leaf, you call this with
 * reserve set to 0 in order to clear the reservation.
 */
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	else if (btrfs_is_zoned(cache->fs_info))
		space_info->bytes_zone_unusable += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;
	space_info->max_extent_size = 0;

	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);

	btrfs_try_granting_tickets(cache->fs_info, space_info);
	spin_unlock(&space_info->lock);
}
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
}
static int should_alloc_chunk(const struct btrfs_fs_info *fs_info,
			      const struct btrfs_space_info *sinfo, int force)
{
	u64 bytes_used = btrfs_space_info_used(sinfo, false);
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * In limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1));

		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	}

	if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80))
		return 0;
	return 1;
}
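/*
 * Worked example (editorial, illustrative values): with CHUNK_ALLOC_NO_FORCE,
 * sinfo->total_bytes == 10GiB and 7GiB used, 7GiB + 2MiB is below the 80%
 * threshold (8GiB), so no new chunk is allocated; at 8.5GiB used the check
 * fails and should_alloc_chunk() returns 1. With CHUNK_ALLOC_LIMITED the
 * allocation also triggers whenever less than max(64MiB, 1% of the
 * filesystem size) remains free in this space_info.
 */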
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
{
	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);

	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}
static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
{
	struct btrfs_block_group *bg;
	int ret;

	/*
	 * Check if we have enough space in the system space info because we
	 * will need to update device items in the chunk btree and insert a new
	 * chunk item in the chunk btree as well. This will allocate a new
	 * system block group if needed.
	 */
	check_system_chunk(trans, flags);

	bg = btrfs_create_chunk(trans, flags);
	if (IS_ERR(bg)) {
		ret = PTR_ERR(bg);
		goto out;
	}

	ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
	/*
	 * Normally we are not expected to fail with -ENOSPC here, since we have
	 * previously reserved space in the system space_info and allocated one
	 * new system chunk if necessary. However there are three exceptions:
	 *
	 * 1) We may have enough free space in the system space_info but all the
	 *    existing system block groups have a profile which can not be used
	 *    for extent allocation.
	 *
	 *    This happens when mounting in degraded mode. For example we have a
	 *    RAID1 filesystem with 2 devices, lose one device and mount the fs
	 *    using the other device in degraded mode. If we then allocate a chunk,
	 *    we may have enough free space in the existing system space_info, but
	 *    none of the block groups can be used for extent allocation since they
	 *    have a RAID1 profile, and because we are in degraded mode with a
	 *    single device, we are forced to allocate a new system chunk with a
	 *    SINGLE profile. Making check_system_chunk() iterate over all system
	 *    block groups and check if they have a usable profile and enough space
	 *    can be slow on very large filesystems, so we tolerate the -ENOSPC and
	 *    try again after forcing allocation of a new system chunk. Like this
	 *    we avoid paying the cost of that search in normal circumstances, when
	 *    we were not mounted in degraded mode;
	 *
	 * 2) We had enough free space in the system space_info, and one suitable
	 *    block group to allocate from when we called check_system_chunk()
	 *    above. However right after we called it, the only system block group
	 *    with enough free space got turned into RO mode by a running scrub,
	 *    and in this case we have to allocate a new one and retry. We only
	 *    need to do this allocate and retry once, since we have a transaction
	 *    handle and scrub uses the commit root to search for block groups;
	 *
	 * 3) We had one system block group with enough free space when we called
	 *    check_system_chunk(), but after that, right before we tried to
	 *    allocate the last extent buffer we needed, a discard operation came
	 *    in and it temporarily removed the last free space entry from the
	 *    block group (discard removes a free space entry, discards it, and
	 *    then adds back the entry to the block group cache).
	 */
	if (ret == -ENOSPC) {
		const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
		struct btrfs_block_group *sys_bg;

		sys_bg = btrfs_create_chunk(trans, sys_flags);
		if (IS_ERR(sys_bg)) {
			ret = PTR_ERR(sys_bg);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	} else if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
out:
	btrfs_trans_release_chunk_metadata(trans);

	if (ret)
		return ERR_PTR(ret);

	btrfs_get_block_group(bg);
	return bg;
}
/*
 * Chunk allocation is done in 2 phases:
 *
 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
 *    the chunk, the chunk mapping, create its block group and add the items
 *    that belong in the chunk btree to it - more specifically, we need to
 *    update device items in the chunk btree and add a new chunk item to it.
 *
 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
 *    group item to the extent btree and the device extent items to the devices
 *    btree.
 *
 * This is done to prevent deadlocks. For example when COWing a node from the
 * extent btree we are holding a write lock on the node's parent and if we
 * trigger chunk allocation and attempted to insert the new block group item
 * in the extent btree right away, we could deadlock because the path for the
 * insertion can include that parent node. At first glance it seems impossible
 * to trigger chunk allocation after starting a transaction since tasks should
 * reserve enough transaction units (metadata space), however while that is true
 * most of the time, chunk allocation may still be triggered for several reasons:
 *
 * 1) When reserving metadata, we check if there is enough free space in the
 *    metadata space_info and therefore don't trigger allocation of a new chunk.
 *    However later when the task actually tries to COW an extent buffer from
 *    the extent btree or from the device btree for example, it is forced to
 *    allocate a new block group (chunk) because the only one that had enough
 *    free space was just turned to RO mode by a running scrub for example (or
 *    device replace, block group reclaim thread, etc), so we can not use it
 *    for allocating an extent and end up being forced to allocate a new one;
 *
 * 2) Because we only check that the metadata space_info has enough free bytes,
 *    we end up not allocating a new metadata chunk in that case. However if
 *    the filesystem was mounted in degraded mode, none of the existing block
 *    groups might be suitable for extent allocation due to their incompatible
 *    profile (e.g. mounting a 2-device filesystem, where all block groups
 *    use a RAID1 profile, in degraded mode using a single device). In this case
 *    when the task attempts to COW some extent buffer of the extent btree for
 *    example, it will trigger allocation of a new metadata block group with a
 *    suitable profile (SINGLE profile in the example of the degraded mount of
 *    the RAID1 filesystem);
 *
 * 3) The task has reserved enough transaction units / metadata space, but when
 *    it attempts to COW an extent buffer from the extent or device btree for
 *    example, it does not find any free extent in any metadata block group,
 *    therefore forced to try to allocate a new metadata block group.
 *    This is because some other task allocated all available extents in the
 *    meanwhile - this typically happens with tasks that don't reserve space
 *    properly, either intentionally or as a bug. One example where this is
 *    done intentionally is fsync, as it does not reserve any transaction units
 *    and ends up allocating a variable number of metadata extents for log
 *    tree extent buffers;
 *
 * 4) The task has reserved enough transaction units / metadata space, but right
 *    before it tries to allocate the last extent buffer it needs, a discard
 *    operation comes in and, temporarily, removes the last free space entry from
 *    the only metadata block group that had free space (discard starts by
 *    removing a free space entry from a block group, then does the discard
 *    operation and, once it's done, it adds back the free space entry to the
 *    block group).
 *
 * We also need this 2-phase setup when adding a device to a filesystem with
 * a seed device - we must create new metadata and system chunks without adding
 * any of the block group items to the chunk, extent and device btrees. If we
 * did not do it this way, we would get ENOSPC when attempting to update those
 * btrees, since all the chunks from the seed device are read-only.
 *
 * Phase 1 does the updates and insertions to the chunk btree because if we had
 * it done in phase 2 and have a thundering herd of tasks allocating chunks in
 * parallel, we risk having too many system chunks allocated by many tasks if
 * many tasks reach phase 1 without the previous ones completing phase 2. In the
 * extreme case this leads to exhaustion of the system chunk array in the
 * superblock. This is easier to trigger if using a btree node/leaf size of 64K
 * and with RAID filesystems (so we have more device items in the chunk btree).
 * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
 * the system chunk array due to concurrent allocations") provides more details.
 *
 * Allocation of system chunks does not happen through this function. A task that
 * needs to update the chunk btree (the only btree that uses system chunks), must
 * preallocate chunk space by calling either check_system_chunk() or
 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
 * metadata chunk or when removing a chunk, while the latter is used before doing
 * a modification to the chunk btree - use cases for the latter are adding,
 * removing and resizing a device as well as relocation of a system chunk.
 * See the comment below for more details.
 *
 * The reservation of system space, done through check_system_chunk(), as well
 * as all the updates and insertions into the chunk btree must be done while
 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
 * an extent buffer from the chunks btree we never trigger allocation of a new
 * system chunk, which would result in a deadlock (trying to lock twice an
 * extent buffer of the chunk btree, first time before triggering the chunk
 * allocation and the second time during chunk allocation while attempting to
 * update the chunks btree). The system chunk array is also updated while holding
 * that mutex. The same logic applies to removing chunks - we must reserve system
 * space, update the chunk btree and the system chunk array in the superblock
 * while holding fs_info->chunk_mutex.
 *
 * This function, btrfs_chunk_alloc(), belongs to phase 1.
 *
 * If @force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If @force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_group *ret_bg;
	bool wait_for_alloc = false;
	bool should_alloc = false;
	bool from_extent_allocation = false;
	int ret = 0;

	if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
		from_extent_allocation = true;
		force = CHUNK_ALLOC_FORCE;
	}

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;
	/*
	 * Allocation of system chunks can not happen through this path, as we
	 * could end up in a deadlock if we are allocating a data or metadata
	 * chunk and there is another task modifying the chunk btree.
	 *
	 * This is because while we are holding the chunk mutex, we will attempt
	 * to add the new chunk item to the chunk btree or update an existing
	 * device item in the chunk btree, while the other task that is modifying
	 * the chunk btree is attempting to COW an extent buffer while holding a
	 * lock on it and on its parent - if the COW operation triggers a system
	 * chunk allocation, then we can deadlock because we are holding the
	 * chunk mutex and we may need to access that extent buffer or its parent
	 * in order to add the chunk item or update a device item.
	 *
	 * Tasks that want to modify the chunk tree should reserve system space
	 * before updating the chunk btree, by calling either
	 * btrfs_reserve_chunk_metadata() or check_system_chunk().
	 * It's possible that after a task reserves the space, it still ends up
	 * here - this happens in the cases described above at do_chunk_alloc().
	 * The task will have to either retry or fail.
	 */
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return -ENOSPC;

	space_info = btrfs_find_space_info(fs_info, flags);
	ASSERT(space_info);

	do {
		spin_lock(&space_info->lock);
		if (force < space_info->force_alloc)
			force = space_info->force_alloc;
		should_alloc = should_alloc_chunk(fs_info, space_info, force);
		if (space_info->full) {
			/* No more free physical space */
			if (should_alloc)
				ret = -ENOSPC;
			else
				ret = 0;
			spin_unlock(&space_info->lock);
			return ret;
		} else if (!should_alloc) {
			spin_unlock(&space_info->lock);
			return 0;
		} else if (space_info->chunk_alloc) {
			/*
			 * Someone is already allocating, so we need to block
			 * until this someone is finished and then loop to
			 * recheck if we should continue with our allocation
			 * attempt.
			 */
			wait_for_alloc = true;
			force = CHUNK_ALLOC_NO_FORCE;
			spin_unlock(&space_info->lock);
			mutex_lock(&fs_info->chunk_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			/* Proceed with allocation */
			space_info->chunk_alloc = 1;
			wait_for_alloc = false;
			spin_unlock(&space_info->lock);
		}

		cond_resched();
	} while (wait_for_alloc);

	mutex_lock(&fs_info->chunk_mutex);
	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * If we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret_bg = do_chunk_alloc(trans, flags);
	trans->allocating_chunk = false;

	if (IS_ERR(ret_bg)) {
		ret = PTR_ERR(ret_bg);
	} else if (from_extent_allocation && (flags & BTRFS_BLOCK_GROUP_DATA)) {
		/*
		 * New block group is likely to be used soon. Try to activate
		 * it now. Failure is OK for now.
		 */
		btrfs_zone_activate(ret_bg);
	}

	if (!ret)
		btrfs_put_block_group(ret_bg);

	spin_lock(&space_info->lock);
	if (ret < 0) {
		if (ret == -ENOSPC)
			space_info->full = 1;
		else
			goto out;
	} else {
		ret = 1;
		space_info->max_extent_size = 0;
	}
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);

	return ret;
}
static u64 get_profile_num_devs(const struct btrfs_fs_info *fs_info, u64 type)
{
	u64 num_dev;

	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
	if (!num_dev)
		num_dev = fs_info->fs_devices->rw_devices;

	return num_dev;
}
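
/*
 * Make sure the SYSTEM space_info has @bytes available for a chunk btree
 * modification: allocate a new system chunk if there is not enough free
 * space, then reserve @bytes in the chunk block reserve.
 */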
static void reserve_chunk_space(struct btrfs_trans_handle *trans,
				u64 bytes,
				u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *info;
	u64 left;
	int ret = 0;

	/*
	 * Needed both because we can end up allocating a system chunk and to
	 * make the space reservation in the chunk block reserve atomic and
	 * race free.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, bytes, type);
		btrfs_dump_space_info(fs_info, info, 0, 0);
	}
	if (left < bytes) {
		u64 flags = btrfs_system_alloc_profile(fs_info);
		struct btrfs_block_group *bg;

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leaves from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		bg = btrfs_create_chunk(trans, flags);
		if (IS_ERR(bg)) {
			ret = PTR_ERR(bg);
		} else {
			/*
			 * We have a new chunk. We also need to activate it for
			 * a zoned filesystem.
			 */
			ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
			if (ret < 0)
				return;

			/*
			 * If we fail to add the chunk item here, we end up
			 * trying again at phase 2 of chunk allocation, at
			 * btrfs_create_pending_block_groups(). So ignore
			 * any error here. An ENOSPC here could happen, due to
			 * the cases described at do_chunk_alloc() - the system
			 * block group we just created was just turned into RO
			 * mode by a scrub for example, or a running discard
			 * temporarily removed its free space entries, etc.
			 */
			btrfs_chunk_alloc_add_chunk_item(trans, bg);
		}
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info,
					  &fs_info->chunk_block_rsv,
					  bytes, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += bytes;
	}
}

/*
 * Reserve space in the system space for allocating or removing a chunk.
 * The caller must be holding fs_info->chunk_mutex.
 */
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	const u64 num_devs = get_profile_num_devs(fs_info, type);
	u64 bytes;

	/* num_devs device items to update and 1 chunk item to add or remove. */
	bytes = btrfs_calc_metadata_size(fs_info, num_devs) +
		btrfs_calc_insert_metadata_size(fs_info, 1);

	reserve_chunk_space(trans, bytes, type);
}

/*
 * Reserve space in the system space, if needed, for doing a modification to the
 * chunk btree.
 *
 * @trans:		A transaction handle.
 * @is_item_insertion:	Indicate if the modification is for inserting a new item
 *			in the chunk btree or if it's for the deletion or update
 *			of an existing item.
 *
 * This is used in a context where we need to update the chunk btree outside
 * block group allocation and removal, to avoid a deadlock with a concurrent
 * task that is allocating a metadata or data block group and therefore needs to
 * update the chunk btree while holding the chunk mutex. After the update to the
 * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called.
 */
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 bytes;

	if (is_item_insertion)
		bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	else
		bytes = btrfs_calc_metadata_size(fs_info, 1);

	mutex_lock(&fs_info->chunk_mutex);
	reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM);
	mutex_unlock(&fs_info->chunk_mutex);
}
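
/*
 * Release the free space cache inode reference, if any, of every block group,
 * waiting first for any caching in progress on the group to finish.
 */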
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;

	block_group = btrfs_lookup_first_block_group(info, 0);
	while (block_group) {
		btrfs_wait_block_group_cache_done(block_group);
		spin_lock(&block_group->lock);
		if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF,
				       &block_group->runtime_flags)) {
			struct btrfs_inode *inode = block_group->inode;

			block_group->inode = NULL;
			spin_unlock(&block_group->lock);

			ASSERT(block_group->io_ctl.inode == NULL);
			iput(&inode->vfs_inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		block_group = btrfs_next_block_group(block_group);
	}
}

/*
 * Must be called only after stopping all workers, since we could have block
 * group caching kthreads running, and therefore they could race with us if we
 * freed the block groups before stopping them.
 */
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	if (btrfs_is_zoned(info)) {
		if (info->active_meta_bg) {
			btrfs_put_block_group(info->active_meta_bg);
			info->active_meta_bg = NULL;
		}
		if (info->active_system_bg) {
			btrfs_put_block_group(info->active_system_bg);
			info->active_system_bg = NULL;
		}
	}

	write_lock(&info->block_group_cache_lock);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		btrfs_put_caching_control(caching_ctl);
	}
	write_unlock(&info->block_group_cache_lock);

	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}

	while (!list_empty(&info->reclaim_bgs)) {
		block_group = list_first_entry(&info->reclaim_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->zone_active_bgs_lock);
	while (!list_empty(&info->zone_active_bgs)) {
		block_group = list_first_entry(&info->zone_active_bgs,
					       struct btrfs_block_group,
					       active_bg_list);
		list_del_init(&block_group->active_bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->zone_active_bgs_lock);

	write_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group,
				       cache_node);
		rb_erase_cached(&block_group->cache_node,
				&info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		write_unlock(&info->block_group_cache_lock);
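		/*
		 * The cache lock was dropped above because groups_sem is a
		 * sleeping rwsem and may not be acquired while holding the
		 * (spinning) block group cache rwlock; it is re-taken at the
		 * end of the iteration.
		 */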
		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		/*
		 * We haven't cached this block group, which means we may have
		 * excluded extents left on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			btrfs_free_excluded_extents(block_group);

		btrfs_remove_free_space_cache(block_group);
		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(refcount_read(&block_group->refs) == 1);
		ASSERT(block_group->swap_extents == 0);
		btrfs_put_block_group(block_group);

		write_lock(&info->block_group_cache_lock);
	}
	write_unlock(&info->block_group_cache_lock);

	btrfs_release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		/*
		 * Do not hide this behind enospc_debug, this is actually
		 * important and indicates a real bug if this happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_may_use > 0))
			btrfs_dump_space_info(info, space_info, 0, 0);

		/*
		 * If there was a failure to clean up a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on bytes_reserved > 0 in
		 * that case.
		 */
		if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
			if (WARN_ON(space_info->bytes_reserved > 0))
				btrfs_dump_space_info(info, space_info, 0, 0);
		}

		WARN_ON(space_info->reclaim_size > 0);
		list_del(&space_info->list);
		btrfs_sysfs_remove_space_info(space_info);
	}
	return 0;
}
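
/*
 * Freezing a block group keeps its chunk map alive while a task (a discard or
 * trim operation, for example) still needs it. The last unfreeze performs the
 * deferred chunk map removal if the block group was flagged as removed in the
 * meantime.
 */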
void btrfs_freeze_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->frozen);
}

void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
	spin_unlock(&block_group->lock);

	if (cleanup) {
		struct btrfs_chunk_map *map;

		map = btrfs_find_chunk_map(fs_info, block_group->start, 1);
		/* Logic error, can't happen. */
		ASSERT(map);

		btrfs_remove_chunk_map(fs_info, map);

		/* Once for our lookup reference. */
		btrfs_free_chunk_map(map);

		/*
		 * We may have left one free space entry, and other tasks
		 * trimming this block group may have left one entry each.
		 * Free them if any.
		 */
		btrfs_remove_free_space_cache(block_group);
	}
}
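
/*
 * Track the extents of an active swap file in a block group. A read-only
 * block group refuses new swap extents, and a block group with active swap
 * extents must stay writeable until they are all released.
 */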
bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
{
	bool ret = true;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		bg->swap_extents++;
	spin_unlock(&bg->lock);

	return ret;
}

void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
{
	spin_lock(&bg->lock);
	ASSERT(!bg->ro);
	ASSERT(bg->swap_extents >= amount);
	bg->swap_extents -= amount;
	spin_unlock(&bg->lock);
}
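
/*
 * Map an allocation size to a size class: up to 128K is small, up to 8M is
 * medium, anything larger is large.
 */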
enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size)
{
	if (size <= SZ_128K)
		return BTRFS_BG_SZ_SMALL;
	if (size <= SZ_8M)
		return BTRFS_BG_SZ_MEDIUM;
	return BTRFS_BG_SZ_LARGE;
}

/*
 * Handle a block group allocating an extent in a size class
 *
 * @bg:				The block group we allocated in.
 * @size_class:			The size class of the allocation.
 * @force_wrong_size_class:	Whether we are desperate enough to allow
 *				mismatched size classes.
 *
 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the
 * case of a race that leads to the wrong size class without
 * force_wrong_size_class set.
 *
 * find_free_extent will skip block groups with a mismatched size class until
 * it really needs to avoid ENOSPC. In that case it will set
 * force_wrong_size_class. However, if a block group is newly allocated and
 * doesn't yet have a size class, then it is possible for two allocations of
 * different sizes to race and both try to use it. The loser is caught here and
 * has to retry.
 */
int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
				     enum btrfs_block_group_size_class size_class,
				     bool force_wrong_size_class)
{
	ASSERT(size_class != BTRFS_BG_SZ_NONE);

	/* The new allocation is in the right size class, do nothing */
	if (bg->size_class == size_class)
		return 0;
	/*
	 * The new allocation is in a mismatched size class.
	 * This means one of two things:
	 *
	 * 1. Two tasks in find_free_extent for different size_classes raced
	 *    and hit the same empty block_group. Make the loser try again.
	 * 2. A call to find_free_extent got desperate enough to set
	 *    'force_wrong_size_class'. Don't change the size_class, but allow
	 *    the allocation.
	 */
	if (bg->size_class != BTRFS_BG_SZ_NONE) {
		if (force_wrong_size_class)
			return 0;
		return -EAGAIN;
	}
	/*
	 * The happy new block group case: the new allocation is the first
	 * one in the block_group so we set size_class.
	 */
	bg->size_class = size_class;

	return 0;
}
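
/*
 * Size classes apply only on regular (non-zoned) filesystems and only to
 * data-only block groups; metadata, system and mixed block groups don't use
 * them.
 */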
bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg)
{
	if (btrfs_is_zoned(bg->fs_info))
		return false;
	if (!btrfs_is_block_group_data_only(bg))
		return false;
	return true;
}