relocation.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "qgroup.h"
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "backref.h"
#include "misc.h"
#include "subpage.h"
#include "zoned.h"
#include "inode-item.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "relocation.h"
#include "super.h"
#include "tree-checker.h"
#include "raid-stripe-tree.h"

/*
 * Relocation overview
 *
 * [What does relocation do]
 *
 * The objective of relocation is to relocate all extents of the target block
 * group to other block groups.
 * This is utilized by resize (shrink only), profile converting, compacting
 * space, or balance routine to spread chunks over devices.
 *
 *		Before		|		After
 * ------------------------------------------------------------------
 *  BG A: 10 data extents	| BG A: deleted
 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
 *  BG C:  1 data extent	| BG C:  3 data extents (1 old + 2 relocated)
 *
 * [How does relocation work]
 *
 * 1.   Mark the target block group read-only
 *      New extents won't be allocated from the target block group.
 *
 * 2.1  Record each extent in the target block group
 *      To build a proper map of extents to be relocated.
 *
 * 2.2  Build data reloc tree and reloc trees
 *      Data reloc tree will contain an inode, recording all newly relocated
 *      data extents.
 *      There will be only one data reloc tree for one data block group.
 *
 *      Reloc tree will be a special snapshot of its source tree, containing
 *      relocated tree blocks.
 *      Each tree referring to a tree block in the target block group will get
 *      its reloc tree built.
 *
 * 2.3  Swap source tree with its corresponding reloc tree
 *      Each involved tree only refers to new extents after the swap.
 *
 * 3.   Cleanup reloc trees and data reloc tree.
 *      As old extents in the target block group are still referenced by reloc
 *      trees, we need to clean them up before really freeing the target block
 *      group.
 *
 * The main complexity is in steps 2.2 and 2.3.
 *
 * The entry point of relocation is the relocate_block_group() function.
 */
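/*
 * A rough mapping of the steps above onto the code below (see each helper
 * for the authoritative behaviour): step 2.2 is handled by
 * btrfs_init_reloc_root()/create_reloc_root() plus the data reloc inode,
 * step 2.3 by replace_path() and the reloc root merging code, and step 3
 * by the reloc root cleanup paths (e.g. clean_dirty_subvols()).
 */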
#define RELOCATION_RESERVED_NODES	256

/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	void *data;
};
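/*
 * The anonymous struct above deliberately mirrors the layout of
 * struct rb_simple_node from misc.h (an rb_node followed by a u64 bytenr),
 * which is what lets the generic rb_simple_search() and rb_simple_insert()
 * helpers index mapping nodes by bytenr.
 */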
struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represent a tree block to process
 */
struct tree_block {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	u64 owner;
	struct btrfs_key key;
	u8 level;
	bool key_ready;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
	u64 owning_root;
};
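/*
 * Roughly: a cluster is a run of data extents that will be copied
 * contiguously into the data reloc inode. [start, end] spans the run,
 * nr counts the extents, and boundary[] records the start of each extent
 * so the copy can be split on extent boundaries.
 */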
/* Stages of data relocation. */
enum reloc_stage {
	MOVE_DATA_EXTENTS,
	UPDATE_DATA_PTRS
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct btrfs_backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	enum reloc_stage stage;
	bool create_reloc_tree;
	bool merge_reloc_tree;
	bool found_file_extent;
};
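/*
 * Data relocation runs in two passes driven by 'stage' above:
 * MOVE_DATA_EXTENTS copies the data into the data reloc inode, then
 * UPDATE_DATA_PTRS rewrites the file extent items to point at the new
 * copies (see replace_file_extents() below).
 */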
static void mark_block_processed(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_range(node->bytenr, rc->block_group->start,
		     rc->block_group->length)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		set_extent_bit(&rc->processed_blocks, node->bytenr,
			       node->bytenr + blocksize - 1, EXTENT_DIRTY, NULL);
	}
	node->processed = 1;
}

/*
 * walk up backref nodes until we reach the node that represents the tree root
 */
static struct btrfs_backref_node *walk_up_backref(
		struct btrfs_backref_node *node,
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct btrfs_backref_node *walk_down_backref(
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
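/*
 * Taken together, walk_up_backref() and walk_down_backref() form an
 * iterative depth-first traversal over every reference path from a block
 * up to the tree roots: edges[] is the explicit stack and *index its
 * depth, preserved across calls.
 */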
static bool reloc_root_is_dead(const struct btrfs_root *root)
{
	/*
	 * Pair with set_bit/clear_bit in clean_dirty_subvols() and
	 * btrfs_update_reloc_root(). We need to see the updated bit before
	 * trying to access reloc_root.
	 */
	smp_rmb();
	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return true;
	return false;
}

/*
 * Check if this subvolume tree has a valid reloc tree.
 *
 * A reloc tree after swap is considered dead, thus not considered valid.
 * This is enough for most callers, as they don't distinguish a dead reloc
 * root from no reloc root. But btrfs_should_ignore_reloc_root() below is a
 * special case.
 */
static bool have_reloc_root(const struct btrfs_root *root)
{
	if (reloc_root_is_dead(root))
		return false;
	if (!root->reloc_root)
		return false;
	return true;
}

bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return false;

	/* This root has been merged with its reloc tree, we can ignore it */
	if (reloc_root_is_dead(root))
		return true;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return false;

	if (btrfs_header_generation(reloc_root->commit_root) ==
	    root->fs_info->running_transaction->transid)
		return false;
	/*
	 * If there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the backref
	 * node for the fs tree root is useless for relocation.
	 */
	return true;
}

/*
 * find reloc tree by address of tree root
 */
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	ASSERT(rc);
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return btrfs_grab_root(root);
}

/*
 * For useless nodes, do two major clean ups:
 *
 * - Cleanup the children edges and nodes
 *   If a child node is also orphan (no parent) during cleanup, then the child
 *   node will also be cleaned up.
 *
 * - Freeing up leaves (level 0), keeps nodes detached
 *   For nodes, the node is still cached as "detached"
 *
 * Return false if @node is not in the @useless_nodes list.
 * Return true if @node is in the @useless_nodes list.
 */
static bool handle_useless_nodes(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct list_head *useless_node = &cache->useless_node;
	bool ret = false;

	while (!list_empty(useless_node)) {
		struct btrfs_backref_node *cur;

		cur = list_first_entry(useless_node, struct btrfs_backref_node,
				       list);
		list_del_init(&cur->list);

		/* Only tree root nodes can be added to @useless_nodes */
		ASSERT(list_empty(&cur->upper));

		if (cur == node)
			ret = true;

		/* The node is the lowest node */
		if (cur->lowest) {
			list_del_init(&cur->lower);
			cur->lowest = 0;
		}

		/* Cleanup the lower edges */
		while (!list_empty(&cur->lower)) {
			struct btrfs_backref_edge *edge;
			struct btrfs_backref_node *lower;

			edge = list_entry(cur->lower.next,
					  struct btrfs_backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			btrfs_backref_free_edge(cache, edge);

			/* Child node is also orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
		}
		/* Mark this block processed for relocation */
		mark_block_processed(rc, cur);

		/*
		 * Backref nodes for tree leaves are deleted from the cache.
		 * Backref nodes for upper level tree blocks are left in the
		 * cache to avoid unnecessary backref lookup.
		 */
		if (cur->level > 0) {
			list_add(&cur->list, &cache->detached);
			cur->detached = 1;
		} else {
			rb_erase(&cur->rb_node, &cache->rb_root);
			btrfs_backref_free_node(cache, cur);
		}
	}
	return ret;
}

/*
 * Build backref tree for a given tree block. Root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond to
 * roots of b-trees that reference the tree block.
 *
 * The basic idea of this function is to check backrefs of a given block to
 * find upper level blocks that reference the block, and then check backrefs
 * of these upper level blocks recursively. The recursion stops when the tree
 * root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 * all upper level blocks that directly/indirectly reference the block are also
 * cached.
 */
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
			struct btrfs_trans_handle *trans,
			struct reloc_control *rc, struct btrfs_key *node_key,
			int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	/* For searching parent of TREE_BLOCK_REF */
	struct btrfs_path *path;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_edge *edge;
	int ret;

	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		ret = -ENOMEM;
		goto out;
	}

	node->lowest = 1;
	cur = node;

	/* Breadth-first search to build backref cache */
	do {
		ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
						  node_key, cur);
		if (ret < 0)
			goto out;

		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/*
		 * The pending list isn't empty, take the first block to
		 * process
		 */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto out;

	if (handle_useless_nodes(rc, node))
		node = NULL;
out:
	btrfs_free_path(iter->path);
	kfree(iter);
	btrfs_free_path(path);
	if (ret) {
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(ret);
	}
	ASSERT(!node || !node->detached);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
	return node;
}
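/*
 * The returned node is the root of the backref tree for the relocated
 * block; a NULL return (via handle_useless_nodes()) means no root
 * currently references the block, so there is nothing to relocate.
 */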
/*
 * helper to add a backref node for the newly created snapshot. the backref
 * node is created by cloning the backref node that corresponds to the root
 * of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      const struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_node *new_node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *new_edge;
	struct rb_node *rb_node;

	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = rb_simple_search(&cache->rb_root,
					   reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct btrfs_backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
					    node->level);
	if (!new_node)
		return -ENOMEM;

	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = btrfs_grab_root(dest);
	ASSERT(new_node->root);

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = btrfs_backref_alloc_edge(cache);
			if (!new_edge)
				goto fail;

			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
						new_node, LINK_UPPER);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
				   &new_node->rb_node);
	if (rb_node)
		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct btrfs_backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		btrfs_backref_free_edge(cache, new_edge);
	}
	btrfs_backref_free_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->commit_root->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_err(fs_info,
			  "Duplicate root found for start=%llu while inserting into relocation tree",
			  node->bytenr);
		/* The node was never inserted, don't leak it. */
		kfree(node);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}

/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;
	bool put_ref = false;

	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
					   root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
			RB_CLEAR_NODE(&node->rb_node);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		ASSERT(!node || (struct btrfs_root *)node->data == root);
	}

	/*
	 * We only put the reloc root here if it's on the list. There's a lot
	 * of places where the pattern is to splice the rc->reloc_roots, process
	 * the reloc roots, and then add the reloc root back onto
	 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
	 * list we don't want the reference being dropped, because the caller
	 * messing with the list is in charge of the reference.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&root->root_list)) {
		put_ref = true;
		list_del_init(&root->root_list);
	}
	spin_unlock(&fs_info->trans_lock);
	if (put_ref)
		btrfs_put_root(root);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
				   root->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = root->node->start;
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
	return 0;
}
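/*
 * The mapping above is keyed by the reloc root's commit root bytenr, so
 * once the reloc tree has been COWed to a new location the entry has to
 * be re-inserted under the new root node address before the next lookup.
 */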
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret = 0;
	bool must_abort = false;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	if (!root_item)
		return ERR_PTR(-ENOMEM);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (btrfs_root_id(root) == objectid) {
		u64 commit_root_gen;

		/*
		 * Relocation will wait for the cleaner thread, and any
		 * half-dropped subvolume will be fully cleaned up at mount
		 * time. So here we shouldn't hit a subvolume with non-zero
		 * drop_progress.
		 *
		 * If this isn't the case, error out since it can make us
		 * attempt to drop references for extents that were already
		 * dropped before.
		 */
		if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) {
			struct btrfs_key cpu_key;

			btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress);
			btrfs_err(fs_info,
	"cannot relocate partially dropped subvolume %llu, drop progress key (%llu %u %llu)",
				  objectid, cpu_key.objectid, cpu_key.type, cpu_key.offset);
			ret = -EUCLEAN;
			goto fail;
		}

		/* called by btrfs_init_reloc_root() */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;

		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) when the relocation root is created
		 * either inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) or when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;
	}

	/*
	 * We have changed references at this point, we must abort the
	 * transaction if anything fails.
	 */
	must_abort = true;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (btrfs_root_id(root) == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		btrfs_set_root_drop_level(root_item, 0);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	if (ret)
		goto fail;

	kfree(root_item);

	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
	if (IS_ERR(reloc_root)) {
		ret = PTR_ERR(reloc_root);
		goto abort;
	}
	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
	btrfs_set_root_last_trans(reloc_root, trans->transid);
	return reloc_root;
fail:
	kfree(root_item);
abort:
	if (must_abort)
		btrfs_abort_transaction(trans, ret);
	return ERR_PTR(ret);
}

/*
 * create reloc tree for a given fs tree. reloc tree is just a
 * snapshot of the fs tree with special root objectid.
 *
 * The reloc_root comes out of here with two references, one for
 * root->reloc_root, and another for being on the rc->reloc_roots list.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (!rc)
		return 0;

	/*
	 * The subvolume has a reloc tree but the swap is finished, no need to
	 * create/update the dead reloc tree
	 */
	if (reloc_root_is_dead(root))
		return 0;

	/*
	 * This is subtle but important. We do not do
	 * record_root_in_transaction for reloc roots, instead we record their
	 * corresponding fs root, and then here we update the last trans for the
	 * reloc root. This means that we have to do this for the entire life
	 * of the reloc root, regardless of which stage of the relocation we are
	 * in.
	 */
	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		btrfs_set_root_last_trans(reloc_root, trans->transid);
		return 0;
	}

	/*
	 * We are merging reloc roots, we do not need new reloc trees. Also
	 * reloc trees never need their own reloc tree.
	 */
	if (!rc->create_reloc_tree || btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, btrfs_root_id(root));
	if (clear_rsv)
		trans->block_rsv = rsv;
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	root->reloc_root = btrfs_grab_root(reloc_root);
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!have_reloc_root(root))
		return 0;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/*
	 * We are probably ok here, but __del_reloc_root() will drop its ref of
	 * the root. We have the ref for root->reloc_root, but just in case
	 * hold it while we update the reloc root.
	 */
	btrfs_grab_root(reloc_root);

	/* root->reloc_root will stay until current relocation finished */
	if (fs_info->reloc_ctl && fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		/*
		 * Mark the tree as dead before we change reloc_root so
		 * have_reloc_root will not touch it from now on.
		 */
		smp_wmb();
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		__update_reloc_root(reloc_root);
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	btrfs_put_root(reloc_root);
	return ret;
}

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->reloc_block_group_start;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
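/*
 * Worked example of the math above, assuming a block group that starts
 * at logical address 1G: data that lived at bytenr 1G+4M was copied into
 * the data reloc inode at file offset 4M, so looking up the file extent
 * at (bytenr - reloc_block_group_start) yields the disk_bytenr of the
 * relocated copy.
 */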
/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_ref ref = { 0 };

		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_range(bytenr, rc->block_group->start,
			      rc->block_group->length))
			continue;

		/*
		 * if we are modifying block in fs tree, wait for read_folio
		 * to complete and drop the extent cache
		 */
		if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = btrfs_find_first_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(inode) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = btrfs_find_first_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(inode) == key.objectid) {
				struct extent_state *cached_state = NULL;

				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				/* Take mmap lock to serialize with reflinks. */
				if (!down_read_trylock(&inode->i_mmap_lock))
					continue;
				ret = try_lock_extent(&inode->io_tree, key.offset,
						      end, &cached_state);
				if (!ret) {
					up_read(&inode->i_mmap_lock);
					continue;
				}

				btrfs_drop_extent_map_range(inode, key.offset, end, true);
				unlock_extent(&inode->io_tree, key.offset, end,
					      &cached_state);
				up_read(&inode->i_mmap_lock);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ref.action = BTRFS_ADD_DELAYED_REF;
		ref.bytenr = new_bytenr;
		ref.num_bytes = num_bytes;
		ref.parent = parent;
		ref.owning_root = btrfs_root_id(root);
		ref.ref_root = btrfs_header_owner(leaf);
		btrfs_init_data_ref(&ref, key.objectid, key.offset,
				    btrfs_root_id(root), false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		ref.action = BTRFS_DROP_DELAYED_REF;
		ref.bytenr = bytenr;
		ref.num_bytes = num_bytes;
		ref.parent = parent;
		ref.owning_root = btrfs_root_id(root);
		ref.ref_root = btrfs_header_owner(leaf);
		btrfs_init_data_ref(&ref, key.objectid, key.offset,
				    btrfs_root_id(root), false);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(trans, leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}

static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
					       int slot, const struct btrfs_path *path,
					       int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;

	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, the level of the block + 1 is returned.
 * if no block was replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	ASSERT(btrfs_root_id(src) == BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(btrfs_root_id(dest) != BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			return ret;
		}
	}

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		ASSERT(level >= lowest_level);

		ret = btrfs_bin_search(parent, 0, &key, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							 path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = btrfs_read_node_slot(parent, slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb,
						      BTRFS_NESTING_COW);
				if (ret) {
					btrfs_tree_unlock(eb);
					free_extent_buffer(eb);
					break;
				}
			}

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		path->lowest_level = 0;
		if (ret) {
			if (ret > 0)
				ret = -ENOENT;
			break;
		}

		/*
		 * Inform qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(trans, parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(trans, path->nodes[level]);

		ref.action = BTRFS_ADD_DELAYED_REF;
		ref.bytenr = old_bytenr;
		ref.num_bytes = blocksize;
		ref.parent = path->nodes[level]->start;
		ref.owning_root = btrfs_root_id(src);
		ref.ref_root = btrfs_root_id(src);
		btrfs_init_tree_ref(&ref, level - 1, 0, true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		ref.action = BTRFS_ADD_DELAYED_REF;
		ref.bytenr = new_bytenr;
		ref.num_bytes = blocksize;
		ref.parent = 0;
		ref.owning_root = btrfs_root_id(dest);
		ref.ref_root = btrfs_root_id(dest);
		btrfs_init_tree_ref(&ref, level - 1, 0, true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		/* We don't know the real owning_root, use 0. */
		ref.action = BTRFS_DROP_DELAYED_REF;
		ref.bytenr = new_bytenr;
		ref.num_bytes = blocksize;
		ref.parent = path->nodes[level]->start;
		ref.owning_root = 0;
		ref.ref_root = btrfs_root_id(src);
		btrfs_init_tree_ref(&ref, level - 1, 0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		/* We don't know the real owning_root, use 0. */
		ref.action = BTRFS_DROP_DELAYED_REF;
		ref.bytenr = old_bytenr;
		ref.num_bytes = blocksize;
		ref.parent = 0;
		ref.owning_root = 0;
		ref.ref_root = btrfs_root_id(dest);
		btrfs_init_tree_ref(&ref, level - 1, 0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}
/*
 * helper to find the next relocated block in the reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}
/*
 * walk down the reloc tree to find the relocated block at the lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		eb = btrfs_read_node_slot(eb, path->slots[i]);
		if (IS_ERR(eb))
			return PTR_ERR(eb);
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}
/*
 * invalidate extent cache for file extents whose keys are in the range of
 * [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   const struct btrfs_key *min_key,
				   const struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		struct extent_state *cached_state = NULL;

		cond_resched();
		if (inode)
			iput(&inode->vfs_inode);

		if (objectid > max_key->objectid)
			break;

		inode = btrfs_find_first_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(inode);

		if (ino > max_key->objectid) {
			iput(&inode->vfs_inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->vfs_inode.i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for read_folio to complete */
		lock_extent(&inode->io_tree, start, end, &cached_state);
		btrfs_drop_extent_map_range(inode, start, end, true);
		unlock_extent(&inode->io_tree, start, end, &cached_state);
	}
	return 0;
}
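
/*
 * Descriptive note (added for readability, derived from the code below):
 * find the next node key at @level or above without descending into child
 * blocks.  Returns 0 and fills @key on success, 1 if every level of the path
 * is already at its last slot.
 */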
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}
/*
 * Insert current subvolume into reloc_control::dirty_subvol_roots
 */
static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;
	int ret;

	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);

	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	btrfs_set_root_drop_level(reloc_root_item, 0);
	btrfs_set_root_refs(reloc_root_item, 0);
	ret = btrfs_update_reloc_root(trans, root);
	if (ret)
		return ret;

	if (list_empty(&root->reloc_dirty_list)) {
		btrfs_grab_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}

	return 0;
}
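
/*
 * Descriptive note (added, derived from the code below): drop every merged
 * subvolume's reloc root and every orphan reloc root queued on
 * reloc_control::dirty_subvol_roots, remembering the first error while still
 * processing the rest of the list.
 */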
static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	int ret2;

	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
			/* Merged subvolume, cleanup its reloc root */
			struct btrfs_root *reloc_root = root->reloc_root;

			list_del_init(&root->reloc_dirty_list);
			root->reloc_root = NULL;
			/*
			 * Need barrier to ensure clear_bit() only happens after
			 * root->reloc_root = NULL.  Pairs with have_reloc_root.
			 */
			smp_wmb();
			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
			if (reloc_root) {
				/*
				 * btrfs_drop_snapshot drops the ref we hold for
				 * ->reloc_root.  If it fails, however, we must
				 * drop the ref ourselves.
				 */
				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
				if (ret2 < 0) {
					btrfs_put_root(reloc_root);
					if (!ret)
						ret = ret2;
				}
			}
			btrfs_put_root(root);
		} else {
			/* Orphan reloc tree, just clean it up */
			ret2 = btrfs_drop_snapshot(root, 0, 1);
			if (ret2 < 0) {
				btrfs_put_root(root);
				if (!ret)
					ret = ret2;
			}
		}
	}
	return ret;
}
/*
 * merge the relocated tree blocks in the reloc tree with the corresponding
 * fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int reserve_level;
	int level;
	int max_level;
	int replaced = 0;
	int ret = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		atomic_inc(&reloc_root->node->refs);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));
		btrfs_unlock_up_safe(path, 0);
	}

	/*
	 * In merge_reloc_root(), we modify the upper level pointer to swap the
	 * tree blocks between the reloc tree and the subvolume tree.  Thus for
	 * tree block COW, we COW at most from level 1 to the root level for
	 * each tree.
	 *
	 * Thus the needed metadata size is at most root_level * nodesize,
	 * doubled since we have two trees to COW.
	 */
	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
	min_reserved = fs_info->nodesize * reserve_level * 2;
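	/*
	 * Worked example (illustrative numbers, not taken from this file):
	 * with a 16KiB nodesize and a reloc root at level 3, reserve_level is
	 * 3 and min_reserved is 16KiB * 3 * 2 = 96KiB, refilled on each
	 * iteration of the loop below.
	 */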
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     min_reserved,
					     BTRFS_RESERVE_FLUSH_LIMIT);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}

		/*
		 * At this point we no longer have a reloc_control, so we can't
		 * depend on btrfs_init_reloc_root to update our last_trans.
		 *
		 * But that's ok, we started the trans handle on our
		 * corresponding fs_root, which means it's been added to the
		 * dirty list.  At commit time we'll still call
		 * btrfs_update_reloc_root() and update our root item
		 * appropriately.
		 */
		btrfs_set_root_last_trans(reloc_root, trans->transid);
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0)
			goto out;
		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * Save the merging progress in drop_progress; this is OK
		 * since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		btrfs_set_root_drop_level(root_item, level);

		btrfs_end_transaction_throttle(trans);
		trans = NULL;

		btrfs_btree_balance_dirty(fs_info);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * Handle the case where only one block in the fs tree needs to be
	 * relocated and that block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
			      BTRFS_NESTING_COW);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
out:
	btrfs_free_path(path);

	if (ret == 0) {
		ret = insert_dirty_subvol(trans, rc, root);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans);

	btrfs_btree_balance_dirty(fs_info);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return ret;
}
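
/*
 * Descriptive note (added, derived from the code below): prepare to merge
 * the reloc trees back into their subvolume trees.  Reserve space for the
 * merge, set root_item refs to 1 on every reloc root so a crash-recovery run
 * knows it should resume merging, and commit the transaction so that state
 * is persisted before merge_reloc_roots() runs.
 */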
static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
			goto again;
		}
	}

	rc->merge_reloc_tree = true;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (IS_ERR(root)) {
			/*
			 * Even if we have an error we need this reloc root
			 * back on our list so we can clean up properly.
			 */
			list_add(&reloc_root->root_list, &reloc_roots);
			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
			if (!err)
				err = PTR_ERR(root);
			break;
		}

		if (unlikely(root->reloc_root != reloc_root)) {
			if (root->reloc_root) {
				btrfs_err(fs_info,
"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
					  btrfs_root_id(root),
					  btrfs_root_id(root->reloc_root),
					  root->reloc_root->root_key.type,
					  root->reloc_root->root_key.offset,
					  btrfs_root_generation(
						  &root->reloc_root->root_item),
					  btrfs_root_id(reloc_root),
					  reloc_root->root_key.type,
					  reloc_root->root_key.offset,
					  btrfs_root_generation(
						  &reloc_root->root_item));
			} else {
				btrfs_err(fs_info,
"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
					  btrfs_root_id(root),
					  btrfs_root_id(reloc_root),
					  reloc_root->root_key.type,
					  reloc_root->root_key.offset,
					  btrfs_root_generation(
						  &reloc_root->root_item));
			}
			list_add(&reloc_root->root_list, &reloc_roots);
			btrfs_put_root(root);
			btrfs_abort_transaction(trans, -EUCLEAN);
			if (!err)
				err = -EUCLEAN;
			break;
		}

		/*
		 * Set the reference count to 1, so btrfs_recover_relocation()
		 * knows it should resume merging.
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		ret = btrfs_update_reloc_root(trans, root);

		/*
		 * Even if we have an error we need this reloc root back on our
		 * list so we can clean up properly.
		 */
		list_add(&reloc_root->root_list, &reloc_roots);
		btrfs_put_root(root);

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (!err)
				err = ret;
			break;
		}
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		err = btrfs_commit_transaction(trans);
	else
		btrfs_end_transaction(trans);
	return err;
}
static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root, *tmp;

	list_for_each_entry_safe(reloc_root, tmp, list, root_list)
		__del_reloc_root(reloc_root);
}
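
/*
 * Descriptive note (added, derived from the code below): merge every reloc
 * root queued on reloc_control::reloc_roots into its subvolume tree, or
 * queue it on dirty_subvol_roots for cleanup if its root_item refs are 0
 * (merging already finished or was never started).
 */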
static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * This serializes us with btrfs_record_root_in_transaction(); we have
	 * to make sure nobody is in the middle of adding their roots to the
	 * list while we are doing this splice.
	 */
	mutex_lock(&fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			if (WARN_ON(IS_ERR(root))) {
				/*
				 * For recovery we read the fs roots on mount,
				 * and if we didn't find the root then we marked
				 * the reloc root as a garbage root.  For normal
				 * relocation obviously the root should exist in
				 * memory.  However there's no reason we can't
				 * handle the error properly here just in case.
				 */
				ret = PTR_ERR(root);
				goto out;
			}
			if (WARN_ON(root->reloc_root != reloc_root)) {
				/*
				 * This can happen if on-disk metadata has some
				 * corruption, e.g. bad reloc tree key offset.
				 */
				ret = -EINVAL;
				goto out;
			}
			ret = merge_reloc_root(rc, root);
			btrfs_put_root(root);
			if (ret) {
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			if (!IS_ERR(root)) {
				if (root->reloc_root == reloc_root) {
					root->reloc_root = NULL;
					btrfs_put_root(reloc_root);
				}
				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
					  &root->state);
				btrfs_put_root(root);
			}

			list_del_init(&reloc_root->root_list);
			/* Don't forget to queue this reloc root for cleanup */
			list_add_tail(&reloc_root->reloc_dirty_list,
				      &rc->dirty_subvol_roots);
		}
	}

	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret, NULL);
		free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&fs_info->reloc_mutex);
		free_reloc_roots(&reloc_roots);
	}

	/*
	 * We used to have
	 *
	 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
	 *
	 * here, but it's wrong.  If we fail to start the transaction in
	 * prepare_to_merge() we will have only 0 ref reloc roots, none of
	 * which have actually been removed from the reloc_root_tree rb tree.
	 * This is fine because we're bailing here, and we hold a reference on
	 * the root for the list that holds it, so these roots will be cleaned
	 * up when we do the reloc_dirty_list afterwards.  Meanwhile the
	 * root->reloc_root will be cleaned up on unmount.
	 *
	 * The remaining nodes will be cleaned up by free_reloc_control.
	 */
}
static void free_block_list(struct rb_root *blocks)
{
	struct tree_block *block;
	struct rb_node *rb_node;

	while ((rb_node = rb_first(blocks))) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		rb_erase(rb_node, blocks);
		kfree(block);
	}
}
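
/*
 * Descriptive note (added, derived from the code below): record the
 * subvolume root owning @reloc_root in the current transaction, unless the
 * reloc root was already recorded in this transaction.
 */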
static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
	struct btrfs_root *root;
	int ret;

	if (btrfs_get_root_last_trans(reloc_root) == trans->transid)
		return 0;

	root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);

	/*
	 * This should succeed, since we can't have a reloc root without having
	 * already looked up the actual root and created the reloc root for this
	 * root.
	 *
	 * However if there's some sort of corruption where we have a ref to a
	 * reloc root without a corresponding root this could return ENOENT.
	 */
	if (IS_ERR(root)) {
		ASSERT(0);
		return PTR_ERR(root);
	}
	if (root->reloc_root != reloc_root) {
		ASSERT(0);
		btrfs_err(fs_info,
			  "root %llu has two reloc roots associated with it",
			  reloc_root->root_key.offset);
		btrfs_put_root(root);
		return -EUCLEAN;
	}
	ret = btrfs_record_root_in_trans(trans, root);
	btrfs_put_root(root);

	return ret;
}
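
/*
 * Descriptive note (added, derived from the code below): walk up the backref
 * tree from @node until a root is found, record that root in the
 * transaction, and return the reloc root to relocate into.  On the way this
 * initializes new_bytenr for freshly created reloc root nodes and fills
 * backref_cache::path for btrfs_reloc_cow_block().
 */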
static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct btrfs_backref_node *node,
				     struct btrfs_backref_edge *edges[])
{
	struct btrfs_backref_node *next;
	struct btrfs_root *root;
	int index = 0;
	int ret;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;

		/*
		 * If there is no root, then our references for this block are
		 * incomplete, as we should be able to walk all the way up to a
		 * block that is owned by a root.
		 *
		 * This path is only for SHAREABLE roots, so if we come upon a
		 * non-SHAREABLE root then we have backrefs that resolve
		 * improperly.
		 *
		 * Both of these cases indicate file system corruption, or a
		 * bug in the backref walking code.
		 */
		if (!root) {
			ASSERT(0);
			btrfs_err(trans->fs_info,
		"bytenr %llu doesn't have a backref path ending in a root",
				  node->bytenr);
			return ERR_PTR(-EUCLEAN);
		}
		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
			ASSERT(0);
			btrfs_err(trans->fs_info,
	"bytenr %llu has multiple refs with one ending in a non-shareable root",
				  node->bytenr);
			return ERR_PTR(-EUCLEAN);
		}

		if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
			ret = record_reloc_root_in_trans(trans, root);
			if (ret)
				return ERR_PTR(ret);
			break;
		}

		ret = btrfs_record_root_in_trans(trans, root);
		if (ret)
			return ERR_PTR(ret);
		root = root->reloc_root;

		/*
		 * We could have raced with another thread which failed, so
		 * root->reloc_root may not be set, return ENOENT in this case.
		 */
		if (!root)
			return ERR_PTR(-ENOENT);

		if (next->new_bytenr != root->node->start) {
			/*
			 * We just created the reloc root, so we shouldn't have
			 * ->new_bytenr set and this shouldn't be in the changed
			 * list.  If it is then we have multiple roots pointing
			 * at the same bytenr which indicates corruption, or
			 * we've made a mistake in the backref walking code.
			 */
			ASSERT(next->new_bytenr == 0);
			ASSERT(list_empty(&next->list));
			if (next->new_bytenr || !list_empty(&next->list)) {
				btrfs_err(trans->fs_info,
	"bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
					  node->bytenr, next->bytenr);
				return ERR_PTR(-EUCLEAN);
			}

			next->new_bytenr = root->node->start;
			btrfs_put_root(next->root);
			next->root = btrfs_grab_root(root);
			ASSERT(next->root);
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			mark_block_processed(rc, next);
			break;
		}

		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root) {
		/*
		 * This can happen if there's fs corruption or if there's a bug
		 * in the backref lookup code.
		 */
		ASSERT(0);
		return ERR_PTR(-ENOENT);
	}

	next = node;
	/* setup backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}
/*
 * Select a tree root for relocation.
 *
 * Return NULL if the block is shareable but not the root of its tree; the
 * caller should use do_relocation() in this case.
 * Return a tree root pointer if the block belongs to a non-shareable
 * (COW-only) tree, or is the root block of a shareable fs tree.
 * Return -ENOENT if the block is only referenced as the root of reloc trees.
 */
static noinline_for_stack
struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *next;
	struct btrfs_root *root;
	struct btrfs_root *fs_root = NULL;
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;

		/*
		 * This can occur if we have incomplete extent refs leading all
		 * the way up a particular path, in this case return -EUCLEAN.
		 */
		if (!root)
			return ERR_PTR(-EUCLEAN);

		/* No other choice for non-shareable tree */
		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
			return root;

		if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID)
			fs_root = root;

		if (next != node)
			return NULL;

		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}

	if (!fs_root)
		return ERR_PTR(-ENOENT);
	return fs_root;
}
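
/*
 * Descriptive note (added, derived from the code below): count the
 * not-yet-processed tree blocks reachable upwards from @node and return
 * their total size in bytes, i.e. number of blocks * nodesize.
 */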
static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc,
						  struct btrfs_backref_node *node)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_backref_node *next = node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(node->processed);

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed)
				break;

			num_bytes += fs_info->nodesize;

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct btrfs_backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}
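
/*
 * Descriptive note (added, derived from the code below): reserve metadata
 * space for relocating @node and the blocks above it.  The estimate from
 * calcu_metadata_size() is doubled (presumably to cover COW of both the
 * source and the destination tree).  Returns -EAGAIN, after growing the
 * block reservation target, when the limited flushing allowed inside a
 * transaction cannot satisfy the reservation.
 */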
static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct btrfs_backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;
	u64 tmp;

	num_bytes = calcu_metadata_size(rc, node) * 2;

	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;

	/*
	 * We are under a transaction here so we can only do limited flushing.
	 * If we get an enospc just kick back -EAGAIN so we know to drop the
	 * transaction and try to refill when we can flush all the things.
	 */
	ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret) {
		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
		while (tmp <= rc->reserved_bytes)
			tmp <<= 1;
		/*
		 * Only one thread can access block_rsv at this point, so we
		 * don't need to hold a lock to protect it.  We expand the
		 * reservation size here to allow enough space for relocation,
		 * and we will return early in the ENOSPC case.
		 */
		rc->block_rsv->size = tmp + fs_info->nodesize *
				      RELOCATION_RESERVED_NODES;
		return -EAGAIN;
	}

	return 0;
}
/*
 * Relocate a block tree, and then update pointers in upper level blocks that
 * reference the block to point to the new location.
 *
 * If called by link_to_upper(), the block has already been relocated; in
 * that case this function just updates pointers.
 */
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	int slot;
	int ret = 0;

	/*
	 * If we are lowest then this is the first time we're processing this
	 * block, and thus shouldn't have an eb associated with it yet.
	 */
	ASSERT(!lowest || !node->eb);

	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		cond_resched();

		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto next;
		}

		if (upper->eb && !upper->locked) {
			if (!lowest) {
				ret = btrfs_bin_search(upper->eb, 0, key, &slot);
				if (ret < 0)
					goto next;
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			btrfs_backref_drop_node_buffer(upper);
		}

		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret) {
				if (ret > 0)
					ret = -ENOENT;

				btrfs_release_path(path);
				break;
			}

			if (!upper->eb) {
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}

			upper->locked = 1;
			path->locks[upper->level] = 0;

			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, 0, key, &slot);
			if (ret < 0)
				goto next;
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			if (bytenr != node->bytenr) {
				btrfs_err(root->fs_info,
	"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
					  bytenr, node->bytenr, slot,
					  upper->eb->start);
				ret = -EIO;
				goto next;
			}
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}

		blocksize = root->fs_info->nodesize;
		eb = btrfs_read_node_slot(upper->eb, slot);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			goto next;
		}
		btrfs_tree_lock(eb);

		if (!node->eb) {
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb, BTRFS_NESTING_COW);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0)
				goto next;
			/*
			 * We've just COWed this block, it should have updated
			 * the correct backref node entry.
			 */
			ASSERT(node->eb == eb);
		} else {
			struct btrfs_ref ref = {
				.action = BTRFS_ADD_DELAYED_REF,
				.bytenr = node->eb->start,
				.num_bytes = blocksize,
				.parent = upper->eb->start,
				.owning_root = btrfs_header_owner(upper->eb),
				.ref_root = btrfs_header_owner(upper->eb),
			};

			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(trans, upper->eb);

			btrfs_init_tree_ref(&ref, node->level,
					    btrfs_root_id(root), false);
			ret = btrfs_inc_extent_ref(trans, &ref);
			if (!ret)
				ret = btrfs_drop_subtree(trans, root, eb,
							 upper->eb);
			if (ret)
				btrfs_abort_transaction(trans, ret);
		}
next:
		if (!upper->pending)
			btrfs_backref_drop_node_buffer(upper);
		else
			btrfs_backref_unlock_node_buffer(upper);
		if (ret)
			break;
	}

	if (!ret && node->pending) {
		btrfs_backref_drop_node_buffer(node);
		list_move_tail(&node->list, &rc->backref_cache.changed);
		node->pending = 0;
	}

	path->lowest_level = 0;

	/*
	 * We should have allocated all of our space in the block rsv and thus
	 * shouldn't ENOSPC.
	 */
	ASSERT(ret != -ENOSPC);
	return ret;
}
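
/*
 * Descriptive note (added, derived from the code below): the block for
 * @node has already been relocated, so only update the pointers in the
 * upper level blocks that reference it.
 */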
static int link_to_upper(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_backref_node *node,
			 struct btrfs_path *path)
{
	struct btrfs_key key;

	btrfs_node_key_to_cpu(node->eb, &key, 0);
	return do_relocation(trans, rc, node, &key, path, 0);
}
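
/*
 * Descriptive note (added, derived from the code below): link every node
 * still pending in the backref cache to its upper levels, level by level,
 * preserving the first error seen while keeping the pending lists intact.
 */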
static int finish_pending_nodes(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_path *path, int err)
{
	LIST_HEAD(list);
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct btrfs_backref_node *node;
	int level;
	int ret;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		while (!list_empty(&cache->pending[level])) {
			node = list_entry(cache->pending[level].next,
					  struct btrfs_backref_node, list);
			list_move_tail(&node->list, &list);
			BUG_ON(!node->pending);

			if (!err) {
				ret = link_to_upper(trans, rc, node, path);
				if (ret < 0)
					err = ret;
			}
		}
		list_splice_init(&list, &cache->pending[level]);
	}
	return err;
}
/*
 * mark a block and all blocks that directly/indirectly reference the block
 * as processed.
 */
static void update_processed_blocks(struct reloc_control *rc,
				    struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *next = node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed)
				break;

			mark_block_processed(rc, next);

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct btrfs_backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
}
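
/*
 * Descriptive note (added, derived from the code below): return 1 if the
 * tree block at @bytenr is already marked processed in
 * reloc_control::processed_blocks, 0 otherwise.
 */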
static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
	u32 blocksize = rc->extent_root->fs_info->nodesize;

	if (test_range_bit(&rc->processed_blocks, bytenr,
			   bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
		return 1;
	return 0;
}
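
/*
 * Descriptive note (added, derived from the code below): read the tree block
 * for @block, verified against the expected level, owner and transid, and
 * record its first key so the block can be searched for later.
 */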
static int get_tree_block_key(struct btrfs_fs_info *fs_info,
			      struct tree_block *block)
{
	struct btrfs_tree_parent_check check = {
		.level = block->level,
		.owner_root = block->owner,
		.transid = block->key.offset
	};
	struct extent_buffer *eb;

	eb = read_tree_block(fs_info, block->bytenr, &check);
	if (IS_ERR(eb))
		return PTR_ERR(eb);
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return -EIO;
	}
	if (block->level == 0)
		btrfs_item_key_to_cpu(eb, &block->key, 0);
	else
		btrfs_node_key_to_cpu(eb, &block->key, 0);
	free_extent_buffer(eb);
	block->key_ready = true;
	return 0;
}
/*
 * helper function to relocate a tree block
 */
static int relocate_tree_block(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct btrfs_backref_node *node,
			       struct btrfs_key *key,
			       struct btrfs_path *path)
{
	struct btrfs_root *root;
	int ret = 0;

	if (!node)
		return 0;

	/*
	 * If we fail here we want to drop our backref_node because we are
	 * going to start over and regenerate the tree for it.
	 */
	ret = reserve_metadata_space(trans, rc, node);
	if (ret)
		goto out;

	BUG_ON(node->processed);
	root = select_one_root(node);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);

		/* See explanation in select_one_root for the -EUCLEAN case. */
		ASSERT(ret == -ENOENT);
		if (ret == -ENOENT) {
			ret = 0;
			update_processed_blocks(rc, node);
		}
		goto out;
	}

	if (root) {
		if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
			/*
			 * This block was the root block of a root, and this is
			 * the first time we're processing the block and thus it
			 * should not have had the ->new_bytenr modified and
			 * should have not been included on the changed list.
			 *
			 * However in the case of corruption we could have
			 * multiple refs pointing to the same block improperly,
			 * and thus we would trip over these checks.  ASSERT()
			 * for the developer case, because it could indicate a
			 * bug in the backref code, however error out for a
			 * normal user in the case of corruption.
			 */
			ASSERT(node->new_bytenr == 0);
			ASSERT(list_empty(&node->list));
			if (node->new_bytenr || !list_empty(&node->list)) {
				btrfs_err(root->fs_info,
				  "bytenr %llu has improper references to it",
					  node->bytenr);
				ret = -EUCLEAN;
				goto out;
			}
			ret = btrfs_record_root_in_trans(trans, root);
			if (ret)
				goto out;
			/*
			 * Another thread could have failed, need to check if
			 * we have reloc_root actually set.
			 */
			if (!root->reloc_root) {
				ret = -ENOENT;
				goto out;
			}
			root = root->reloc_root;
			node->new_bytenr = root->node->start;
			btrfs_put_root(node->root);
			node->root = btrfs_grab_root(root);
			ASSERT(node->root);
			list_add_tail(&node->list, &rc->backref_cache.changed);
		} else {
			path->lowest_level = node->level;
			if (root == root->fs_info->chunk_root)
				btrfs_reserve_chunk_metadata(trans, false);
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			btrfs_release_path(path);
			if (root == root->fs_info->chunk_root)
				btrfs_trans_release_chunk_metadata(trans);
			if (ret > 0)
				ret = 0;
		}
		if (!ret)
			update_processed_blocks(rc, node);
	} else {
		ret = do_relocation(trans, rc, node, key, path, 1);
	}
out:
	if (ret || node->level == 0 || node->cowonly)
		btrfs_backref_cleanup_node(&rc->backref_cache, node);
	return ret;
}
/*
 * relocate a list of blocks
 */
static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc, struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_backref_node *node;
	struct btrfs_path *path;
	struct tree_block *block;
	struct tree_block *next;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_blocks;
	}

	/* Kick in readahead for tree blocks with missing keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready)
			btrfs_readahead_tree_block(fs_info, block->bytenr,
						   block->owner, 0,
						   block->level);
	}

	/* Get first keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready) {
			ret = get_tree_block_key(fs_info, block);
			if (ret)
				goto out_free_path;
		}
	}

	/* Do tree relocation */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		node = build_backref_tree(trans, rc, &block->key,
					  block->level, block->bytenr);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			goto out;
		}

		ret = relocate_tree_block(trans, rc, node, &block->key,
					  path);
		if (ret < 0)
			break;
	}
out:
	ret = finish_pending_nodes(trans, rc, path, ret);

out_free_path:
	btrfs_free_path(path);
out_free_blocks:
	free_block_list(blocks);
	return ret;
}
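
/*
 * Descriptive note (added, derived from the code below): preallocate file
 * extents in the data reloc inode covering the current cluster, one range
 * per cluster boundary, after first working around the subpage i_size
 * corner case explained inside the function.
 */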
static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control *rc)
{
	const struct file_extent_cluster *cluster = &rc->cluster;
	struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
	u64 alloc_hint = 0;
	u64 start;
	u64 end;
	u64 offset = inode->reloc_block_group_start;
	u64 num_bytes;
	int nr;
	int ret = 0;
	u64 i_size = i_size_read(&inode->vfs_inode);
	u64 prealloc_start = cluster->start - offset;
	u64 prealloc_end = cluster->end - offset;
	u64 cur_offset = prealloc_start;

	/*
	 * For subpage case, previous i_size may not be aligned to PAGE_SIZE.
	 * This means the range [i_size, PAGE_END + 1) is filled with zeros by
	 * the btrfs_do_readpage() call of a previously relocated file cluster.
	 *
	 * If the current cluster starts in the above range, btrfs_do_readpage()
	 * will skip the read, and relocate_one_folio() will later writeback
	 * the padding zeros as new data, causing data corruption.
	 *
	 * Here we have to manually invalidate the range [i_size, PAGE_END + 1).
	 */
	if (!PAGE_ALIGNED(i_size)) {
		struct address_space *mapping = inode->vfs_inode.i_mapping;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		const u32 sectorsize = fs_info->sectorsize;
		struct folio *folio;

		ASSERT(sectorsize < PAGE_SIZE);
		ASSERT(IS_ALIGNED(i_size, sectorsize));

		/*
		 * Subpage can't handle page with DIRTY but without UPTODATE
		 * bit as it can lead to the following deadlock:
		 *
		 * btrfs_read_folio()
		 * | Page already *locked*
		 * |- btrfs_lock_and_flush_ordered_range()
		 *    |- btrfs_start_ordered_extent()
		 *       |- extent_write_cache_pages()
		 *          |- lock_page()
		 *             We try to lock the page we already hold.
		 *
		 * Here we just writeback the whole data reloc inode, so that
		 * we will be ensured to have no dirty range in the page, and
		 * are safe to clear the uptodate bits.
		 *
		 * This shouldn't cause too much overhead, as we need to write
		 * the data back anyway.
		 */
		ret = filemap_write_and_wait(mapping);
		if (ret < 0)
			return ret;

		clear_extent_bits(&inode->io_tree, i_size,
				  round_up(i_size, PAGE_SIZE) - 1,
				  EXTENT_UPTODATE);
		folio = filemap_lock_folio(mapping, i_size >> PAGE_SHIFT);
		/*
		 * If the folio is freed we don't need to do anything, as we
		 * will re-read the whole folio anyway.
		 */
		if (!IS_ERR(folio)) {
			btrfs_subpage_clear_uptodate(fs_info, folio, i_size,
					round_up(i_size, PAGE_SIZE) - i_size);
			folio_unlock(folio);
			folio_put(folio);
		}
	}

	BUG_ON(cluster->start != cluster->boundary[0]);
	ret = btrfs_alloc_data_chunk_ondemand(inode,
					      prealloc_end + 1 - prealloc_start);
	if (ret)
		return ret;

	btrfs_inode_lock(inode, 0);
	for (nr = 0; nr < cluster->nr; nr++) {
		struct extent_state *cached_state = NULL;

		start = cluster->boundary[nr] - offset;
		if (nr + 1 < cluster->nr)
			end = cluster->boundary[nr + 1] - 1 - offset;
		else
			end = cluster->end - offset;

		lock_extent(&inode->io_tree, start, end, &cached_state);
		num_bytes = end + 1 - start;
		ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
						num_bytes, num_bytes,
						end + 1, &alloc_hint);
		cur_offset = end + 1;
		unlock_extent(&inode->io_tree, start, end, &cached_state);
		if (ret)
			break;
	}
	btrfs_inode_unlock(inode, 0);

	if (cur_offset < prealloc_end)
		btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
					       prealloc_end + 1 - cur_offset);
	return ret;
}
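
/*
 * Descriptive note (added, derived from the code below): insert a single
 * pinned extent map covering the whole cluster, mapping the data reloc
 * inode's file range to the cluster's disk range.
 */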
static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_control *rc)
{
	struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 offset = inode->reloc_block_group_start;
	u64 start = rc->cluster.start - offset;
	u64 end = rc->cluster.end - offset;
	int ret = 0;

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;

	em->start = start;
	em->len = end + 1 - start;
	em->disk_bytenr = rc->cluster.start;
	em->disk_num_bytes = em->len;
	em->ram_bytes = em->len;
	em->flags |= EXTENT_FLAG_PINNED;

	lock_extent(&inode->io_tree, start, end, &cached_state);
	ret = btrfs_replace_extent_map_range(inode, em, false);
	unlock_extent(&inode->io_tree, start, end, &cached_state);
	free_extent_map(em);
	return ret;
}
/*
 * Allow error injection to test balance/relocation cancellation
 */
noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
{
	return atomic_read(&fs_info->balance_cancel_req) ||
		atomic_read(&fs_info->reloc_cancel_req) ||
		fatal_signal_pending(current);
}
ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);

static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
				    int cluster_nr)
{
	/* Last extent, use cluster end directly */
	if (cluster_nr >= cluster->nr - 1)
		return cluster->end;

	/* Use the next boundary's start */
	return cluster->boundary[cluster_nr + 1] - 1;
}
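
/*
 * Descriptive note (added, derived from the code below): read one folio of
 * the data reloc inode, then mark every cluster extent range inside it
 * delalloc and dirty so writeback copies the data into the preallocated
 * destination, setting EXTENT_BOUNDARY at each cluster boundary so extents
 * are not merged across boundaries.
 */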
static int relocate_one_folio(struct reloc_control *rc,
			      struct file_ra_state *ra,
			      int *cluster_nr, unsigned long index)
{
	const struct file_extent_cluster *cluster = &rc->cluster;
	struct inode *inode = rc->data_inode;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	u64 offset = BTRFS_I(inode)->reloc_block_group_start;
	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	struct folio *folio;
	u64 folio_start;
	u64 folio_end;
	u64 cur;
	int ret;
	const bool use_rst = btrfs_need_stripe_tree_update(fs_info,
							   rc->block_group->flags);

	ASSERT(index <= last_index);
again:
	folio = filemap_lock_folio(inode->i_mapping, index);
	if (IS_ERR(folio)) {
		/*
		 * On relocation we're doing readahead on the relocation inode,
		 * but if the filesystem is backed by a RAID stripe tree we can
		 * get ENOENT (e.g. due to preallocated extents not being
		 * mapped in the RST) from the lookup.
		 *
		 * But readahead doesn't handle the error and submits invalid
		 * reads to the device, causing assertion failures.
		 */
		if (!use_rst)
			page_cache_sync_readahead(inode->i_mapping, ra, NULL,
						  index, last_index + 1 - index);
		folio = __filemap_get_folio(inode->i_mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    mask);
		if (IS_ERR(folio))
			return PTR_ERR(folio);
	}

	WARN_ON(folio_order(folio));

	if (folio_test_readahead(folio) && !use_rst)
		page_cache_async_readahead(inode->i_mapping, ra, NULL,
					   folio, last_index + 1 - index);

	if (!folio_test_uptodate(folio)) {
		btrfs_read_folio(NULL, folio);
		folio_lock(folio);
		if (!folio_test_uptodate(folio)) {
			ret = -EIO;
			goto release_folio;
		}
		if (folio->mapping != inode->i_mapping) {
			folio_unlock(folio);
			folio_put(folio);
			goto again;
		}
	}

	/*
	 * We could have lost folio private when we dropped the lock to read
	 * the folio above, make sure we call set_folio_extent_mapped() here
	 * so we have any of the subpage blocksize stuff we need in place.
	 */
	ret = set_folio_extent_mapped(folio);
	if (ret < 0)
		goto release_folio;

	folio_start = folio_pos(folio);
	folio_end = folio_start + PAGE_SIZE - 1;

	/*
	 * Start from the cluster, as for subpage case, the cluster can start
	 * inside the folio.
	 */
	cur = max(folio_start, cluster->boundary[*cluster_nr] - offset);
	while (cur <= folio_end) {
		struct extent_state *cached_state = NULL;
		u64 extent_start = cluster->boundary[*cluster_nr] - offset;
		u64 extent_end = get_cluster_boundary_end(cluster,
							  *cluster_nr) - offset;
		u64 clamped_start = max(folio_start, extent_start);
		u64 clamped_end = min(folio_end, extent_end);
		u32 clamped_len = clamped_end + 1 - clamped_start;

		/* Reserve metadata for this range */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      clamped_len, clamped_len,
						      false);
		if (ret)
			goto release_folio;

		/* Mark the range delalloc and dirty for later writeback */
		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
			    &cached_state);
		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
						clamped_end, 0, &cached_state);
		if (ret) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree,
					 clamped_start, clamped_end,
					 EXTENT_LOCKED | EXTENT_BOUNDARY,
					 &cached_state);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							clamped_len, true);
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       clamped_len);
			goto release_folio;
		}
		btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len);

		/*
		 * Set the boundary if it's inside the folio.
		 * Data relocation requires the destination extents to have the
		 * same size as the source.
		 * EXTENT_BOUNDARY bit prevents current extent from being merged
		 * with previous extent.
		 */
		if (in_range(cluster->boundary[*cluster_nr] - offset,
			     folio_start, PAGE_SIZE)) {
			u64 boundary_start = cluster->boundary[*cluster_nr] -
					     offset;
			u64 boundary_end = boundary_start +
					   fs_info->sectorsize - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree,
				       boundary_start, boundary_end,
				       EXTENT_BOUNDARY, NULL);
		}
		unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
			      &cached_state);
		btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
		cur += clamped_len;

		/* Crossed extent end, go to next extent */
		if (cur >= extent_end) {
			(*cluster_nr)++;
			/* Just finished the last extent of the cluster, exit. */
			if (*cluster_nr >= cluster->nr)
				break;
		}
	}
	folio_unlock(folio);
	folio_put(folio);

	balance_dirty_pages_ratelimited(inode->i_mapping);
	btrfs_throttle(fs_info);
	if (btrfs_should_cancel_balance(fs_info))
		ret = -ECANCELED;
	return ret;

release_folio:
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}
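
/*
 * Descriptive note (added, derived from the code below): relocate the
 * current file extent cluster by preallocating the destination ranges,
 * pinning the extent mapping, then running relocate_one_folio() over every
 * folio the cluster covers.
 */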
static int relocate_file_extent_cluster(struct reloc_control *rc)
{
	struct inode *inode = rc->data_inode;
	const struct file_extent_cluster *cluster = &rc->cluster;
	u64 offset = BTRFS_I(inode)->reloc_block_group_start;
	unsigned long index;
	unsigned long last_index;
	struct file_ra_state *ra;
	int cluster_nr = 0;
	int ret = 0;

	if (!cluster->nr)
		return 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	ret = prealloc_file_extent_cluster(rc);
	if (ret)
		goto out;

	file_ra_state_init(ra, inode->i_mapping);

	ret = setup_relocation_extent_mapping(rc);
	if (ret)
		goto out;

	last_index = (cluster->end - offset) >> PAGE_SHIFT;
	for (index = (cluster->start - offset) >> PAGE_SHIFT;
	     index <= last_index && !ret; index++)
		ret = relocate_one_folio(rc, ra, &cluster_nr, index);
	if (ret == 0)
		WARN_ON(cluster_nr != cluster->nr);
out:
	kfree(ra);
	return ret;
}
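
/*
 * Accumulate adjacent data extents that share one owning root into
 * rc->cluster, and flush the cluster through relocate_file_extent_cluster()
 * whenever it can no longer grow: on a non-adjacent extent, on an owning
 * root change, or once MAX_EXTENTS entries have been collected.
 */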
static noinline_for_stack int relocate_data_extent(struct reloc_control *rc,
					const struct btrfs_key *extent_key)
{
	struct inode *inode = rc->data_inode;
	struct file_extent_cluster *cluster = &rc->cluster;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
		ret = relocate_file_extent_cluster(rc);
		if (ret)
			return ret;
		cluster->nr = 0;
	}

	/*
	 * Under simple quotas, we set root->relocation_src_root when we find
	 * the extent. If adjacent extents have different owners, we can't merge
	 * them while relocating. Handle this by storing the owning root that
	 * started a cluster and if we see an extent from a different root break
	 * cluster formation (just like the above case of non-adjacent extents).
	 *
	 * Without simple quotas, relocation_src_root is always 0, so we should
	 * never see a mismatch, and it should have no effect on relocation
	 * clusters.
	 */
	if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) {
		u64 tmp = root->relocation_src_root;

		/*
		 * root->relocation_src_root is the state that actually affects
		 * the preallocation we do here, so set it to the root owning
		 * the cluster we need to relocate.
		 */
		root->relocation_src_root = cluster->owning_root;
		ret = relocate_file_extent_cluster(rc);
		if (ret)
			return ret;
		cluster->nr = 0;
		/* And reset it back for the current extent's owning root. */
		root->relocation_src_root = tmp;
	}

	if (!cluster->nr) {
		cluster->start = extent_key->objectid;
		cluster->owning_root = root->relocation_src_root;
	} else {
		BUG_ON(cluster->nr >= MAX_EXTENTS);
	}
	cluster->end = extent_key->objectid + extent_key->offset - 1;
	cluster->boundary[cluster->nr] = extent_key->objectid;
	cluster->nr++;

	if (cluster->nr >= MAX_EXTENTS) {
		ret = relocate_file_extent_cluster(rc);
		if (ret)
			return ret;
		cluster->nr = 0;
	}
	return 0;
}

/*
 * Helper to add a tree block to the list of blocks to relocate.
 * The major work is getting the generation and level of the block.
 */
static int add_tree_block(struct reloc_control *rc,
			  const struct btrfs_key *extent_key,
			  struct btrfs_path *path,
			  struct rb_root *blocks)
{
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	struct tree_block *block;
	struct rb_node *rb_node;
	u32 item_size;
	int level = -1;
	u64 generation;
	u64 owner = 0;

	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, path->slots[0]);

	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
	    item_size >= sizeof(*ei) + sizeof(*bi)) {
		unsigned long ptr = 0, end;

		ei = btrfs_item_ptr(eb, path->slots[0],
				    struct btrfs_extent_item);
		end = (unsigned long)ei + item_size;
		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
			bi = (struct btrfs_tree_block_info *)(ei + 1);
			level = btrfs_tree_block_level(eb, bi);
			ptr = (unsigned long)(bi + 1);
		} else {
			level = (int)extent_key->offset;
			ptr = (unsigned long)(ei + 1);
		}
		generation = btrfs_extent_generation(eb, ei);

		/*
		 * We're reading random blocks without knowing their owner ahead
		 * of time. This is ok most of the time, as all reloc roots and
		 * fs roots have the same lock type. However normal trees do
		 * not, and the only way to know ahead of time is to read the
		 * inline ref offset. We know it's an fs root if
		 *
		 * 1. There's more than one ref.
		 * 2. There's a SHARED_DATA_REF_KEY set.
		 * 3. FULL_BACKREF is set on the flags.
		 *
		 * Otherwise it's safe to assume that the ref offset == the
		 * owner of this block, so we can use that when calling
		 * read_tree_block.
		 */
		if (btrfs_extent_refs(eb, ei) == 1 &&
		    !(btrfs_extent_flags(eb, ei) &
		      BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
		    ptr < end) {
			struct btrfs_extent_inline_ref *iref;
			int type;

			iref = (struct btrfs_extent_inline_ref *)ptr;
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID)
				return -EINVAL;
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				owner = btrfs_extent_inline_ref_offset(eb, iref);
		}
	} else {
		btrfs_print_leaf(eb);
		btrfs_err(rc->block_group->fs_info,
			  "unrecognized tree backref at tree block %llu slot %u",
			  eb->start, path->slots[0]);
		btrfs_release_path(path);
		return -EUCLEAN;
	}

	btrfs_release_path(path);

	BUG_ON(level == -1);

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block)
		return -ENOMEM;

	block->bytenr = extent_key->objectid;
	block->key.objectid = rc->extent_root->fs_info->nodesize;
	block->key.offset = generation;
	block->level = level;
	block->key_ready = false;
	block->owner = owner;

	rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
	if (rb_node)
		btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
				    -EEXIST);

	return 0;
}

/*
 * Helper to add tree blocks for backrefs of type BTRFS_SHARED_DATA_REF_KEY.
 */
static int __add_tree_block(struct reloc_control *rc,
			    u64 bytenr, u32 blocksize,
			    struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	if (tree_block_processed(bytenr, rc))
		return 0;

	if (rb_simple_search(blocks, bytenr))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	key.objectid = bytenr;
	if (skinny) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = (u64)-1;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = blocksize;
	}

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret > 0 && skinny) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    (key.type == BTRFS_METADATA_ITEM_KEY ||
			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
			      key.offset == blocksize)))
				ret = 0;
		}

		if (ret) {
			skinny = false;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret) {
		ASSERT(ret == 1);
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
			  "tree block extent item (%llu) is not found in extent tree",
			  bytenr);
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
}
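
/*
 * Truncate the free space cache inode of a block group so its data extents
 * no longer get in the way of relocation. If @inode is NULL, the inode is
 * looked up by @ino in the tree root.
 */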
static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group *block_group,
				    struct inode *inode,
				    u64 ino)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (inode)
		goto truncate;

	inode = btrfs_iget(ino, root);
	if (IS_ERR(inode))
		return -ENOENT;

truncate:
	ret = btrfs_check_trunc_cache_free_space(fs_info,
						 &fs_info->global_block_rsv);
	if (ret)
		goto out;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	iput(inode);
	return ret;
}

/*
 * Locate the free space cache EXTENT_DATA in a root tree leaf and delete the
 * cache inode, to prevent free space cache data extents from blocking data
 * relocation.
 */
static int delete_v1_space_cache(struct extent_buffer *leaf,
				 struct btrfs_block_group *block_group,
				 u64 data_bytenr)
{
	u64 space_cache_ino;
	struct btrfs_file_extent_item *ei;
	struct btrfs_key key;
	bool found = false;
	int i;
	int ret;

	if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
		return 0;

	for (i = 0; i < btrfs_header_nritems(leaf); i++) {
		u8 type;

		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(leaf, ei);

		if ((type == BTRFS_FILE_EXTENT_REG ||
		     type == BTRFS_FILE_EXTENT_PREALLOC) &&
		    btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
			found = true;
			space_cache_ino = key.objectid;
			break;
		}
	}
	if (!found)
		return -ENOENT;
	ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
				       space_cache_ino);
	return ret;
}

/*
 * Helper to find all tree blocks that reference a given data extent.
 */
static noinline_for_stack int add_data_references(struct reloc_control *rc,
					const struct btrfs_key *extent_key,
					struct btrfs_path *path,
					struct rb_root *blocks)
{
	struct btrfs_backref_walk_ctx ctx = { 0 };
	struct ulist_iterator leaf_uiter;
	struct ulist_node *ref_node = NULL;
	const u32 blocksize = rc->extent_root->fs_info->nodesize;
	int ret = 0;

	btrfs_release_path(path);

	ctx.bytenr = extent_key->objectid;
	ctx.skip_inode_ref_list = true;
	ctx.fs_info = rc->extent_root->fs_info;

	ret = btrfs_find_all_leafs(&ctx);
	if (ret < 0)
		return ret;

	ULIST_ITER_INIT(&leaf_uiter);
	while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
		struct btrfs_tree_parent_check check = { 0 };
		struct extent_buffer *eb;

		eb = read_tree_block(ctx.fs_info, ref_node->val, &check);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			break;
		}
		ret = delete_v1_space_cache(eb, rc->block_group,
					    extent_key->objectid);
		free_extent_buffer(eb);
		if (ret < 0)
			break;
		ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
		if (ret < 0)
			break;
	}
	if (ret < 0)
		free_block_list(blocks);
	ulist_free(ctx.refs);
	return ret;
}

/*
 * Helper to find the next unprocessed extent in the block group.
 */
static noinline_for_stack
int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
		     struct btrfs_key *extent_key)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u64 start, end, last;
	int ret;

	last = rc->block_group->start + rc->block_group->length;
	while (1) {
		bool block_found;

		cond_resched();
		if (rc->search_start >= last) {
			ret = 1;
			break;
		}

		key.objectid = rc->search_start;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = 0;

		path->search_commit_root = 1;
		path->skip_locking = 1;
		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
					0, 0);
		if (ret < 0)
			break;
next:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret != 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid >= last) {
			ret = 1;
			break;
		}

		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
		    key.type != BTRFS_METADATA_ITEM_KEY) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
		    key.objectid + key.offset <= rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_METADATA_ITEM_KEY &&
		    key.objectid + fs_info->nodesize <=
		    rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		block_found = find_first_extent_bit(&rc->processed_blocks,
						    key.objectid, &start, &end,
						    EXTENT_DIRTY, NULL);

		if (block_found && start <= key.objectid) {
			btrfs_release_path(path);
			rc->search_start = end + 1;
		} else {
			if (key.type == BTRFS_EXTENT_ITEM_KEY)
				rc->search_start = key.objectid + key.offset;
			else
				rc->search_start = key.objectid +
						   fs_info->nodesize;
			memcpy(extent_key, &key, sizeof(key));
			return 0;
		}
	}
	btrfs_release_path(path);
	return ret;
}
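
/*
 * Publish and clear fs_info->reloc_ctl under fs_info->reloc_mutex, so that
 * readers of the pointer always see a consistent value.
 */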
static void set_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = rc;
	mutex_unlock(&fs_info->reloc_mutex);
}

static void unset_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = NULL;
	mutex_unlock(&fs_info->reloc_mutex);
}
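
/*
 * Initialize the per-pass relocation state: allocate and fill the temporary
 * block reserve, reset the cluster and counters, enable reloc tree creation
 * and commit a transaction before the extent search begins.
 */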
static noinline_for_stack
int prepare_to_relocate(struct reloc_control *rc)
{
	struct btrfs_trans_handle *trans;
	int ret;

	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
					      BTRFS_BLOCK_RSV_TEMP);
	if (!rc->block_rsv)
		return -ENOMEM;

	memset(&rc->cluster, 0, sizeof(rc->cluster));
	rc->search_start = rc->block_group->start;
	rc->extents_found = 0;
	rc->nodes_relocated = 0;
	rc->merging_rsv_size = 0;
	rc->reserved_bytes = 0;
	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
			      RELOCATION_RESERVED_NODES;
	ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
				     rc->block_rsv, rc->block_rsv->size,
				     BTRFS_RESERVE_FLUSH_ALL);
	if (ret)
		return ret;

	rc->create_reloc_tree = true;
	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		/*
		 * The extent tree is not a ref_cow tree, so it has no
		 * reloc_root to clean up. Callers are responsible for freeing
		 * the block rsv allocated above.
		 */
		return PTR_ERR(trans);
	}

	ret = btrfs_commit_transaction(trans);
	if (ret)
		unset_reloc_control(rc);

	return ret;
}
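
/*
 * One pass of relocation: walk all extent items inside the block group,
 * queue tree blocks (and, during the UPDATE_DATA_PTRS stage, the tree blocks
 * referencing data extents) for relocation, then merge the reloc trees back
 * into the fs trees and clean up.
 */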
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct rb_root blocks = RB_ROOT;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	u64 flags;
	int ret;
	int err = 0;
	int progress = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	ret = prepare_to_relocate(rc);
	if (ret) {
		err = ret;
		goto out_free;
	}

	while (1) {
		rc->reserved_bytes = 0;
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     rc->block_rsv->size,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			break;
		}
		progress++;
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			break;
		}
restart:
		if (rc->backref_cache.last_trans != trans->transid)
			btrfs_backref_release_cache(&rc->backref_cache);
		rc->backref_cache.last_trans = trans->transid;

		ret = find_next_extent(rc, path, &key);
		if (ret < 0)
			err = ret;
		if (ret != 0)
			break;

		rc->extents_found++;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_extent_item);
		flags = btrfs_extent_flags(path->nodes[0], ei);

		/*
		 * If we are relocating a simple quota owned extent item, we
		 * need to note the owner on the reloc data root so that when
		 * we allocate the replacement item, we can attribute it to the
		 * correct eventual owner (rather than the reloc data root).
		 */
		if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
			struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
			u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
								 path->nodes[0],
								 path->slots[0]);

			root->relocation_src_root = owning_root_id;
		}

		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
			ret = add_data_references(rc, &key, path, &blocks);
		} else {
			btrfs_release_path(path);
			ret = 0;
		}
		if (ret < 0) {
			err = ret;
			break;
		}

		if (!RB_EMPTY_ROOT(&blocks)) {
			ret = relocate_tree_blocks(trans, rc, &blocks);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					err = ret;
					break;
				}
				rc->extents_found--;
				rc->search_start = key.objectid;
			}
		}

		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
		trans = NULL;

		if (rc->stage == MOVE_DATA_EXTENTS &&
		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
			rc->found_file_extent = true;
			ret = relocate_data_extent(rc, &key);
			if (ret < 0) {
				err = ret;
				break;
			}
		}
		if (btrfs_should_cancel_balance(fs_info)) {
			err = -ECANCELED;
			break;
		}
	}
	if (trans && progress && err == -ENOSPC) {
		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
		if (ret == 1) {
			err = 0;
			progress = 0;
			goto restart;
		}
	}

	btrfs_release_path(path);
	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);

	if (trans) {
		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
	}

	if (!err) {
		ret = relocate_file_extent_cluster(rc);
		if (ret < 0)
			err = ret;
	}

	rc->create_reloc_tree = false;
	set_reloc_control(rc);

	btrfs_backref_release_cache(&rc->backref_cache);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);

	/*
	 * Even when the relocation is cancelled, we still need to go through
	 * prepare_to_merge() and merge_reloc_roots().
	 *
	 * On error (including a cancelled balance), prepare_to_merge() will
	 * mark all reloc trees orphan and queue them for cleanup in
	 * merge_reloc_roots().
	 */
	err = prepare_to_merge(rc, err);

	merge_reloc_roots(rc);

	rc->merge_reloc_tree = false;
	unset_reloc_control(rc);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);

	/* Get rid of pinned extents. */
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	ret = btrfs_commit_transaction(trans);
	if (ret && !err)
		err = ret;
out_free:
	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
	btrfs_free_block_rsv(fs_info, rc->block_rsv);
	btrfs_free_path(path);
	return err;
}
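
/*
 * Insert a bare inode item for the data relocation inode: a regular file
 * with size 0, link count 0, generation 1 and the NOCOMPRESS and PREALLOC
 * flags set.
 */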
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, 0);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
					  BTRFS_INODE_PREALLOC);
	btrfs_mark_buffer_dirty(trans, leaf);
out:
	btrfs_free_path(path);
	return ret;
}
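
/*
 * Remove the inode item created by __insert_orphan_inode(). Any failure
 * here aborts the transaction.
 */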
static void delete_orphan_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}
	ret = btrfs_del_item(trans, root, path);
out:
	if (ret)
		btrfs_abort_transaction(trans, ret);
	btrfs_free_path(path);
}

/*
 * Helper to create an inode for data relocation.
 * The inode lives in the data relocation tree and its link count is 0.
 */
static noinline_for_stack struct inode *create_reloc_inode(
					struct btrfs_fs_info *fs_info,
					const struct btrfs_block_group *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	u64 objectid;
	int ret = 0;

	root = btrfs_grab_root(fs_info->data_reloc_root);
	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans)) {
		btrfs_put_root(root);
		return ERR_CAST(trans);
	}

	ret = btrfs_get_free_objectid(root, &objectid);
	if (ret)
		goto out;

	ret = __insert_orphan_inode(trans, root, objectid);
	if (ret)
		goto out;

	inode = btrfs_iget(objectid, root);
	if (IS_ERR(inode)) {
		delete_orphan_inode(trans, root, objectid);
		ret = PTR_ERR(inode);
		inode = NULL;
		goto out;
	}
	BTRFS_I(inode)->reloc_block_group_start = group->start;

	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
	btrfs_put_root(root);
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
	if (ret) {
		iput(inode);
		inode = ERR_PTR(ret);
	}
	return inode;
}

/*
 * Mark the start of a chunk relocation that is cancellable. Check if the
 * cancellation has been requested meanwhile and don't start in that case.
 *
 * NOTE: if this returns an error, reloc_chunk_end() must not be called.
 *
 * Return:
 * 0             success
 * -EINPROGRESS  operation is already in progress, that's probably a bug
 * -ECANCELED    cancellation request was set before the operation started
 */
static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
{
	if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
		/* This should not happen. */
		btrfs_err(fs_info, "reloc already running, cannot start");
		return -EINPROGRESS;
	}

	if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
		btrfs_info(fs_info, "chunk relocation canceled on start");
		/* On cancel, clear all requests. */
		clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
		atomic_set(&fs_info->reloc_cancel_req, 0);
		return -ECANCELED;
	}
	return 0;
}

/*
 * Mark the end of a chunk relocation that is cancellable and wake up any
 * waiters.
 *
 * NOTE: call only if a previous call to reloc_chunk_start() succeeded.
 */
static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
{
	ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));

	/* Requested after start, clear the bit first so any waiters can continue. */
	if (atomic_read(&fs_info->reloc_cancel_req) > 0)
		btrfs_info(fs_info, "chunk relocation canceled during operation");
	clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
	atomic_set(&fs_info->reloc_cancel_req, 0);
}
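
/*
 * Allocate a reloc_control and initialize its root lists, backref cache,
 * reloc root rbtree and the io tree tracking already processed blocks.
 */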
static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
{
	struct reloc_control *rc;

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return NULL;

	INIT_LIST_HEAD(&rc->reloc_roots);
	INIT_LIST_HEAD(&rc->dirty_subvol_roots);
	btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
	rc->reloc_root_tree.rb_root = RB_ROOT;
	spin_lock_init(&rc->reloc_root_tree.lock);
	extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
	return rc;
}
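
/*
 * Counterpart of alloc_reloc_control(): drop any remaining reloc roots and
 * free the mapping nodes left in the reloc root rbtree.
 */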
static void free_reloc_control(struct reloc_control *rc)
{
	struct mapping_node *node, *tmp;

	free_reloc_roots(&rc->reloc_roots);
	rbtree_postorder_for_each_entry_safe(node, tmp,
			&rc->reloc_root_tree.rb_root, rb_node)
		kfree(node);

	kfree(rc);
}

/*
 * Print the block group being relocated.
 */
static void describe_relocation(struct btrfs_block_group *block_group)
{
	char buf[128] = {'\0'};

	btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));

	btrfs_info(block_group->fs_info, "relocating block group %llu flags %s",
		   block_group->start, buf);
}
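
/* Human readable name of the relocation stage, for log messages. */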
static const char *stage_to_string(enum reloc_stage stage)
{
	if (stage == MOVE_DATA_EXTENTS)
		return "move data extents";
	if (stage == UPDATE_DATA_PTRS)
		return "update data pointers";
	return "unknown";
}

/*
 * Function to relocate all extents in a block group.
 */
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
	struct btrfs_block_group *bg;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
	struct reloc_control *rc;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	int rw = 0;
	int err = 0;

	/*
	 * This only gets set if we had a half-deleted snapshot on mount. We
	 * cannot allow relocation to start while we're still trying to clean
	 * up these pending deletions.
	 */
	ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
	if (ret)
		return ret;

	/* We may have been woken up by close_ctree, so bail if we're closing. */
	if (btrfs_fs_closing(fs_info))
		return -EINTR;

	bg = btrfs_lookup_block_group(fs_info, group_start);
	if (!bg)
		return -ENOENT;

	/*
	 * Relocation of a data block group creates ordered extents. Without
	 * sb_start_write(), we can freeze the filesystem while unfinished
	 * ordered extents are left. Such ordered extents can cause a deadlock
	 * e.g. when syncfs() is waiting for their completion but they can't
	 * finish because they block when joining a transaction, due to the
	 * fact that the freeze locks are being held in write mode.
	 */
	if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
		ASSERT(sb_write_started(fs_info->sb));

	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
		btrfs_put_block_group(bg);
		return -ETXTBSY;
	}

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		btrfs_put_block_group(bg);
		return -ENOMEM;
	}

	ret = reloc_chunk_start(fs_info);
	if (ret < 0) {
		err = ret;
		goto out_put_bg;
	}

	rc->extent_root = extent_root;
	rc->block_group = bg;

	ret = btrfs_inc_block_group_ro(rc->block_group, true);
	if (ret) {
		err = ret;
		goto out;
	}
	rw = 1;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(rc->block_group, path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
	else
		ret = PTR_ERR(inode);

	if (ret && ret != -ENOENT) {
		err = ret;
		goto out;
	}

	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

	describe_relocation(rc->block_group);

	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
	btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group);

	ret = btrfs_zone_finish(rc->block_group);
	WARN_ON(ret && ret != -EAGAIN);

	while (1) {
		enum reloc_stage finishes_stage;

		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0)
			err = ret;

		finishes_stage = rc->stage;
		/*
		 * We may have gotten ENOSPC after we already dirtied some
		 * extents. If writeout happens while we're relocating a
		 * different block group we could end up hitting the
		 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
		 * btrfs_reloc_cow_block. Make sure we write everything out
		 * properly so we don't trip over this problem, and then break
		 * out of the loop if we hit an error.
		 */
		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			ret = btrfs_wait_ordered_range(BTRFS_I(rc->data_inode), 0,
						       (u64)-1);
			if (ret)
				err = ret;
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}

		if (err < 0)
			goto out;

		if (rc->extents_found == 0)
			break;

		btrfs_info(fs_info, "found %llu extents, stage: %s",
			   rc->extents_found, stage_to_string(finishes_stage));
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(rc->block_group->used > 0);
out:
	if (err && rw)
		btrfs_dec_block_group_ro(rc->block_group);
	iput(rc->data_inode);
	reloc_chunk_end(fs_info);
out_put_bg:
	btrfs_put_block_group(bg);
	free_reloc_control(rc);
	return err;
}
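
/*
 * A reloc root whose fs root no longer exists is garbage: reset its drop
 * progress and set its root refs to 0 so recovery treats it like an orphan
 * reloc root and deletes it.
 */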
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret, err;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
	       sizeof(root->root_item.drop_progress));
	btrfs_set_root_drop_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);

	err = btrfs_end_transaction(trans);
	if (err)
		return err;
	return ret;
}

/*
 * Recover a relocation interrupted by a system crash.
 *
 * This function resumes merging reloc trees with the corresponding fs trees,
 * which is important for keeping the sharing of tree blocks.
 */
int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
{
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret2;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		ret = 0;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
		if (IS_ERR(reloc_root)) {
			ret = PTR_ERR(reloc_root);
			goto out;
		}

		set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = btrfs_get_fs_root(fs_info,
					reloc_root->root_key.offset, false);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT)
					goto out;
				ret = mark_garbage_root(reloc_root);
				if (ret < 0)
					goto out;
				ret = 0;
			} else {
				btrfs_put_root(fs_root);
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		ret = -ENOMEM;
		goto out;
	}

	ret = reloc_chunk_start(fs_info);
	if (ret < 0)
		goto out_end;

	rc->extent_root = btrfs_extent_root(fs_info, 0);

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unset;
	}

	rc->merge_reloc_tree = true;

	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					    false);
		if (IS_ERR(fs_root)) {
			ret = PTR_ERR(fs_root);
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_end_transaction(trans);
			goto out_unset;
		}

		ret = __add_reloc_root(reloc_root);
		ASSERT(ret != -EEXIST);
		if (ret) {
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_put_root(fs_root);
			btrfs_end_transaction(trans);
			goto out_unset;
		}
		fs_root->reloc_root = btrfs_grab_root(reloc_root);
		btrfs_put_root(fs_root);
	}

	ret = btrfs_commit_transaction(trans);
	if (ret)
		goto out_unset;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_clean;
	}
	ret = btrfs_commit_transaction(trans);
out_clean:
	ret2 = clean_dirty_subvols(rc);
	if (ret2 < 0 && !ret)
		ret = ret2;
out_unset:
	unset_reloc_control(rc);
	reloc_chunk_end(fs_info);
out_end:
	free_reloc_control(rc);
out:
	free_reloc_roots(&reloc_roots);
	btrfs_free_path(path);

	if (ret == 0) {
		/* Clean up the orphan inode in the data relocation tree. */
		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
		ASSERT(fs_root);
		ret = btrfs_orphan_cleanup(fs_root);
		btrfs_put_root(fs_root);
	}
	return ret;
}

/*
 * Helper to add ordered checksums for data relocation.
 *
 * Cloning the existing checksums properly handles nodatasum extents and also
 * saves the CPU time it would take to recalculate them.
 */
int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 disk_bytenr = ordered->file_offset + inode->reloc_block_group_start;
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr);
	LIST_HEAD(list);
	int ret;

	ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
				      disk_bytenr + ordered->num_bytes - 1,
				      &list, false);
	if (ret < 0) {
		btrfs_mark_ordered_extent_error(ordered);
		return ret;
	}

	while (!list_empty(&list)) {
		struct btrfs_ordered_sum *sums =
			list_entry(list.next, struct btrfs_ordered_sum, list);

		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr;
		btrfs_add_ordered_sum(ordered, sums);
	}

	return 0;
}
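
/*
 * COW hook for relocation: while a relocation is running, keep the backref
 * cache in sync with the newly COWed block and, in the UPDATE_DATA_PTRS
 * stage, rewrite the file extent pointers in the new leaf.
 */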
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  const struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct reloc_control *rc;
	struct btrfs_backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID && rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];

		/*
		 * If node->bytenr != buf->start and node->new_bytenr !=
		 * buf->start then we've got the wrong backref node for what we
		 * expected to see here and the cache is incorrect.
		 */
		if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
			btrfs_err(fs_info,
"bytenr %llu was found but our backref cache was expecting %llu or %llu",
				  buf->start, node->bytenr, node->new_bytenr);
			return -EUCLEAN;
		}

		btrfs_backref_drop_node_buffer(node);
		atomic_inc(&cow->refs);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}

/*
 * Called before creating a snapshot. It calculates the metadata reservation
 * required for relocating the tree blocks in the snapshot.
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root = pending->root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	if (!rc || !have_reloc_root(root))
		return;

	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * Relocation is in the stage of merging trees. The space used by
	 * merging a reloc tree is twice the size of the relocated tree nodes
	 * in the worst case: half for COWing the reloc tree, half for COWing
	 * the fs tree. The space used by COWing the reloc tree will be freed
	 * after the tree is dropped. If we create a snapshot, COWing the fs
	 * tree may use more space than it frees, so we need to reserve extra
	 * space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}

/*
 * Called after a snapshot is created. It migrates the block reservation and
 * creates the reloc root for the newly created snapshot.
 *
 * This is similar to btrfs_init_reloc_root(): we come out of here with two
 * references held on the reloc_root, one for root->reloc_root and one for
 * rc->reloc_roots.
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	int ret;

	if (!rc || !have_reloc_root(root))
		return 0;

	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, true);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root, btrfs_root_id(new_root));
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	new_root->reloc_root = btrfs_grab_root(reloc_root);

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}

/*
 * Get the current bytenr for the block group which is being relocated.
 *
 * Return U64_MAX if there is no running relocation.
 */
u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info)
{
	u64 logical = U64_MAX;

	lockdep_assert_held(&fs_info->reloc_mutex);

	if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group)
		logical = fs_info->reloc_ctl->block_group->start;
	return logical;
}