// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_ag.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"
#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bno(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		bbcount)
{
	if (blk_no < 0 || blk_no >= log->l_logBBsize)
		return false;
	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
		return false;
	return true;
}

/*
 * Allocate a buffer to hold log data. The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 */
static char *
xlog_alloc_buffer(
	struct xlog	*log,
	int		nbblks)
{
	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
	 * basic block size), so we round up the requested size to accommodate
	 * the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned block
	 * offset, in which case an I/O of the requested size could extend
	 * beyond the end of the buffer. If the requested size is only 1 basic
	 * block it will never straddle a sector boundary, so this won't be an
	 * issue. Nor will this be a problem if the log I/O is done in basic
	 * blocks (sector size 1). But otherwise we extend the buffer by one
	 * extra log sector to ensure there's space to accommodate this
	 * possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
	return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}
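
/*
 * Typical usage of this helper (see e.g. xlog_find_cycle_start() below):
 * allocate a buffer with xlog_alloc_buffer(), read log blocks into it with
 * xlog_bread(), then release it with kvfree().
 */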

/*
 * Return the address of the start of the given block number's data
 * in a log buffer. The buffer covers a log sector-aligned region.
 */
static inline unsigned int
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no)
{
	return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}
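
/*
 * For example, with 4k log sectors (l_sectBBsize == 8), block 11 maps to
 * byte offset BBTOB(11 & 7) == 1536 within its sector-aligned buffer.
 */

/*
 * Perform a synchronous read or write of log blocks. The request is rounded
 * out to log sector alignment before being issued to the block device.
 */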
static int
xlog_do_io(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	unsigned int	nbblks,
	char		*data,
	enum req_op	op)
{
	int		error;

	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks > 0);

	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
			BBTOB(nbblks), data, op);
	if (error && !xlog_is_shutdown(log)) {
		xfs_alert(log->l_mp,
			  "log recovery %s I/O error at daddr 0x%llx len %d error %d",
			  op == REQ_OP_WRITE ? "write" : "read",
			  blk_no, nbblks, error);
	}
	return error;
}
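
/*
 * Thin wrappers around xlog_do_io(). xlog_bread() additionally returns, in
 * *offset, a pointer to the requested block's data within the sector-aligned
 * buffer.
 */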
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data,
	char		**offset)
{
	int		error;

	error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
	if (!error)
		*offset = data + xlog_align(log, blk_no);
	return error;
}

STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
					   &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
						  &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle. It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	char		*buffer,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, buffer, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range. The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	char		*buffer;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining. If that fails,
	 * try a smaller size. We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = roundup_pow_of_two(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, buffer, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	kvfree(buffer);
	return error;
}
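
/*
 * Return the number of header blocks used by a log record: v2 logs with an
 * h_size larger than XLOG_HEADER_CYCLE_SIZE spread the record header over
 * multiple blocks; everything else uses a single header block.
 */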
static inline int
xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
{
	if (xfs_has_logv2(log->l_mp)) {
		int	h_size = be32_to_cpu(rh->h_size);

		if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
		    h_size > XLOG_HEADER_CYCLE_SIZE)
			return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
	}
	return 1;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record. Therefore, we subtract one to get the block number
 * of the last block in the given buffer. extra_bblks contains the number
 * of blocks we would have read on a previous read. This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	char			*buffer;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	buffer = xlog_alloc_buffer(log, num_blks);
	if (!buffer) {
		buffer = xlog_alloc_buffer(log, 1);
		if (!buffer)
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EFSCORRUPTED;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header. Return
	 * to caller. If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block of the log
	 * record _before_ the head). So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #. We may end
	 * up reading an entire log record. In this case, we don't want to
	 * reset last_blk. Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	xhdrs = xlog_logrec_hblks(log, head);

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	kvfree(buffer);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go. This means that incomplete LR writes at the end are
 * eliminated when calculating the head. We aren't guaranteed that previous
 * LRs have complete transactions. We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	char		*buffer;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number. In this
	 * case, head_blk can't be set to zero (which makes sense). The below
	 * math doesn't work out properly with head_blk equal to zero. Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct. If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum. In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle. We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1. If we find such a hole,
		 * then the start of that hole will be the new head. The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle. We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs. First we do a binary search
		 * for the first occurrence of last_half_cycle. The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us. If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log. The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
				last_half_cycle);
		if (error)
			goto out_free_buffer;
	}

	/*
	 * Now validate the answer. Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number. The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log. The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log. In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log. The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle. If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found. This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks. We need to skip past those since that is
		 * certainly not the head of the log. By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now. The last part of the physical
		 * log is good. This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto out_free_buffer;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto out_free_buffer;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto out_free_buffer;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto out_free_buffer;
	}

	kvfree(buffer);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number. Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1. In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

out_free_buffer:
	kvfree(buffer);
	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;

	return tail_blk + (log->l_logBBsize - head_blk);
}

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		*tail_blk,
	int			hsize)
{
	struct xlog_rec_header	*thead;
	char			*buffer;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
				buffer, &tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	kvfree(buffer);
	return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	char			*buffer,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	char			*tmp_buffer;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/buffer pointers from the caller.
	 */
	tmp_buffer = xlog_alloc_buffer(log, 1);
	if (!tmp_buffer)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_buffer,
				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
	kvfree(tmp_buffer);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
				buffer, rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}

/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
	struct xlog		*log,
	xfs_daddr_t		bno)
{
	int			mod;

	div_s64_rem(bno, log->l_logBBsize, &mod);
	return mod;
}

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	char			*buffer,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since 'i' could be the last block in the physical
	 * log, we convert to a log block before comparing to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	hblks = xlog_logrec_hblks(log, rhead);
	after_umount_blk = xlog_wrap_logbno(log,
			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
		error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			log->l_ailp->ail_head_lsn =
					atomic64_read(&log->l_tail_lsn);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}
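
/*
 * Set the in-core log state from the last record found at the head of the
 * log: previous/current block, current cycle, tail lsn and AIL head lsn.
 */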
static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed. In the case where head_blk == 0, we bump curr_cycle
	 * by one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record. At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for. Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	log->l_ailp->ail_head_lsn = be64_to_cpu(rhead->h_lsn);
}
  1111. /*
  1112. * Find the sync block number or the tail of the log.
  1113. *
  1114. * This will be the block number of the last record to have its
  1115. * associated buffers synced to disk. Every log record header has
  1116. * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
  1117. * to get a sync block number. The only concern is to figure out which
  1118. * log record header to believe.
  1119. *
  1120. * The following algorithm uses the log record header with the largest
  1121. * lsn. The entire log record does not need to be valid. We only care
  1122. * that the header is valid.
  1123. *
  1124. * We could speed up search by using current head_blk buffer, but it is not
  1125. * available.
  1126. */
  1127. STATIC int
  1128. xlog_find_tail(
  1129. struct xlog *log,
  1130. xfs_daddr_t *head_blk,
  1131. xfs_daddr_t *tail_blk)
  1132. {
  1133. xlog_rec_header_t *rhead;
  1134. char *offset = NULL;
  1135. char *buffer;
  1136. int error;
  1137. xfs_daddr_t rhead_blk;
  1138. xfs_lsn_t tail_lsn;
  1139. bool wrapped = false;
  1140. bool clean = false;
  1141. /*
  1142. * Find previous log record
  1143. */
  1144. if ((error = xlog_find_head(log, head_blk)))
  1145. return error;
  1146. ASSERT(*head_blk < INT_MAX);
  1147. buffer = xlog_alloc_buffer(log, 1);
  1148. if (!buffer)
  1149. return -ENOMEM;
  1150. if (*head_blk == 0) { /* special case */
  1151. error = xlog_bread(log, 0, 1, buffer, &offset);
  1152. if (error)
  1153. goto done;
  1154. if (xlog_get_cycle(offset) == 0) {
  1155. *tail_blk = 0;
  1156. /* leave all other log inited values alone */
  1157. goto done;
  1158. }
  1159. }
	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EFSCORRUPTED;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, buffer, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, buffer,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		xfs_set_clean(log->l_mp);

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head. This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem.
	 *
	 * NOTE: This used to say "if (!readonly)".
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_targ))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	kvfree(buffer);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough. You can then search linearly through
 * the X blocks. This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0. It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	char		*buffer;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;
	int		ret = 1;

	*blk_no = 0;

	/* check totally zeroed log */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;
	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		goto out_free_buffer;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum - 1, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;
	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		ret = 0;
		goto out_free_buffer;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum - 1;
	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
	if (error)
		goto out_free_buffer;

	/*
	 * Validate the answer. Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks. At this point, the maximum
	 * is not chosen to mean anything special. XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head. What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto out_free_buffer;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write. We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto out_free_buffer;

	*blk_no = last_blk;
out_free_buffer:
	kvfree(buffer);
	if (error)
		return error;
	return ret;
}
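
/*
 * Illustrative example of the partially zeroed case handled above (the
 * numbers are hypothetical, not taken from this file): with log_bbnum =
 * 1000, suppose block 0 carries cycle 1 but block 999 carries cycle 0.
 * xlog_find_cycle_start() then binary searches for the first block stamped
 * with cycle 0, and the verification steps scan back over up to
 * XLOG_TOTAL_REC_SHIFT(log) blocks to catch the "1 ... | 0 | 1 | 0..."
 * pattern and to back up over a torn, partially written log record before
 * *blk_no is returned.
 */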
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_has_logv2(log->l_mp) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
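
/*
 * For reference, an LSN packs the cycle number into the upper 32 bits and
 * the basic block number into the lower 32 bits, so for example (values
 * chosen purely for illustration):
 *
 *	xlog_assign_lsn(7, 100) == 0x0000000700000064ULL
 *	CYCLE_LSN(0x0000000700000064ULL) == 7
 *	BLOCK_LSN(0x0000000700000064ULL) == 100
 */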
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	char		*buffer;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written. If that fails, try
	 * a smaller size. We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = roundup_pow_of_two(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/*
	 * We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, buffer);
		if (error)
			goto out_free_buffer;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/*
		 * We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			error = xlog_bread_noalign(log, ealign, sectbb,
					buffer + BBTOB(ealign - start_block));
			if (error)
				break;
		}

		offset = buffer + xlog_align(log, start_block);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i + j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, buffer);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_free_buffer:
	kvfree(buffer);
	return error;
}
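
/*
 * Worked example of the alignment logic above (hypothetical numbers): on a
 * 4k sector device, sectbb = 8. Clearing from start_block = 10 gives
 * balign = round_down(10, 8) = 8, so the read-modify-write cycle first
 * pulls the sector covering blocks 8-15 into the buffer and sets j = 2,
 * which keeps blocks 8 and 9 intact while the stamped record headers begin
 * at the buffer offset corresponding to block 10. The ealign read performs
 * the same preservation for a partially covered final sector.
 */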
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head. We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number. We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log. Doing so would leave the log without
 * any valid log records in it until a new one was written. If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail. We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block < tail_block ||
				   head_block >= log->l_logBBsize))
			return -EFSCORRUPTED;
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block >= tail_block ||
				   head_cycle != tail_cycle + 1))
			return -EFSCORRUPTED;
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = min(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log. Just do it
		 * in a single write. Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *	n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks. Do it in two separate
		 * I/Os. The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);
		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *	n ... n ... | n - 1 ...
		 *	^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
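
/*
 * Worked example of the distance math above (hypothetical numbers): with
 * l_logBBsize = 1000, head_block = 900 and tail_block = 100 on the same
 * cycle, tail_distance = 100 + (1000 - 900) = 200. If max_distance also
 * works out to 200, the write wraps: the first call stamps blocks 900-999
 * with cycle n - 1, and the second stamps blocks 0-99 with cycle n, giving
 * the "n ... n ... | n - 1 ..." layout described above.
 */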
/*
 * Release the recovered intent item in the AIL that matches the given intent
 * type and intent id.
 */
void
xlog_recover_release_intent(
	struct xlog			*log,
	unsigned short			intent_type,
	uint64_t			intent_id)
{
	struct xfs_defer_pending	*dfp, *n;

	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
		struct xfs_log_item	*lip = dfp->dfp_intent;

		if (lip->li_type != intent_type)
			continue;
		if (!lip->li_ops->iop_match(lip, intent_id))
			continue;

		ASSERT(xlog_item_is_intent(lip));

		xfs_defer_cancel_recovery(log->l_mp, dfp);
	}
}
int
xlog_recover_iget(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp)
{
	int			error;

	error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
	if (error)
		return error;

	error = xfs_qm_dqattach(*ipp);
	if (error) {
		xfs_irele(*ipp);
		return error;
	}

	if (VFS_I(*ipp)->i_nlink == 0)
		xfs_iflags_set(*ipp, XFS_IRECOVERY);

	return 0;
}
/*
 * Get an inode so that we can recover a log operation.
 *
 * Log intent items that target inodes effectively contain a file handle.
 * Check that the generation number matches the intent item like we do for
 * other file handles. Log intent items defined after this validation weakness
 * was identified must use this function.
 */
int
xlog_recover_iget_handle(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	uint32_t		gen,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	int			error;

	error = xlog_recover_iget(mp, ino, &ip);
	if (error)
		return error;

	if (VFS_I(ip)->i_generation != gen) {
		xfs_irele(ip);
		return -EFSCORRUPTED;
	}

	*ipp = ip;
	return 0;
}
/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */
static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
	&xlog_buf_item_ops,
	&xlog_inode_item_ops,
	&xlog_dquot_item_ops,
	&xlog_quotaoff_item_ops,
	&xlog_icreate_item_ops,
	&xlog_efi_item_ops,
	&xlog_efd_item_ops,
	&xlog_rui_item_ops,
	&xlog_rud_item_ops,
	&xlog_cui_item_ops,
	&xlog_cud_item_ops,
	&xlog_bui_item_ops,
	&xlog_bud_item_ops,
	&xlog_attri_item_ops,
	&xlog_attrd_item_ops,
	&xlog_xmi_item_ops,
	&xlog_xmd_item_ops,
};

static const struct xlog_recover_item_ops *
xlog_find_item_ops(
	struct xlog_recover_item	*item)
{
	unsigned int			i;

	for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
		if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
			return xlog_recover_item_ops[i];

	return NULL;
}
/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using
 *	   the ICREATE transactions, this means XFS_LI_ICREATE objects need
 *	   to get treated the same as inode allocation buffers as they create
 *	   and initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are
 *	   replayed. This ensures that inodes are completely flushed to the
 *	   inode buffer in a "free" state before we remove the unlinked inode
 *	   list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	struct xlog_recover_item *item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(item_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		enum xlog_recover_reorder	fate = XLOG_REORDER_ITEM_LIST;

		item->ri_ops = xlog_find_item_ops(item);
		if (!item->ri_ops) {
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation (%d)",
				__func__, ITEM_TYPE(item));
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EFSCORRUPTED;
			break;
		}

		if (item->ri_ops->reorder)
			fate = item->ri_ops->reorder(item);

		switch (fate) {
		case XLOG_REORDER_BUFFER_LIST:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XLOG_REORDER_CANCEL_LIST:
			trace_xfs_log_recover_item_reorder_head(log,
					trans, item, pass);
			list_move(&item->ri_list, &cancel_list);
			break;
		case XLOG_REORDER_INODE_BUFFER_LIST:
			list_move(&item->ri_list, &inode_buffer_list);
			break;
		case XLOG_REORDER_ITEM_LIST:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &item_list);
			break;
		}
	}

	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&item_list))
		list_splice_tail(&item_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}
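
/*
 * Illustrative result of the splices above: if a transaction held a data
 * buffer B, an inode item I, an inode unlink buffer U and a cancelled
 * buffer C (hypothetical items), the rebuilt r_itemq reads B, I, U, C from
 * head to tail - exactly the replay order required by the rules in the
 * comment above xlog_recover_reorder_trans().
 */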
void
xlog_buf_readahead(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	const struct xfs_buf_ops *ops)
{
	if (!xlog_is_buffer_cancelled(log, blkno, len))
		xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
}
/*
 * Create a deferred work structure for resuming and tracking the progress of a
 * log intent item that was found during recovery.
 */
void
xlog_recover_intent_item(
	struct xlog			*log,
	struct xfs_log_item		*lip,
	xfs_lsn_t			lsn,
	const struct xfs_defer_op_type	*ops)
{
	ASSERT(xlog_item_is_intent(lip));

	xfs_defer_start_recovery(lip, &log->r_dfops, ops);

	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, lip, lsn);
	lip->li_ops->iop_unpin(lip, 0);
}
STATIC int
xlog_recover_items_pass2(
	struct xlog		*log,
	struct xlog_recover	*trans,
	struct list_head	*buffer_list,
	struct list_head	*item_list)
{
	struct xlog_recover_item *item;
	int			error = 0;

	list_for_each_entry(item, item_list, ri_list) {
		trace_xfs_log_recover_item_recover(log, trans, item,
				XLOG_RECOVER_PASS2);

		if (item->ri_ops->commit_pass2)
			error = item->ri_ops->commit_pass2(log, buffer_list,
					item, trans->r_lsn);
		if (error)
			return error;
	}

	return error;
}
/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now. Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error = 0;
	int			items_queued = 0;
	struct xlog_recover_item *item;
	struct xlog_recover_item *next;
	LIST_HEAD		(ra_list);
	LIST_HEAD		(done_list);

	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100

	hlist_del_init(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		trace_xfs_log_recover_item_recover(log, trans, item, pass);

		switch (pass) {
		case XLOG_RECOVER_PASS1:
			if (item->ri_ops->commit_pass1)
				error = item->ri_ops->commit_pass1(log, item);
			break;
		case XLOG_RECOVER_PASS2:
			if (item->ri_ops->ra_pass2)
				item->ri_ops->ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				error = xlog_recover_items_pass2(log, trans,
						buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}
			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	return error;
}
STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	struct xlog_recover_item *item;

	item = kzalloc(sizeof(struct xlog_recover_item),
			GFP_KERNEL | __GFP_NOFAIL);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	struct xlog_recover_item *item;
	char			*ptr, *old_ptr;
	int			old_len;

	/*
	 * If the transaction is empty, the header was split across this and the
	 * previous record. Copy the rest of the header.
	 */
	if (list_empty(&trans->r_itemq)) {
		ASSERT(len <= sizeof(struct xfs_trans_header));
		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			return -EFSCORRUPTED;
		}

		xlog_recover_add_item(&trans->r_itemq);
		ptr = (char *)&trans->r_theader +
				sizeof(struct xfs_trans_header) - len;
		memcpy(ptr, dp, len);
		return 0;
	}

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
			  ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kvrealloc(old_ptr, len + old_len, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}
/*
 * The next region to add is the start of a new region. It could be
 * a whole region or it could be the first part of a new region. Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned. Therefore, we
 * either have both fields or we have neither field. In the case we have
 * neither field, the data part of the region is zero length. We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later. If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	struct xfs_inode_log_format *in_f; /* any will do */
	struct xlog_recover_item *item;
	char			*ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		/*
		 * The transaction header can be arbitrarily split across op
		 * records. If we don't have the whole thing here, copy what we
		 * do have and handle the rest in the next record.
		 */
		if (len == sizeof(struct xfs_trans_header))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);
		return 0;
	}

	ptr = xlog_kvmalloc(len);
	memcpy(ptr, dp, len);
	in_f = (struct xfs_inode_log_format *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
			  ri_list);
	if (item->ri_total != 0 &&
	    item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					struct xlog_recover_item, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				 in_f->ilf_size);
			ASSERT(0);
			kvfree(ptr);
			return -EFSCORRUPTED;
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf = kzalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				GFP_KERNEL | __GFP_NOFAIL);
	}

	if (item->ri_total <= item->ri_cnt) {
		xfs_warn(log->l_mp,
			"log item region count (%d) overflowed size (%d)",
				item->ri_cnt, item->ri_total);
		ASSERT(0);
		kvfree(ptr);
		return -EFSCORRUPTED;
	}

	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}
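
/*
 * The "first 32 bits" assumption above holds because, for example,
 * struct xfs_inode_log_format begins with a 16-bit ilf_type and a 16-bit
 * ilf_size, so any region's first word is enough to identify the item and
 * its region count. Illustratively, an inode item logged with ilf_size = 3
 * ends up with ri_total = 3 and ri_buf[0] holding the format structure
 * itself, with the remaining two regions filled in by later calls (which
 * regions follow depends on the item type).
 */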
/*
 * Free up any resources allocated by the transaction
 *
 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
 */
STATIC void
xlog_recover_free_trans(
	struct xlog_recover	*trans)
{
	struct xlog_recover_item *item, *n;
	int			i;

	hlist_del_init(&trans->r_list);

	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
		/* Free the regions in the item. */
		list_del(&item->ri_list);
		for (i = 0; i < item->ri_cnt; i++)
			kvfree(item->ri_buf[i].i_addr);
		/* Free the item itself */
		kfree(item->ri_buf);
		kfree(item);
	}
	/* Free the transaction recover structure */
	kfree(trans);
}
/*
 * On error or completion, trans is freed.
 */
STATIC int
xlog_recovery_process_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	unsigned int		len,
	unsigned int		flags,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error = 0;
	bool			freeit = false;

	/* mask off ophdr transaction container flags */
	flags &= ~XLOG_END_TRANS;
	if (flags & XLOG_WAS_CONT_TRANS)
		flags &= ~XLOG_CONTINUE_TRANS;

	/*
	 * Callees must not free the trans structure. We'll decide if we need to
	 * free it or not based on the operation being done and its result.
	 */
	switch (flags) {
	/* expected flag values */
	case 0:
	case XLOG_CONTINUE_TRANS:
		error = xlog_recover_add_to_trans(log, trans, dp, len);
		break;
	case XLOG_WAS_CONT_TRANS:
		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
		break;
	case XLOG_COMMIT_TRANS:
		error = xlog_recover_commit_trans(log, trans, pass,
						  buffer_list);
		/* success or fail, we are now done with this transaction. */
		freeit = true;
		break;

	/* unexpected flag values */
	case XLOG_UNMOUNT_TRANS:
		/* just skip trans */
		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
		freeit = true;
		break;
	case XLOG_START_TRANS:
	default:
		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
		ASSERT(0);
		error = -EFSCORRUPTED;
		break;
	}
	if (error || freeit)
		xlog_recover_free_trans(trans);
	return error;
}
/*
 * Lookup the transaction recovery structure associated with the ID in the
 * current ophdr. If the transaction doesn't exist and the start flag is set in
 * the ophdr, then allocate a new transaction for future ID matches to find.
 * Either way, return what we found during the lookup - an existing transaction
 * or nothing.
 */
STATIC struct xlog_recover *
xlog_recover_ophdr_to_trans(
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead)
{
	struct xlog_recover	*trans;
	xlog_tid_t		tid;
	struct hlist_head	*rhp;

	tid = be32_to_cpu(ohead->oh_tid);
	rhp = &rhash[XLOG_RHASH(tid)];
	hlist_for_each_entry(trans, rhp, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}

	/*
	 * skip over non-start transaction headers - we could be
	 * processing slack space before the next transaction starts
	 */
	if (!(ohead->oh_flags & XLOG_START_TRANS))
		return NULL;

	ASSERT(be32_to_cpu(ohead->oh_len) == 0);

	/*
	 * This is a new transaction so allocate a new recovery container to
	 * hold the recovery ops that will follow.
	 */
	trans = kzalloc(sizeof(struct xlog_recover), GFP_KERNEL | __GFP_NOFAIL);
	trans->r_log_tid = tid;
	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
	INIT_LIST_HEAD(&trans->r_itemq);
	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, rhp);

	/*
	 * Nothing more to do for this ophdr. Items to be added to this new
	 * transaction will be in subsequent ophdr containers.
	 */
	return NULL;
}
STATIC int
xlog_recover_process_ophdr(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead,
	char			*dp,
	char			*end,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_recover	*trans;
	unsigned int		len;
	int			error;

	/* Do we understand who wrote this op? */
	if (ohead->oh_clientid != XFS_TRANSACTION &&
	    ohead->oh_clientid != XFS_LOG) {
		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
			__func__, ohead->oh_clientid);
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	/*
	 * Check the ophdr contains all the data it is supposed to contain.
	 */
	len = be32_to_cpu(ohead->oh_len);
	if (dp + len > end) {
		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
	if (!trans) {
		/* nothing to do, so skip over this ophdr */
		return 0;
	}

	/*
	 * The recovered buffer queue is drained only once we know that all
	 * recovery items for the current LSN have been processed. This is
	 * required because:
	 *
	 * - Buffer write submission updates the metadata LSN of the buffer.
	 * - Log recovery skips items with a metadata LSN >= the current LSN of
	 *   the recovery item.
	 * - Separate recovery items against the same metadata buffer can share
	 *   a current LSN. I.e., consider that the LSN of a recovery item is
	 *   defined as the starting LSN of the first record in which its
	 *   transaction appears, that a record can hold multiple transactions,
	 *   and/or that a transaction can span multiple records.
	 *
	 * In other words, we are allowed to submit a buffer from log recovery
	 * once per current LSN. Otherwise, we may incorrectly skip recovery
	 * items and cause corruption.
	 *
	 * We don't know up front whether buffers are updated multiple times per
	 * LSN. Therefore, track the current LSN of each commit log record as it
	 * is processed and drain the queue when it changes. Use commit records
	 * because they are ordered correctly by the logging code.
	 */
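	/*
	 * Concretely (an illustrative scenario, not taken from this code):
	 * if transactions A and B both commit with r_lsn == X and both
	 * modify buffer Y, then writing Y after replaying only A stamps Y
	 * with metadata LSN X, and B's updates to Y would be skipped as
	 * "already applied". Draining the delwri queue only when the commit
	 * record LSN changes avoids that.
	 */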
	if (log->l_recovery_lsn != trans->r_lsn &&
	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
		error = xfs_buf_delwri_submit(buffer_list);
		if (error)
			return error;
		log->l_recovery_lsn = trans->r_lsn;
	}

	return xlog_recovery_process_trans(log, trans, dp, len,
					   ohead->oh_flags, pass, buffer_list);
}
/*
 * There are two valid states of the r_state field. 0 indicates that the
 * transaction structure is in a normal state. We have either seen the
 * start of the transaction or the last operation we added was not a partial
 * operation. If the last operation we added to the transaction was a
 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
 *
 * NOTE: skip LRs with 0 data length.
 */
STATIC int
xlog_recover_process_data(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_op_header	*ohead;
	char			*end;
	int			num_logops;
	int			error;

	end = dp + be32_to_cpu(rhead->h_len);
	num_logops = be32_to_cpu(rhead->h_num_logops);

	/* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return -EIO;

	trace_xfs_log_recover_record(log, rhead, pass);
	while ((dp < end) && num_logops) {
		ohead = (struct xlog_op_header *)dp;
		dp += sizeof(*ohead);
		if (dp > end) {
			xfs_warn(log->l_mp, "%s: op header overrun", __func__);
			return -EFSCORRUPTED;
		}

		/* errors will abort recovery */
		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
						   dp, end, pass, buffer_list);
		if (error)
			return error;

		dp += be32_to_cpu(ohead->oh_len);
		num_logops--;
	}
	return 0;
}
/* Take all the collected deferred ops and finish them in order. */
static int
xlog_finish_defer_ops(
	struct xfs_mount	*mp,
	struct list_head	*capture_list)
{
	struct xfs_defer_capture *dfc, *next;
	struct xfs_trans	*tp;
	int			error = 0;

	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
		struct xfs_trans_res	resv;
		struct xfs_defer_resources dres;

		/*
		 * Create a new transaction reservation from the captured
		 * information. Set logcount to 1 to force the new transaction
		 * to regrant every roll so that we can make forward progress
		 * in recovery no matter how full the log might be.
		 */
		resv.tr_logres = dfc->dfc_logres;
		resv.tr_logcount = 1;
		resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;

		error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
				dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
		if (error) {
			xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR);
			return error;
		}

		/*
		 * Transfer to this new transaction all the dfops we captured
		 * from recovering a single intent item.
		 */
		list_del_init(&dfc->dfc_list);
		xfs_defer_ops_continue(dfc, tp, &dres);
		error = xfs_trans_commit(tp);
		xfs_defer_resources_rele(&dres);
		if (error)
			return error;
	}

	ASSERT(list_empty(capture_list));
	return 0;
}
/* Release all the captured defer ops and capture structures in this list. */
static void
xlog_abort_defer_ops(
	struct xfs_mount		*mp,
	struct list_head		*capture_list)
{
	struct xfs_defer_capture	*dfc;
	struct xfs_defer_capture	*next;

	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
		list_del_init(&dfc->dfc_list);
		xfs_defer_ops_capture_abort(mp, dfc);
	}
}
/*
 * When this is called, all of the log intent items which did not have
 * corresponding log done items should be in the AIL. What we do now is update
 * the data structures associated with each one.
 *
 * Since we process the log intent items in normal transactions, they will be
 * removed at some point after the commit. This prevents us from just walking
 * down the list processing each one. We'll use a flag in the intent item to
 * skip those that we've already processed and use the AIL iteration mechanism's
 * generation count to try to speed this up at least a bit.
 *
 * When we start, we know that the intents are the only things in the AIL. As we
 * process them, however, other items are added to the AIL. Hence we know we
 * have started recovery on all the pending intents when we find a non-intent
 * item in the AIL.
 */
STATIC int
xlog_recover_process_intents(
	struct xlog		*log)
{
	LIST_HEAD(capture_list);
	struct xfs_defer_pending *dfp, *n;
	int			error = 0;
#if defined(DEBUG) || defined(XFS_WARN)
	xfs_lsn_t		last_lsn;

	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
#endif

	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
		ASSERT(xlog_item_is_intent(dfp->dfp_intent));

		/*
		 * We should never see a redo item with a LSN higher than
		 * the last transaction we found in the log at the start
		 * of recovery.
		 */
		ASSERT(XFS_LSN_CMP(last_lsn, dfp->dfp_intent->li_lsn) >= 0);

		/*
		 * NOTE: If your intent processing routine can create more
		 * deferred ops, you /must/ attach them to the capture list in
		 * the recover routine or else those subsequent intents will be
		 * replayed in the wrong order!
		 *
		 * The recovery function can free the log item, so we must not
		 * access dfp->dfp_intent after it returns. It must dispose of
		 * @dfp if it returns 0.
		 */
		error = xfs_defer_finish_recovery(log->l_mp, dfp,
				&capture_list);
		if (error)
			break;
	}
	if (error)
		goto err;

	error = xlog_finish_defer_ops(log->l_mp, &capture_list);
	if (error)
		goto err;

	return 0;
err:
	xlog_abort_defer_ops(log->l_mp, &capture_list);
	return error;
}
/*
 * A cancel occurs when the mount has failed and we're bailing out. Release all
 * pending log intent items that we haven't started recovery on so they don't
 * pin the AIL.
 */
STATIC void
xlog_recover_cancel_intents(
	struct xlog		*log)
{
	struct xfs_defer_pending *dfp, *n;

	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
		ASSERT(xlog_item_is_intent(dfp->dfp_intent));

		xfs_defer_cancel_recovery(log->l_mp, dfp);
	}
}
/*
 * Transfer ownership of the recovered pending work to the recovery transaction
 * and try to finish the work. If there is more work to be done, the dfp will
 * remain attached to the transaction. If not, the dfp is freed.
 */
int
xlog_recover_finish_intent(
	struct xfs_trans	*tp,
	struct xfs_defer_pending *dfp)
{
	int			error;

	list_move(&dfp->dfp_list, &tp->t_dfops);
	error = xfs_defer_finish_one(tp, dfp);
	if (error == -EAGAIN)
		return 0;
	return error;
}
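
/*
 * Note on the -EAGAIN case above: xfs_defer_finish_one() returns -EAGAIN
 * when the work item still has steps outstanding, in which case the dfp
 * stays attached to tp->t_dfops (per the comment above) and is finished
 * when the captured dfops are continued, so recovery treats this as
 * success rather than as a failure.
 */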
/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
STATIC void
xlog_recover_clear_agi_bucket(
	struct xfs_perag	*pag,
	int			bucket)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_trans	*tp;
	struct xfs_agi		*agi;
	struct xfs_buf		*agibp;
	int			offset;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	error = xfs_read_agi(pag, tp, 0, &agibp);
	if (error)
		goto out_abort;

	agi = agibp->b_addr;
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp);
out_error:
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__,
			pag->pag_agno);
	return;
}
static int
xlog_recover_iunlink_bucket(
	struct xfs_perag	*pag,
	struct xfs_agi		*agi,
	int			bucket)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_inode	*prev_ip = NULL;
	struct xfs_inode	*ip;
	xfs_agino_t		prev_agino, agino;
	int			error = 0;

	agino = be32_to_cpu(agi->agi_unlinked[bucket]);
	while (agino != NULLAGINO) {
		error = xfs_iget(mp, NULL,
				XFS_AGINO_TO_INO(mp, pag->pag_agno, agino),
				0, 0, &ip);
		if (error)
			break;

		ASSERT(VFS_I(ip)->i_nlink == 0);
		ASSERT(VFS_I(ip)->i_mode != 0);
		xfs_iflags_clear(ip, XFS_IRECOVERY);
		agino = ip->i_next_unlinked;

		if (prev_ip) {
			ip->i_prev_unlinked = prev_agino;
			xfs_irele(prev_ip);

			/*
			 * Ensure the inode is removed from the unlinked list
			 * before we continue so that it won't race with
			 * building the in-memory list here. This could be
			 * serialised with the agibp lock, but that just
			 * serialises via lockstepping and it's much simpler
			 * just to flush the inodegc queue and wait for it to
			 * complete.
			 */
			error = xfs_inodegc_flush(mp);
			if (error)
				break;
		}

		prev_agino = agino;
		prev_ip = ip;
	}

	if (prev_ip) {
		int	error2;

		ip->i_prev_unlinked = prev_agino;
		xfs_irele(prev_ip);

		error2 = xfs_inodegc_flush(mp);
		if (error2 && !error)
			return error2;
	}
	return error;
}
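
/*
 * Sketch of the walk above (hypothetical chain): if a bucket holds agino
 * 103 -> agino 57 -> NULLAGINO, each inode in the chain is igot in turn,
 * its back-pointer for the in-memory double-linked unlinked list is
 * recorded, and the previously held inode is released. Releasing the last
 * reference to an unlinked inode hands it to inodegc for truncation and
 * freeing, which is why the queue is flushed before the walk moves on.
 */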
/*
 * Recover AGI unlinked lists
 *
 * This is called during recovery to process any inodes which we unlinked but
 * not freed when the system crashed. These inodes will be on the lists in the
 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
 * any inodes found on the lists. Each inode is removed from the lists when it
 * has been fully truncated and is freed. The freeing of the inode and its
 * removal from the list must be atomic.
 *
 * If everything we touch in the agi processing loop is already in memory, this
 * loop can hold the cpu for a long time. It runs without lock contention,
 * memory allocation contention, the need to wait for IO, etc, and so will run
 * until we either run out of inodes to process, run low on memory or we run
 * out of log space.
 *
 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
 * and can prevent other filesystem work (such as CIL pushes) from running. This
 * can lead to deadlocks if the recovery process runs out of log reservation
 * space. Hence we need to yield the CPU when there is other kernel work
 * scheduled on this CPU to ensure other scheduled work can run without undue
 * latency.
 */
static void
xlog_recover_iunlink_ag(
	struct xfs_perag	*pag)
{
	struct xfs_agi		*agi;
	struct xfs_buf		*agibp;
	int			bucket;
	int			error;

	error = xfs_read_agi(pag, NULL, 0, &agibp);
	if (error) {
		/*
		 * AGI is b0rked. Don't process it.
		 *
		 * We should probably mark the filesystem as corrupt after we've
		 * recovered all the ag's we can....
		 */
		return;
	}

	/*
	 * Unlock the buffer so that it can be acquired in the normal course of
	 * the transaction to truncate and free each inode. Because we are not
	 * racing with anyone else here for the AGI buffer, we don't even need
	 * to hold it locked to read the initial unlinked bucket entries out of
	 * the buffer. We keep a buffer reference, though, so that it stays
	 * pinned in memory while we need the buffer.
	 */
	agi = agibp->b_addr;
	xfs_buf_unlock(agibp);

	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
		error = xlog_recover_iunlink_bucket(pag, agi, bucket);
		if (error) {
			/*
			 * Bucket is unrecoverable, so only a repair scan can
			 * free the remaining unlinked inodes. Just empty the
			 * bucket; the remaining inodes on it are left
			 * unreferenced and unfreeable.
			 */
			xlog_recover_clear_agi_bucket(pag, bucket);
		}
	}

	xfs_buf_rele(agibp);
}
static void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for_each_perag(log->l_mp, agno, pag)
		xlog_recover_iunlink_ag(pag);
}
STATIC void
xlog_unpack_data(
	struct xlog_rec_header	*rhead,
	char			*dp,
	struct xlog		*log)
{
	int			i, j, k;

	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (xfs_has_logv2(log->l_mp)) {
		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}
}
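
/*
 * Background for the loops above: when a record is written, the first four
 * bytes of every 512-byte basic block in the payload are overwritten with
 * the cycle number so torn writes can be detected, and the displaced words
 * are stashed in h_cycle_data[] (and, for v2 logs, in the extended headers
 * once a record exceeds XLOG_HEADER_CYCLE_SIZE). As an illustration, an
 * 8 KiB record body spans 16 basic blocks, so h_cycle_data[0..15] holds
 * the original words that this function puts back before replay.
 */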
/*
 * CRC check, unpack and process a log record.
 */
STATIC int
xlog_recover_process(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass,
	struct list_head	*buffer_list)
{
	__le32			expected_crc = rhead->h_crc, crc, other_crc;

	crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE,
			be32_to_cpu(rhead->h_len));

	/*
	 * Look at the end of the struct xlog_rec_header definition in
	 * xfs_log_format.h for the gory details.
	 */
	if (expected_crc && crc != expected_crc) {
		other_crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE_OTHER,
				be32_to_cpu(rhead->h_len));
		if (other_crc == expected_crc) {
			xfs_notice_once(log->l_mp,
	"Fixing up incorrect CRC due to padding.");
			crc = other_crc;
		}
	}

	/*
	 * Nothing else to do if this is a CRC verification pass. Just return
	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
	 * sets expected_crc to 0 so we must consider this valid even on v5
	 * supers. Otherwise, return EFSBADCRC on failure so the callers up the
	 * stack know precisely what failed.
	 */
	if (pass == XLOG_RECOVER_CRCPASS) {
		if (expected_crc && crc != expected_crc)
			return -EFSBADCRC;
		return 0;
	}

	/*
	 * We're in the normal recovery path. Issue a warning if and only if the
	 * CRC in the header is non-zero. This is an advisory warning and the
	 * zero CRC check prevents warnings from being emitted when upgrading
	 * the kernel from one that does not add CRCs by default.
	 */
	if (crc != expected_crc) {
		if (expected_crc || xfs_has_crc(log->l_mp)) {
			xfs_alert(log->l_mp,
		"log record CRC mismatch: found 0x%x, expected 0x%x.",
					le32_to_cpu(expected_crc),
					le32_to_cpu(crc));
			xfs_hex_dump(dp, 32);
		}

		/*
		 * If the filesystem is CRC enabled, this mismatch becomes a
		 * fatal log corruption failure.
		 */
		if (xfs_has_crc(log->l_mp)) {
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
	}

	xlog_unpack_data(rhead, dp, log);

	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
					 buffer_list);
}
STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno,
	int			bufsize)
{
	int			hlen;

	if (XFS_IS_CORRUPT(log->l_mp,
			   rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
		return -EFSCORRUPTED;
	if (XFS_IS_CORRUPT(log->l_mp,
			   (!rhead->h_version ||
			   (be32_to_cpu(rhead->h_version) &
			    (~XLOG_VERSION_OKBITS))))) {
		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
			__func__, be32_to_cpu(rhead->h_version));
		return -EFSCORRUPTED;
	}

	/*
	 * LR body must have data (or it wouldn't have been written)
	 * and h_len must not be greater than LR buffer size.
	 */
	hlen = be32_to_cpu(rhead->h_len);
	if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
		return -EFSCORRUPTED;

	if (XFS_IS_CORRUPT(log->l_mp,
			   blkno > log->l_logBBsize || blkno > INT_MAX))
		return -EFSCORRUPTED;
	return 0;
}
/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately. The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 */
  2701. STATIC int
  2702. xlog_do_recovery_pass(
  2703. struct xlog *log,
  2704. xfs_daddr_t head_blk,
  2705. xfs_daddr_t tail_blk,
  2706. int pass,
  2707. xfs_daddr_t *first_bad) /* out: first bad log rec */
  2708. {
  2709. xlog_rec_header_t *rhead;
  2710. xfs_daddr_t blk_no, rblk_no;
  2711. xfs_daddr_t rhead_blk;
  2712. char *offset;
  2713. char *hbp, *dbp;
  2714. int error = 0, h_size, h_len;
  2715. int error2 = 0;
  2716. int bblks, split_bblks;
  2717. int hblks = 1, split_hblks, wrapped_hblks;
  2718. int i;
  2719. struct hlist_head rhash[XLOG_RHASH_SIZE];
  2720. LIST_HEAD (buffer_list);
  2721. ASSERT(head_blk != tail_blk);
  2722. blk_no = rhead_blk = tail_blk;
  2723. for (i = 0; i < XLOG_RHASH_SIZE; i++)
  2724. INIT_HLIST_HEAD(&rhash[i]);
  2725. hbp = xlog_alloc_buffer(log, hblks);
  2726. if (!hbp)
  2727. return -ENOMEM;

        /*
         * Read the header of the tail block and get the iclog buffer size from
         * h_size.  Use this to tell how many sectors make up the log header.
         */
        if (xfs_has_logv2(log->l_mp)) {
                /*
                 * When using variable length iclogs, read first sector of
                 * iclog header and extract the header size from it.  Get a
                 * new hbp that is the correct size.
                 */
                error = xlog_bread(log, tail_blk, 1, hbp, &offset);
                if (error)
                        goto bread_err1;

                rhead = (xlog_rec_header_t *)offset;

                /*
                 * xfsprogs has a bug where record length is based on lsunit
                 * but h_size (iclog size) is hardcoded to 32k.  Now that we
                 * unconditionally CRC verify the unmount record, this means
                 * the log buffer can be too small for the record and cause
                 * an overrun.
                 *
                 * Detect this condition here.  Use lsunit for the buffer size
                 * as long as this looks like the mkfs case.  Otherwise, return
                 * an error to avoid a buffer overrun.
                 */
                h_size = be32_to_cpu(rhead->h_size);
                h_len = be32_to_cpu(rhead->h_len);
                if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
                    rhead->h_num_logops == cpu_to_be32(1)) {
                        xfs_warn(log->l_mp,
                "invalid iclog size (%d bytes), using lsunit (%d bytes)",
                                 h_size, log->l_mp->m_logbsize);
                        h_size = log->l_mp->m_logbsize;
                }

                error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
                if (error)
                        goto bread_err1;

                /*
                 * This open codes xlog_logrec_hblks so that we can reuse the
                 * fixed up h_size value calculated above.  Without that we'd
                 * still allocate the buffer based on the incorrect on-disk
                 * size.
                 */
                if (h_size > XLOG_HEADER_CYCLE_SIZE &&
                    (rhead->h_version & cpu_to_be32(XLOG_VERSION_2))) {
                        hblks = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
                        if (hblks > 1) {
                                kvfree(hbp);
                                hbp = xlog_alloc_buffer(log, hblks);
                                if (!hbp)
                                        return -ENOMEM;
                        }
                }
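
                /*
                 * Worked example (assumed geometry): with 256k iclogs and
                 * XLOG_HEADER_CYCLE_SIZE of 32k, hblks becomes
                 * DIV_ROUND_UP(262144, 32768) == 8, so the header buffer is
                 * reallocated to hold all eight header sectors.
                 */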
        } else {
                ASSERT(log->l_sectBBsize == 1);
                h_size = XLOG_BIG_RECORD_BSIZE;
        }

        dbp = xlog_alloc_buffer(log, BTOBB(h_size));
        if (!dbp) {
                kvfree(hbp);
                return -ENOMEM;
        }

        memset(rhash, 0, sizeof(rhash));
        if (tail_blk > head_blk) {
                /*
                 * Perform recovery around the end of the physical log.
                 * When the head is not on the same cycle number as the tail,
                 * we can't do a sequential recovery.
                 */
                while (blk_no < log->l_logBBsize) {
                        /*
                         * Check for header wrapping around physical end-of-log
                         */
                        offset = hbp;
                        split_hblks = 0;
                        wrapped_hblks = 0;
                        if (blk_no + hblks <= log->l_logBBsize) {
                                /* Read header in one read */
                                error = xlog_bread(log, blk_no, hblks, hbp,
                                                   &offset);
                                if (error)
                                        goto bread_err2;
                        } else {
                                /* This LR is split across physical log end */
                                if (blk_no != log->l_logBBsize) {
                                        /* some data before physical log end */
                                        ASSERT(blk_no <= INT_MAX);
                                        split_hblks =
                                                log->l_logBBsize - (int)blk_no;
                                        ASSERT(split_hblks > 0);
                                        error = xlog_bread(log, blk_no,
                                                           split_hblks, hbp,
                                                           &offset);
                                        if (error)
                                                goto bread_err2;
                                }

                                /*
                                 * Note: this black magic still works with
                                 * large sector sizes (non-512) only because:
                                 * - we increased the buffer size originally
                                 *   by 1 sector giving us enough extra space
                                 *   for the second read;
                                 * - the log start is guaranteed to be sector
                                 *   aligned;
                                 * - we read the log end (LR header start)
                                 *   _first_, then the log start (LR header end)
                                 *   - order is important.
                                 */
                                wrapped_hblks = hblks - split_hblks;
                                error = xlog_bread_noalign(log, 0,
                                                wrapped_hblks,
                                                offset + BBTOB(split_hblks));
                                if (error)
                                        goto bread_err2;
                        }
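
                        /*
                         * Worked example of the split header read above
                         * (illustrative numbers): with l_logBBsize == 1000,
                         * blk_no == 998 and hblks == 4, split_hblks is 2, so
                         * blocks 998-999 are read first, then wrapped_hblks
                         * == 2 blocks from the log start land at
                         * offset + BBTOB(2), reassembling the header
                         * contiguously in hbp.
                         */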

                        rhead = (xlog_rec_header_t *)offset;
                        error = xlog_valid_rec_header(log, rhead,
                                        split_hblks ? blk_no : 0, h_size);
                        if (error)
                                goto bread_err2;

                        bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
                        blk_no += hblks;

                        /*
                         * Read the log record data in multiple reads if it
                         * wraps around the end of the log.  Note that if the
                         * header already wrapped, blk_no could point past the
                         * end of the log.  The record data is contiguous in
                         * that case.
                         */
                        if (blk_no + bblks <= log->l_logBBsize ||
                            blk_no >= log->l_logBBsize) {
                                rblk_no = xlog_wrap_logbno(log, blk_no);
                                error = xlog_bread(log, rblk_no, bblks, dbp,
                                                   &offset);
                                if (error)
                                        goto bread_err2;
                        } else {
                                /* This log record is split across the
                                 * physical end of log */
                                offset = dbp;
                                split_bblks = 0;
                                if (blk_no != log->l_logBBsize) {
                                        /* some data is before the physical
                                         * end of log */
                                        ASSERT(!wrapped_hblks);
                                        ASSERT(blk_no <= INT_MAX);
                                        split_bblks =
                                                log->l_logBBsize - (int)blk_no;
                                        ASSERT(split_bblks > 0);
                                        error = xlog_bread(log, blk_no,
                                                           split_bblks, dbp,
                                                           &offset);
                                        if (error)
                                                goto bread_err2;
                                }

                                /*
                                 * Note: this black magic still works with
                                 * large sector sizes (non-512) only because:
                                 * - we increased the buffer size originally
                                 *   by 1 sector giving us enough extra space
                                 *   for the second read;
                                 * - the log start is guaranteed to be sector
                                 *   aligned;
                                 * - we read the log end (LR header start)
                                 *   _first_, then the log start (LR header end)
                                 *   - order is important.
                                 */
                                error = xlog_bread_noalign(log, 0,
                                                bblks - split_bblks,
                                                offset + BBTOB(split_bblks));
                                if (error)
                                        goto bread_err2;
                        }
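
                        /*
                         * Worked examples for the two data-read cases above
                         * (illustrative numbers, l_logBBsize == 1000):
                         * - header already wrapped, blk_no == 1002:
                         *   xlog_wrap_logbno() maps it to rblk_no == 2 and
                         *   the record data is read contiguously;
                         * - record straddles the end, blk_no == 995 and
                         *   bblks == 10: split_bblks == 5, so blocks 995-999
                         *   are read first, then 5 more from block 0 into
                         *   dbp at offset + BBTOB(5).
                         */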

                        error = xlog_recover_process(log, rhash, rhead, offset,
                                                     pass, &buffer_list);
                        if (error)
                                goto bread_err2;

                        blk_no += bblks;
                        rhead_blk = blk_no;
                }

                ASSERT(blk_no >= log->l_logBBsize);
                blk_no -= log->l_logBBsize;
                rhead_blk = blk_no;
        }

        /* read first part of physical log */
        while (blk_no < head_blk) {
                error = xlog_bread(log, blk_no, hblks, hbp, &offset);
                if (error)
                        goto bread_err2;

                rhead = (xlog_rec_header_t *)offset;
                error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
                if (error)
                        goto bread_err2;

                /* blocks in data section */
                bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
                error = xlog_bread(log, blk_no+hblks, bblks, dbp,
                                   &offset);
                if (error)
                        goto bread_err2;

                error = xlog_recover_process(log, rhash, rhead, offset, pass,
                                             &buffer_list);
                if (error)
                        goto bread_err2;

                blk_no += bblks + hblks;
                rhead_blk = blk_no;
        }
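
        /*
         * Each iteration of the sequential loop above consumes one record:
         * e.g. (assumed sizes) with hblks == 1 and a record body of 16 basic
         * blocks, blk_no advances by 17 blocks per record until it reaches
         * head_blk.
         */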
 bread_err2:
        kvfree(dbp);
 bread_err1:
        kvfree(hbp);

        /*
         * Submit buffers that have been dirtied by the last record recovered.
         */
        if (!list_empty(&buffer_list)) {
                if (error) {
                        /*
                         * If there has been an item recovery error then we
                         * cannot allow partial checkpoint writeback to
                         * occur.  We might have multiple checkpoints with the
                         * same start LSN in this buffer list, and partial
                         * writeback of a checkpoint in this situation can
                         * prevent future recovery of all the changes in the
                         * checkpoints at this start LSN.
                         *
                         * Note: Shutting down the filesystem will result in
                         * the delwri submission marking all the buffers stale,
                         * completing them and cleaning up _XBF_LOGRECOVERY
                         * state without doing any IO.
                         */
                        xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
                }
                error2 = xfs_buf_delwri_submit(&buffer_list);
        }

        if (error && first_bad)
                *first_bad = rhead_blk;

        /*
         * Transactions are freed at commit time but transactions without
         * commit records on disk are never committed.  Free any that may be
         * left in the hash table.
         */
        for (i = 0; i < XLOG_RHASH_SIZE; i++) {
                struct hlist_node       *tmp;
                struct xlog_recover     *trans;

                hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
                        xlog_recover_free_trans(trans);
        }

        return error ? error : error2;
}

/*
 * Do the recovery of the log.  We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log.  The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled.  The handling of the replay and cancellations
 * takes place in the log item type specific routines.
 *
 * The table of items which have cancel records in the log is allocated
 * and freed at this level, since only here do we know when all of
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(
        struct xlog     *log,
        xfs_daddr_t     head_blk,
        xfs_daddr_t     tail_blk)
{
        int             error;

        ASSERT(head_blk != tail_blk);

        /*
         * First do a pass to find all of the cancelled buf log items.
         * Store them in the buf_cancel_table for use in the second pass.
         */
        error = xlog_alloc_buf_cancel_table(log);
        if (error)
                return error;

        error = xlog_do_recovery_pass(log, head_blk, tail_blk,
                                      XLOG_RECOVER_PASS1, NULL);
        if (error != 0)
                goto out_cancel;

        /*
         * Then do a second pass to actually recover the items in the log.
         * When it is complete free the table of buf cancel items.
         */
        error = xlog_do_recovery_pass(log, head_blk, tail_blk,
                                      XLOG_RECOVER_PASS2, NULL);
        if (!error)
                xlog_check_buf_cancel_table(log);
out_cancel:
        xlog_free_buf_cancel_table(log);
        return error;
}
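
/*
 * Sketch of the cancellation mechanism the two passes implement (details
 * assumed; the buf item recovery code is the authoritative version): pass 1
 * records every buffer logged with the cancel flag (XFS_BLF_CANCEL) in the
 * buf_cancel_table; pass 2 then consults that table and skips replay of
 * earlier, stale copies of those buffers.
 */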

/*
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        xfs_daddr_t             tail_blk)
{
        struct xfs_mount        *mp = log->l_mp;
        struct xfs_buf          *bp = mp->m_sb_bp;
        struct xfs_sb           *sbp = &mp->m_sb;
        int                     error;

        trace_xfs_log_recover(log, head_blk, tail_blk);

        /*
         * First replay the images in the log.
         */
        error = xlog_do_log_recovery(log, head_blk, tail_blk);
        if (error)
                return error;

        if (xlog_is_shutdown(log))
                return -EIO;

        /*
         * We now update the tail_lsn since much of the recovery has completed
         * and there may be space available to use.  If there were no extent or
         * iunlinks, we can free up the entire log and set the tail_lsn to be
         * the last_sync_lsn.  This was set in xlog_find_tail to be the lsn of
         * the last known good LR on disk.  If there are extent frees or
         * iunlinks they will have some entries in the AIL; so we look at the
         * AIL to determine how to set the tail_lsn.
         */
        xfs_ail_assign_tail_lsn(log->l_ailp);

        /*
         * Now that we've finished replaying all buffer and inode updates,
         * re-read the superblock and reverify it.
         */
        xfs_buf_lock(bp);
        xfs_buf_hold(bp);
        error = _xfs_buf_read(bp, XBF_READ);
        if (error) {
                if (!xlog_is_shutdown(log)) {
                        xfs_buf_ioerror_alert(bp, __this_address);
                        ASSERT(0);
                }
                xfs_buf_relse(bp);
                return error;
        }

        /* Convert superblock from on-disk format */
        xfs_sb_from_disk(sbp, bp->b_addr);
        xfs_buf_relse(bp);

        /* re-initialise in-core superblock and geometry structures */
        mp->m_features |= xfs_sb_version_to_features(sbp);
        xfs_reinit_percpu_counters(mp);

        /* Normal transactions can now occur */
        clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
        return 0;
}

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
        struct xlog     *log)
{
        xfs_daddr_t     head_blk, tail_blk;
        int             error;

        /* find the tail of the log */
        error = xlog_find_tail(log, &head_blk, &tail_blk);
        if (error)
                return error;

        /*
         * The superblock was read before the log was available and thus the
         * LSN could not be verified. Check the superblock LSN against the
         * current LSN now that it's known.
         */
        if (xfs_has_crc(log->l_mp) &&
            !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
                return -EINVAL;

        if (tail_blk != head_blk) {
                /* There used to be a comment here:
                 *
                 * disallow recovery on read-only mounts.  note -- mount
                 * checks for ENOSPC and turns it into an intelligent
                 * error message.
                 * ...but this is no longer true.  Now, unless you specify
                 * NORECOVERY (in which case this function would never be
                 * called), we just go ahead and recover.  We do this all
                 * under the vfs layer, so we can get away with it unless
                 * the device itself is read-only, in which case we fail.
                 */
                if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
                        return error;
                }

                /*
                 * Version 5 superblock log feature mask validation. We know
                 * the log is dirty so check if there are any unknown log
                 * features in what we need to recover. If there are unknown
                 * features (e.g. unsupported transactions), then simply
                 * reject the attempt at recovery before touching anything.
                 */
                if (xfs_sb_is_v5(&log->l_mp->m_sb) &&
                    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
                                        XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
                        xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.",
                                (log->l_mp->m_sb.sb_features_log_incompat &
                                        XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
                        xfs_warn(log->l_mp,
"The log can not be fully and/or safely recovered by this kernel.");
                        xfs_warn(log->l_mp,
"Please recover the log on a kernel that supports the unknown features.");
                        return -EINVAL;
                }

                /*
                 * Delay log recovery if the debug hook is set. This is debug
                 * instrumentation to coordinate simulation of I/O failures
                 * with log recovery.
                 */
                if (xfs_globals.log_recovery_delay) {
                        xfs_notice(log->l_mp,
                                "Delaying log recovery for %d seconds.",
                                xfs_globals.log_recovery_delay);
                        msleep(xfs_globals.log_recovery_delay * 1000);
                }

                xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
                                log->l_mp->m_logname ? log->l_mp->m_logname
                                                     : "internal");

                error = xlog_do_recover(log, head_blk, tail_blk);
                set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
        }
        return error;
}
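
/*
 * Typical call sequence at mount time, sketched for orientation (caller
 * names assumed from the wider XFS code, not defined in this file):
 * xfs_log_mount() calls xlog_recover() to replay the dirty log, and
 * xfs_log_mount_finish() later calls xlog_recover_finish() below to process
 * intents and unlinked inode lists once the root and realtime inodes are
 * readable.
 */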

/*
 * In the first part of recovery we replay inodes and buffers and build up the
 * list of intents which need to be processed.  Here we process the intents
 * and clean up the on disk unlinked inode lists.  This is separated from the
 * first part of recovery so that the root and real-time bitmap inodes can be
 * read in from disk in between the two stages.  This is necessary so that we
 * can free space in the real-time portion of the file system.
 *
 * We run this whole process under GFP_NOFS allocation context. We do a
 * combination of non-transactional and transactional work, yet we really
 * don't want to recurse into the filesystem from direct reclaim during any of
 * this processing. This allows all the recovery code run here not to care
 * about the memory allocation context it is running in.
 */
int
xlog_recover_finish(
        struct xlog     *log)
{
        unsigned int    nofs_flags = memalloc_nofs_save();
        int             error;

        error = xlog_recover_process_intents(log);
        if (error) {
                /*
                 * Cancel all the unprocessed intent items now so that we
                 * don't leave them pinned in the AIL.  This can cause the AIL
                 * to livelock on the pinned item if anyone tries to push the
                 * AIL (inode reclaim does this) before we get around to
                 * xfs_log_mount_cancel.
                 */
                xlog_recover_cancel_intents(log);
                xfs_alert(log->l_mp, "Failed to recover intents");
                xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
                goto out_error;
        }

        /*
         * Sync the log to get all the intents out of the AIL.  This isn't
         * absolutely necessary, but it helps in case the unlink transactions
         * would have problems pushing the intents out of the way.
         */
        xfs_log_force(log->l_mp, XFS_LOG_SYNC);

        xlog_recover_process_iunlinks(log);

        /*
         * Recover any CoW staging blocks that are still referenced by the
         * ondisk refcount metadata.  During mount there cannot be any live
         * staging extents as we have not permitted any user modifications.
         * Therefore, it is safe to free them all right now, even on a
         * read-only mount.
         */
        error = xfs_reflink_recover_cow(log->l_mp);
        if (error) {
                xfs_alert(log->l_mp,
        "Failed to recover leftover CoW staging extents, err %d.",
                        error);
                /*
                 * If we get an error here, make sure the log is shut down
                 * but return zero so that any log items committed since the
                 * end of intents processing can be pushed through the CIL
                 * and AIL.
                 */
                xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
                error = 0;
                goto out_error;
        }

out_error:
        memalloc_nofs_restore(nofs_flags);
        return error;
}

void
xlog_recover_cancel(
        struct xlog     *log)
{
        if (xlog_recovery_needed(log))
                xlog_recover_cancel_intents(log);
}