agheader_repair.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
  4. * Author: Darrick J. Wong <djwong@kernel.org>
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_trans_resv.h"
  11. #include "xfs_mount.h"
  12. #include "xfs_btree.h"
  13. #include "xfs_log_format.h"
  14. #include "xfs_trans.h"
  15. #include "xfs_sb.h"
  16. #include "xfs_alloc.h"
  17. #include "xfs_alloc_btree.h"
  18. #include "xfs_ialloc.h"
  19. #include "xfs_ialloc_btree.h"
  20. #include "xfs_rmap.h"
  21. #include "xfs_rmap_btree.h"
  22. #include "xfs_refcount_btree.h"
  23. #include "xfs_ag.h"
  24. #include "xfs_inode.h"
  25. #include "xfs_iunlink_item.h"
  26. #include "scrub/scrub.h"
  27. #include "scrub/common.h"
  28. #include "scrub/trace.h"
  29. #include "scrub/repair.h"
  30. #include "scrub/bitmap.h"
  31. #include "scrub/agb_bitmap.h"
  32. #include "scrub/agino_bitmap.h"
  33. #include "scrub/reap.h"
  34. #include "scrub/xfile.h"
  35. #include "scrub/xfarray.h"
  36. /* Superblock */
  37. /* Repair the superblock. */
  38. int
  39. xrep_superblock(
  40. struct xfs_scrub *sc)
  41. {
  42. struct xfs_mount *mp = sc->mp;
  43. struct xfs_buf *bp;
  44. xfs_agnumber_t agno;
  45. int error;
  46. /* Don't try to repair AG 0's sb; let xfs_repair deal with it. */
  47. agno = sc->sm->sm_agno;
  48. if (agno == 0)
  49. return -EOPNOTSUPP;
  50. error = xfs_sb_get_secondary(mp, sc->tp, agno, &bp);
  51. if (error)
  52. return error;
  53. /* Last chance to abort before we start committing fixes. */
  54. if (xchk_should_terminate(sc, &error))
  55. return error;
  56. /* Copy AG 0's superblock to this one. */
  57. xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
  58. xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
  59. /*
  60. * Don't write out a secondary super with NEEDSREPAIR or log incompat
  61. * features set, since both are ignored when set on a secondary.
  62. */
  63. if (xfs_has_crc(mp)) {
  64. struct xfs_dsb *sb = bp->b_addr;
  65. sb->sb_features_incompat &=
  66. ~cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR);
  67. sb->sb_features_log_incompat = 0;
  68. }
  69. /* Write this to disk. */
  70. xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF);
  71. xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
  72. return 0;
  73. }
  74. /* AGF */
  75. struct xrep_agf_allocbt {
  76. struct xfs_scrub *sc;
  77. xfs_agblock_t freeblks;
  78. xfs_agblock_t longest;
  79. };
  80. /* Record free space shape information. */
  81. STATIC int
  82. xrep_agf_walk_allocbt(
  83. struct xfs_btree_cur *cur,
  84. const struct xfs_alloc_rec_incore *rec,
  85. void *priv)
  86. {
  87. struct xrep_agf_allocbt *raa = priv;
  88. int error = 0;
  89. if (xchk_should_terminate(raa->sc, &error))
  90. return error;
  91. raa->freeblks += rec->ar_blockcount;
  92. if (rec->ar_blockcount > raa->longest)
  93. raa->longest = rec->ar_blockcount;
  94. return error;
  95. }
  96. /* Does this AGFL block look sane? */
  97. STATIC int
  98. xrep_agf_check_agfl_block(
  99. struct xfs_mount *mp,
  100. xfs_agblock_t agbno,
  101. void *priv)
  102. {
  103. struct xfs_scrub *sc = priv;
  104. if (!xfs_verify_agbno(sc->sa.pag, agbno))
  105. return -EFSCORRUPTED;
  106. return 0;
  107. }
  108. /*
  109. * Offset within the xrep_find_ag_btree array for each btree type. Avoid the
  110. * XFS_BTNUM_ names here to avoid creating a sparse array.
  111. */
  112. enum {
  113. XREP_AGF_BNOBT = 0,
  114. XREP_AGF_CNTBT,
  115. XREP_AGF_RMAPBT,
  116. XREP_AGF_REFCOUNTBT,
  117. XREP_AGF_END,
  118. XREP_AGF_MAX
  119. };
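/*
 * Note, added for clarity: XREP_AGF_END is the slot whose NULL buf_ops
 * terminates the fab[] list handed to xrep_find_ag_btree_roots(), and
 * XREP_AGF_MAX sizes that array in xrep_agf() below.
 */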
  120. /* Check a btree root candidate. */
  121. static inline bool
  122. xrep_check_btree_root(
  123. struct xfs_scrub *sc,
  124. struct xrep_find_ag_btree *fab)
  125. {
  126. return xfs_verify_agbno(sc->sa.pag, fab->root) &&
  127. fab->height <= fab->maxlevels;
  128. }
  129. /*
  130. * Given the btree roots described by *fab, find the roots, check them for
  131. * sanity, and pass the root data back out via *fab.
  132. *
  133. * This is /also/ a chicken and egg problem because we have to use the rmapbt
  134. * (rooted in the AGF) to find the btrees rooted in the AGF. We also have no
  135. * idea if the btrees make any sense. If we hit obvious corruptions in those
  136. * btrees we'll bail out.
  137. */
  138. STATIC int
  139. xrep_agf_find_btrees(
  140. struct xfs_scrub *sc,
  141. struct xfs_buf *agf_bp,
  142. struct xrep_find_ag_btree *fab,
  143. struct xfs_buf *agfl_bp)
  144. {
  145. struct xfs_agf *old_agf = agf_bp->b_addr;
  146. int error;
  147. /* Go find the root data. */
  148. error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
  149. if (error)
  150. return error;
  151. /* We must find the bnobt, cntbt, and rmapbt roots. */
  152. if (!xrep_check_btree_root(sc, &fab[XREP_AGF_BNOBT]) ||
  153. !xrep_check_btree_root(sc, &fab[XREP_AGF_CNTBT]) ||
  154. !xrep_check_btree_root(sc, &fab[XREP_AGF_RMAPBT]))
  155. return -EFSCORRUPTED;
  156. /*
  157. * We relied on the rmapbt to reconstruct the AGF. If we get a
  158. * different root then something's seriously wrong.
  159. */
  160. if (fab[XREP_AGF_RMAPBT].root != be32_to_cpu(old_agf->agf_rmap_root))
  161. return -EFSCORRUPTED;
  162. /* We must find the refcountbt root if that feature is enabled. */
  163. if (xfs_has_reflink(sc->mp) &&
  164. !xrep_check_btree_root(sc, &fab[XREP_AGF_REFCOUNTBT]))
  165. return -EFSCORRUPTED;
  166. return 0;
  167. }
  168. /*
  169. * Reinitialize the AGF header, making an in-core copy of the old contents so
  170. * that we know which in-core state needs to be reinitialized.
  171. */
  172. STATIC void
  173. xrep_agf_init_header(
  174. struct xfs_scrub *sc,
  175. struct xfs_buf *agf_bp,
  176. struct xfs_agf *old_agf)
  177. {
  178. struct xfs_mount *mp = sc->mp;
  179. struct xfs_perag *pag = sc->sa.pag;
  180. struct xfs_agf *agf = agf_bp->b_addr;
  181. memcpy(old_agf, agf, sizeof(*old_agf));
  182. memset(agf, 0, BBTOB(agf_bp->b_length));
  183. agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
  184. agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
  185. agf->agf_seqno = cpu_to_be32(pag->pag_agno);
  186. agf->agf_length = cpu_to_be32(pag->block_count);
  187. agf->agf_flfirst = old_agf->agf_flfirst;
  188. agf->agf_fllast = old_agf->agf_fllast;
  189. agf->agf_flcount = old_agf->agf_flcount;
  190. if (xfs_has_crc(mp))
  191. uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
  192. /* Mark the incore AGF data stale until we're done fixing things. */
  193. ASSERT(xfs_perag_initialised_agf(pag));
  194. clear_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
  195. }
  196. /* Set btree root information in an AGF. */
  197. STATIC void
  198. xrep_agf_set_roots(
  199. struct xfs_scrub *sc,
  200. struct xfs_agf *agf,
  201. struct xrep_find_ag_btree *fab)
  202. {
  203. agf->agf_bno_root = cpu_to_be32(fab[XREP_AGF_BNOBT].root);
  204. agf->agf_bno_level = cpu_to_be32(fab[XREP_AGF_BNOBT].height);
  205. agf->agf_cnt_root = cpu_to_be32(fab[XREP_AGF_CNTBT].root);
  206. agf->agf_cnt_level = cpu_to_be32(fab[XREP_AGF_CNTBT].height);
  207. agf->agf_rmap_root = cpu_to_be32(fab[XREP_AGF_RMAPBT].root);
  208. agf->agf_rmap_level = cpu_to_be32(fab[XREP_AGF_RMAPBT].height);
  209. if (xfs_has_reflink(sc->mp)) {
  210. agf->agf_refcount_root =
  211. cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].root);
  212. agf->agf_refcount_level =
  213. cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].height);
  214. }
  215. }
  216. /* Update all AGF fields which derive from btree contents. */
  217. STATIC int
  218. xrep_agf_calc_from_btrees(
  219. struct xfs_scrub *sc,
  220. struct xfs_buf *agf_bp)
  221. {
  222. struct xrep_agf_allocbt raa = { .sc = sc };
  223. struct xfs_btree_cur *cur = NULL;
  224. struct xfs_agf *agf = agf_bp->b_addr;
  225. struct xfs_mount *mp = sc->mp;
  226. xfs_agblock_t btreeblks;
  227. xfs_filblks_t blocks;
  228. int error;
  229. /* Update the AGF counters from the bnobt. */
  230. cur = xfs_bnobt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
  231. error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
  232. if (error)
  233. goto err;
  234. error = xfs_btree_count_blocks(cur, &blocks);
  235. if (error)
  236. goto err;
  237. xfs_btree_del_cursor(cur, error);
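/*
 * Count the bnobt blocks minus one; as far as we can tell, agf_btreeblks
 * tracks only the blocks beyond each btree's root, which is why the cntbt
 * and rmapbt tallies below also leave out one block apiece.
 */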
  238. btreeblks = blocks - 1;
  239. agf->agf_freeblks = cpu_to_be32(raa.freeblks);
  240. agf->agf_longest = cpu_to_be32(raa.longest);
  241. /* Update the AGF counters from the cntbt. */
  242. cur = xfs_cntbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
  243. error = xfs_btree_count_blocks(cur, &blocks);
  244. if (error)
  245. goto err;
  246. xfs_btree_del_cursor(cur, error);
  247. btreeblks += blocks - 1;
  248. /* Update the AGF counters from the rmapbt. */
  249. cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
  250. error = xfs_btree_count_blocks(cur, &blocks);
  251. if (error)
  252. goto err;
  253. xfs_btree_del_cursor(cur, error);
  254. agf->agf_rmap_blocks = cpu_to_be32(blocks);
  255. btreeblks += blocks - 1;
  256. agf->agf_btreeblks = cpu_to_be32(btreeblks);
  257. /* Update the AGF counters from the refcountbt. */
  258. if (xfs_has_reflink(mp)) {
  259. cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
  260. sc->sa.pag);
  261. error = xfs_btree_count_blocks(cur, &blocks);
  262. if (error)
  263. goto err;
  264. xfs_btree_del_cursor(cur, error);
  265. agf->agf_refcount_blocks = cpu_to_be32(blocks);
  266. }
  267. return 0;
  268. err:
  269. xfs_btree_del_cursor(cur, error);
  270. return error;
  271. }
  272. /* Commit the new AGF and reinitialize the incore state. */
  273. STATIC int
  274. xrep_agf_commit_new(
  275. struct xfs_scrub *sc,
  276. struct xfs_buf *agf_bp)
  277. {
  278. struct xfs_perag *pag;
  279. struct xfs_agf *agf = agf_bp->b_addr;
  280. /* Trigger fdblocks recalculation */
  281. xfs_force_summary_recalc(sc->mp);
  282. /* Write this to disk. */
  283. xfs_trans_buf_set_type(sc->tp, agf_bp, XFS_BLFT_AGF_BUF);
  284. xfs_trans_log_buf(sc->tp, agf_bp, 0, BBTOB(agf_bp->b_length) - 1);
  285. /* Now reinitialize the in-core counters we changed. */
  286. pag = sc->sa.pag;
  287. pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
  288. pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
  289. pag->pagf_longest = be32_to_cpu(agf->agf_longest);
  290. pag->pagf_bno_level = be32_to_cpu(agf->agf_bno_level);
  291. pag->pagf_cnt_level = be32_to_cpu(agf->agf_cnt_level);
  292. pag->pagf_rmap_level = be32_to_cpu(agf->agf_rmap_level);
  293. pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
  294. set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
  295. return xrep_roll_ag_trans(sc);
  296. }
  297. /* Repair the AGF. v5 filesystems only. */
  298. int
  299. xrep_agf(
  300. struct xfs_scrub *sc)
  301. {
  302. struct xrep_find_ag_btree fab[XREP_AGF_MAX] = {
  303. [XREP_AGF_BNOBT] = {
  304. .rmap_owner = XFS_RMAP_OWN_AG,
  305. .buf_ops = &xfs_bnobt_buf_ops,
  306. .maxlevels = sc->mp->m_alloc_maxlevels,
  307. },
  308. [XREP_AGF_CNTBT] = {
  309. .rmap_owner = XFS_RMAP_OWN_AG,
  310. .buf_ops = &xfs_cntbt_buf_ops,
  311. .maxlevels = sc->mp->m_alloc_maxlevels,
  312. },
  313. [XREP_AGF_RMAPBT] = {
  314. .rmap_owner = XFS_RMAP_OWN_AG,
  315. .buf_ops = &xfs_rmapbt_buf_ops,
  316. .maxlevels = sc->mp->m_rmap_maxlevels,
  317. },
  318. [XREP_AGF_REFCOUNTBT] = {
  319. .rmap_owner = XFS_RMAP_OWN_REFC,
  320. .buf_ops = &xfs_refcountbt_buf_ops,
  321. .maxlevels = sc->mp->m_refc_maxlevels,
  322. },
  323. [XREP_AGF_END] = {
  324. .buf_ops = NULL,
  325. },
  326. };
  327. struct xfs_agf old_agf;
  328. struct xfs_mount *mp = sc->mp;
  329. struct xfs_buf *agf_bp;
  330. struct xfs_buf *agfl_bp;
  331. struct xfs_agf *agf;
  332. int error;
  333. /* We require the rmapbt to rebuild anything. */
  334. if (!xfs_has_rmapbt(mp))
  335. return -EOPNOTSUPP;
  336. /*
  337. * Make sure we have the AGF buffer, as scrub might have decided it
  338. * was corrupt after xfs_alloc_read_agf failed with -EFSCORRUPTED.
  339. */
  340. error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
  341. XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
  342. XFS_AGF_DADDR(mp)),
  343. XFS_FSS_TO_BB(mp, 1), 0, &agf_bp, NULL);
  344. if (error)
  345. return error;
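/*
 * The raw read above passed a NULL buf_ops so that a corrupt AGF would not
 * fail read verification; attach the verifier now so the rebuilt header is
 * checked when it goes back out to disk.
 */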
  346. agf_bp->b_ops = &xfs_agf_buf_ops;
  347. agf = agf_bp->b_addr;
  348. /*
  349. * Load the AGFL so that we can screen out OWN_AG blocks that are on
  350. * the AGFL now; these blocks might have once been part of the
  351. * bno/cnt/rmap btrees but are not now. This is a chicken and egg
  352. * problem: the AGF is corrupt, so we have to trust the AGFL contents
  353. * because we can't do any serious cross-referencing with any of the
  354. * btrees rooted in the AGF. If the AGFL contents are obviously bad
  355. * then we'll bail out.
  356. */
  357. error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
  358. if (error)
  359. return error;
  360. /*
  361. * Spot-check the AGFL blocks; if they're obviously corrupt then
  362. * there's nothing we can do but bail out.
  363. */
  364. error = xfs_agfl_walk(sc->mp, agf_bp->b_addr, agfl_bp,
  365. xrep_agf_check_agfl_block, sc);
  366. if (error)
  367. return error;
  368. /*
  369. * Find the AGF btree roots. This is also a chicken-and-egg situation;
  370. * see the function for more details.
  371. */
  372. error = xrep_agf_find_btrees(sc, agf_bp, fab, agfl_bp);
  373. if (error)
  374. return error;
  375. /* Last chance to abort before we start committing fixes. */
  376. if (xchk_should_terminate(sc, &error))
  377. return error;
  378. /* Start rewriting the header and implant the btrees we found. */
  379. xrep_agf_init_header(sc, agf_bp, &old_agf);
  380. xrep_agf_set_roots(sc, agf, fab);
  381. error = xrep_agf_calc_from_btrees(sc, agf_bp);
  382. if (error)
  383. goto out_revert;
  384. /* Commit the changes and reinitialize incore state. */
  385. return xrep_agf_commit_new(sc, agf_bp);
  386. out_revert:
  387. /* Mark the incore AGF state stale and revert the AGF. */
  388. clear_bit(XFS_AGSTATE_AGF_INIT, &sc->sa.pag->pag_opstate);
  389. memcpy(agf, &old_agf, sizeof(old_agf));
  390. return error;
  391. }
  392. /* AGFL */
  393. struct xrep_agfl {
  394. /* Bitmap of alleged AGFL blocks that we're not going to add. */
  395. struct xagb_bitmap crossed;
  396. /* Bitmap of other OWN_AG metadata blocks. */
  397. struct xagb_bitmap agmetablocks;
  398. /* Bitmap of free space. */
  399. struct xagb_bitmap *freesp;
  400. /* rmapbt cursor for finding crosslinked blocks */
  401. struct xfs_btree_cur *rmap_cur;
  402. struct xfs_scrub *sc;
  403. };
  404. /* Record all OWN_AG (free space btree) information from the rmap data. */
  405. STATIC int
  406. xrep_agfl_walk_rmap(
  407. struct xfs_btree_cur *cur,
  408. const struct xfs_rmap_irec *rec,
  409. void *priv)
  410. {
  411. struct xrep_agfl *ra = priv;
  412. int error = 0;
  413. if (xchk_should_terminate(ra->sc, &error))
  414. return error;
  415. /* Record all the OWN_AG blocks. */
  416. if (rec->rm_owner == XFS_RMAP_OWN_AG) {
  417. error = xagb_bitmap_set(ra->freesp, rec->rm_startblock,
  418. rec->rm_blockcount);
  419. if (error)
  420. return error;
  421. }
  422. return xagb_bitmap_set_btcur_path(&ra->agmetablocks, cur);
  423. }
  424. /* Strike out the blocks that are cross-linked according to the rmapbt. */
  425. STATIC int
  426. xrep_agfl_check_extent(
  427. uint32_t agbno,
  428. uint32_t len,
  429. void *priv)
  430. {
  431. struct xrep_agfl *ra = priv;
  432. xfs_agblock_t last_agbno = agbno + len - 1;
  433. int error;
  434. while (agbno <= last_agbno) {
  435. bool other_owners;
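/*
 * Ask the rmapbt whether any owner other than OWN_AG claims this block.
 * If so, it's cross-linked with other metadata and must not be put on
 * the AGFL.
 */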
  436. error = xfs_rmap_has_other_keys(ra->rmap_cur, agbno, 1,
  437. &XFS_RMAP_OINFO_AG, &other_owners);
  438. if (error)
  439. return error;
  440. if (other_owners) {
  441. error = xagb_bitmap_set(&ra->crossed, agbno, 1);
  442. if (error)
  443. return error;
  444. }
  445. if (xchk_should_terminate(ra->sc, &error))
  446. return error;
  447. agbno++;
  448. }
  449. return 0;
  450. }
  451. /*
  452. * Map out all the non-AGFL OWN_AG space in this AG so that we can deduce
  453. * which blocks belong to the AGFL.
  454. *
  455. * Compute the set of old AGFL blocks by subtracting from the list of OWN_AG
  456. * blocks the list of blocks owned by all other OWN_AG metadata (bnobt, cntbt,
  457. * rmapbt). These are the old AGFL blocks, so return that list and the number
  458. * of blocks we're actually going to put back on the AGFL.
  459. */
  460. STATIC int
  461. xrep_agfl_collect_blocks(
  462. struct xfs_scrub *sc,
  463. struct xfs_buf *agf_bp,
  464. struct xagb_bitmap *agfl_extents,
  465. xfs_agblock_t *flcount)
  466. {
  467. struct xrep_agfl ra;
  468. struct xfs_mount *mp = sc->mp;
  469. struct xfs_btree_cur *cur;
  470. int error;
  471. ra.sc = sc;
  472. ra.freesp = agfl_extents;
  473. xagb_bitmap_init(&ra.agmetablocks);
  474. xagb_bitmap_init(&ra.crossed);
  475. /* Find all space used by the free space btrees & rmapbt. */
  476. cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
  477. error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
  478. xfs_btree_del_cursor(cur, error);
  479. if (error)
  480. goto out_bmp;
  481. /* Find all blocks currently being used by the bnobt. */
  482. cur = xfs_bnobt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
  483. error = xagb_bitmap_set_btblocks(&ra.agmetablocks, cur);
  484. xfs_btree_del_cursor(cur, error);
  485. if (error)
  486. goto out_bmp;
  487. /* Find all blocks currently being used by the cntbt. */
  488. cur = xfs_cntbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
  489. error = xagb_bitmap_set_btblocks(&ra.agmetablocks, cur);
  490. xfs_btree_del_cursor(cur, error);
  491. if (error)
  492. goto out_bmp;
  493. /*
  494. * Drop the freesp meta blocks that are in use by btrees.
  495. * The remaining blocks /should/ be AGFL blocks.
  496. */
  497. error = xagb_bitmap_disunion(agfl_extents, &ra.agmetablocks);
  498. if (error)
  499. goto out_bmp;
  500. /* Strike out the blocks that are cross-linked. */
  501. ra.rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
  502. error = xagb_bitmap_walk(agfl_extents, xrep_agfl_check_extent, &ra);
  503. xfs_btree_del_cursor(ra.rmap_cur, error);
  504. if (error)
  505. goto out_bmp;
  506. error = xagb_bitmap_disunion(agfl_extents, &ra.crossed);
  507. if (error)
  508. goto out_bmp;
  509. /*
  510. * Calculate the new AGFL size. If we found more blocks than fit in
  511. * the AGFL we'll free them later.
  512. */
  513. *flcount = min_t(uint64_t, xagb_bitmap_hweight(agfl_extents),
  514. xfs_agfl_size(mp));
  515. out_bmp:
  516. xagb_bitmap_destroy(&ra.crossed);
  517. xagb_bitmap_destroy(&ra.agmetablocks);
  518. return error;
  519. }
  520. /* Update the AGF and reset the in-core state. */
  521. STATIC void
  522. xrep_agfl_update_agf(
  523. struct xfs_scrub *sc,
  524. struct xfs_buf *agf_bp,
  525. xfs_agblock_t flcount)
  526. {
  527. struct xfs_agf *agf = agf_bp->b_addr;
  528. ASSERT(flcount <= xfs_agfl_size(sc->mp));
  529. /* Trigger fdblocks recalculation */
  530. xfs_force_summary_recalc(sc->mp);
  531. /* Update the AGF counters. */
  532. if (xfs_perag_initialised_agf(sc->sa.pag)) {
  533. sc->sa.pag->pagf_flcount = flcount;
  534. clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET,
  535. &sc->sa.pag->pag_opstate);
  536. }
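/*
 * Reset the circular freelist to start at slot zero.  When the list is
 * empty, fllast is left pointing at the slot logically just before
 * flfirst, i.e. the final slot of the array.
 */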
  537. agf->agf_flfirst = cpu_to_be32(0);
  538. agf->agf_flcount = cpu_to_be32(flcount);
  539. if (flcount)
  540. agf->agf_fllast = cpu_to_be32(flcount - 1);
  541. else
  542. agf->agf_fllast = cpu_to_be32(xfs_agfl_size(sc->mp) - 1);
  543. xfs_alloc_log_agf(sc->tp, agf_bp,
  544. XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
  545. }
  546. struct xrep_agfl_fill {
  547. struct xagb_bitmap used_extents;
  548. struct xfs_scrub *sc;
  549. __be32 *agfl_bno;
  550. xfs_agblock_t flcount;
  551. unsigned int fl_off;
  552. };
  553. /* Fill the AGFL with whatever blocks are in this extent. */
  554. static int
  555. xrep_agfl_fill(
  556. uint32_t start,
  557. uint32_t len,
  558. void *priv)
  559. {
  560. struct xrep_agfl_fill *af = priv;
  561. struct xfs_scrub *sc = af->sc;
  562. xfs_agblock_t agbno = start;
  563. int error;
  564. trace_xrep_agfl_insert(sc->sa.pag, agbno, len);
  565. while (agbno < start + len && af->fl_off < af->flcount)
  566. af->agfl_bno[af->fl_off++] = cpu_to_be32(agbno++);
  567. error = xagb_bitmap_set(&af->used_extents, start, agbno - 1);
  568. if (error)
  569. return error;
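/*
 * Returning -ECANCELED stops the bitmap walk early once the AGFL is full;
 * whatever remains in agfl_extents is freed back to the AG afterwards.
 */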
  570. if (af->fl_off == af->flcount)
  571. return -ECANCELED;
  572. return 0;
  573. }
  574. /* Write out a totally new AGFL. */
  575. STATIC int
  576. xrep_agfl_init_header(
  577. struct xfs_scrub *sc,
  578. struct xfs_buf *agfl_bp,
  579. struct xagb_bitmap *agfl_extents,
  580. xfs_agblock_t flcount)
  581. {
  582. struct xrep_agfl_fill af = {
  583. .sc = sc,
  584. .flcount = flcount,
  585. };
  586. struct xfs_mount *mp = sc->mp;
  587. struct xfs_agfl *agfl;
  588. int error;
  589. ASSERT(flcount <= xfs_agfl_size(mp));
  590. /*
  591. * Start rewriting the header by setting the bno[] array to
  592. * NULLAGBLOCK, then setting AGFL header fields.
  593. */
  594. agfl = XFS_BUF_TO_AGFL(agfl_bp);
  595. memset(agfl, 0xFF, BBTOB(agfl_bp->b_length));
  596. agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
  597. agfl->agfl_seqno = cpu_to_be32(sc->sa.pag->pag_agno);
  598. uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
  599. /*
  600. * Fill the AGFL with the remaining blocks. If agfl_extents has more
  601. * blocks than fit in the AGFL, they will be freed in a subsequent
  602. * step.
  603. */
  604. xagb_bitmap_init(&af.used_extents);
  605. af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp);
  606. xagb_bitmap_walk(agfl_extents, xrep_agfl_fill, &af);
  607. error = xagb_bitmap_disunion(agfl_extents, &af.used_extents);
  608. if (error)
  609. return error;
  610. /* Write new AGFL to disk. */
  611. xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF);
  612. xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1);
  613. xagb_bitmap_destroy(&af.used_extents);
  614. return 0;
  615. }
  616. /* Repair the AGFL. */
  617. int
  618. xrep_agfl(
  619. struct xfs_scrub *sc)
  620. {
  621. struct xagb_bitmap agfl_extents;
  622. struct xfs_mount *mp = sc->mp;
  623. struct xfs_buf *agf_bp;
  624. struct xfs_buf *agfl_bp;
  625. xfs_agblock_t flcount;
  626. int error;
  627. /* We require the rmapbt to rebuild anything. */
  628. if (!xfs_has_rmapbt(mp))
  629. return -EOPNOTSUPP;
  630. xagb_bitmap_init(&agfl_extents);
  631. /*
  632. * Read the AGF so that we can query the rmapbt. We hope that there's
  633. * nothing wrong with the AGF, but all the AG header repair functions
  634. * have this chicken-and-egg problem.
  635. */
  636. error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp);
  637. if (error)
  638. return error;
  639. /*
  640. * Make sure we have the AGFL buffer, as scrub might have decided it
  641. * was corrupt after xfs_alloc_read_agfl failed with -EFSCORRUPTED.
  642. */
  643. error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
  644. XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
  645. XFS_AGFL_DADDR(mp)),
  646. XFS_FSS_TO_BB(mp, 1), 0, &agfl_bp, NULL);
  647. if (error)
  648. return error;
  649. agfl_bp->b_ops = &xfs_agfl_buf_ops;
  650. /* Gather all the extents we're going to put on the new AGFL. */
  651. error = xrep_agfl_collect_blocks(sc, agf_bp, &agfl_extents, &flcount);
  652. if (error)
  653. goto err;
  654. /* Last chance to abort before we start committing fixes. */
  655. if (xchk_should_terminate(sc, &error))
  656. goto err;
  657. /*
  658. * Update AGF and AGFL. We reset the global free block counter when
  659. * we adjust the AGF flcount (which can fail) so avoid updating any
  660. * buffers until we know that part works.
  661. */
  662. xrep_agfl_update_agf(sc, agf_bp, flcount);
  663. error = xrep_agfl_init_header(sc, agfl_bp, &agfl_extents, flcount);
  664. if (error)
  665. goto err;
  666. /*
  667. * Ok, the AGFL should be ready to go now. Roll the transaction to
  668. * make the new AGFL permanent before we start using it to return
  669. * freespace overflow to the freespace btrees.
  670. */
  671. sc->sa.agf_bp = agf_bp;
  672. error = xrep_roll_ag_trans(sc);
  673. if (error)
  674. goto err;
  675. /* Dump any AGFL overflow. */
  676. error = xrep_reap_agblocks(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
  677. XFS_AG_RESV_AGFL);
  678. if (error)
  679. goto err;
  680. err:
  681. xagb_bitmap_destroy(&agfl_extents);
  682. return error;
  683. }
  684. /* AGI */
  685. /*
  686. * Offset within the xrep_find_ag_btree array for each btree type. Avoid the
  687. * XFS_BTNUM_ names here to avoid creating a sparse array.
  688. */
  689. enum {
  690. XREP_AGI_INOBT = 0,
  691. XREP_AGI_FINOBT,
  692. XREP_AGI_END,
  693. XREP_AGI_MAX
  694. };
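/* Number of incore inodes to fetch from the radix tree in one batch. */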
  695. #define XREP_AGI_LOOKUP_BATCH 32
  696. struct xrep_agi {
  697. struct xfs_scrub *sc;
  698. /* AGI buffer, tracked separately */
  699. struct xfs_buf *agi_bp;
  700. /* context for finding btree roots */
  701. struct xrep_find_ag_btree fab[XREP_AGI_MAX];
  702. /* old AGI contents in case we have to revert */
  703. struct xfs_agi old_agi;
  704. /* bitmap of which inodes are unlinked */
  705. struct xagino_bitmap iunlink_bmp;
  706. /* heads of the unlinked inode bucket lists */
  707. xfs_agino_t iunlink_heads[XFS_AGI_UNLINKED_BUCKETS];
  708. /* scratchpad for batched lookups of the radix tree */
  709. struct xfs_inode *lookup_batch[XREP_AGI_LOOKUP_BATCH];
  710. /* Map of ino -> next_ino for unlinked inode processing. */
  711. struct xfarray *iunlink_next;
  712. /* Map of ino -> prev_ino for unlinked inode processing. */
  713. struct xfarray *iunlink_prev;
  714. };
  715. static void
  716. xrep_agi_buf_cleanup(
  717. void *buf)
  718. {
  719. struct xrep_agi *ragi = buf;
  720. xfarray_destroy(ragi->iunlink_prev);
  721. xfarray_destroy(ragi->iunlink_next);
  722. xagino_bitmap_destroy(&ragi->iunlink_bmp);
  723. }
  724. /*
  725. * Given the inode btree roots described by *fab, find the roots, check them
  726. * for sanity, and pass the root data back out via *fab.
  727. */
  728. STATIC int
  729. xrep_agi_find_btrees(
  730. struct xrep_agi *ragi)
  731. {
  732. struct xfs_scrub *sc = ragi->sc;
  733. struct xrep_find_ag_btree *fab = ragi->fab;
  734. struct xfs_buf *agf_bp;
  735. struct xfs_mount *mp = sc->mp;
  736. int error;
  737. /* Read the AGF. */
  738. error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp);
  739. if (error)
  740. return error;
  741. /* Find the btree roots. */
  742. error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
  743. if (error)
  744. return error;
  745. /* We must find the inobt root. */
  746. if (!xrep_check_btree_root(sc, &fab[XREP_AGI_INOBT]))
  747. return -EFSCORRUPTED;
  748. /* We must find the finobt root if that feature is enabled. */
  749. if (xfs_has_finobt(mp) &&
  750. !xrep_check_btree_root(sc, &fab[XREP_AGI_FINOBT]))
  751. return -EFSCORRUPTED;
  752. return 0;
  753. }
  754. /*
  755. * Reinitialize the AGI header, making an in-core copy of the old contents so
  756. * that we know which in-core state needs to be reinitialized.
  757. */
  758. STATIC void
  759. xrep_agi_init_header(
  760. struct xrep_agi *ragi)
  761. {
  762. struct xfs_scrub *sc = ragi->sc;
  763. struct xfs_buf *agi_bp = ragi->agi_bp;
  764. struct xfs_agi *old_agi = &ragi->old_agi;
  765. struct xfs_agi *agi = agi_bp->b_addr;
  766. struct xfs_perag *pag = sc->sa.pag;
  767. struct xfs_mount *mp = sc->mp;
  768. memcpy(old_agi, agi, sizeof(*old_agi));
  769. memset(agi, 0, BBTOB(agi_bp->b_length));
  770. agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
  771. agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
  772. agi->agi_seqno = cpu_to_be32(pag->pag_agno);
  773. agi->agi_length = cpu_to_be32(pag->block_count);
  774. agi->agi_newino = cpu_to_be32(NULLAGINO);
  775. agi->agi_dirino = cpu_to_be32(NULLAGINO);
  776. if (xfs_has_crc(mp))
  777. uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
  778. /* Mark the incore AGI data stale until we're done fixing things. */
  779. ASSERT(xfs_perag_initialised_agi(pag));
  780. clear_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
  781. }
  782. /* Set btree root information in an AGI. */
  783. STATIC void
  784. xrep_agi_set_roots(
  785. struct xrep_agi *ragi)
  786. {
  787. struct xfs_scrub *sc = ragi->sc;
  788. struct xfs_agi *agi = ragi->agi_bp->b_addr;
  789. struct xrep_find_ag_btree *fab = ragi->fab;
  790. agi->agi_root = cpu_to_be32(fab[XREP_AGI_INOBT].root);
  791. agi->agi_level = cpu_to_be32(fab[XREP_AGI_INOBT].height);
  792. if (xfs_has_finobt(sc->mp)) {
  793. agi->agi_free_root = cpu_to_be32(fab[XREP_AGI_FINOBT].root);
  794. agi->agi_free_level = cpu_to_be32(fab[XREP_AGI_FINOBT].height);
  795. }
  796. }
  797. /* Update the AGI counters. */
  798. STATIC int
  799. xrep_agi_calc_from_btrees(
  800. struct xrep_agi *ragi)
  801. {
  802. struct xfs_scrub *sc = ragi->sc;
  803. struct xfs_buf *agi_bp = ragi->agi_bp;
  804. struct xfs_btree_cur *cur;
  805. struct xfs_agi *agi = agi_bp->b_addr;
  806. struct xfs_mount *mp = sc->mp;
  807. xfs_agino_t count;
  808. xfs_agino_t freecount;
  809. int error;
  810. cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp);
  811. error = xfs_ialloc_count_inodes(cur, &count, &freecount);
  812. if (error)
  813. goto err;
  814. if (xfs_has_inobtcounts(mp)) {
  815. xfs_filblks_t blocks;
  816. error = xfs_btree_count_blocks(cur, &blocks);
  817. if (error)
  818. goto err;
  819. agi->agi_iblocks = cpu_to_be32(blocks);
  820. }
  821. xfs_btree_del_cursor(cur, error);
  822. agi->agi_count = cpu_to_be32(count);
  823. agi->agi_freecount = cpu_to_be32(freecount);
  824. if (xfs_has_finobt(mp) && xfs_has_inobtcounts(mp)) {
  825. xfs_filblks_t blocks;
  826. cur = xfs_finobt_init_cursor(sc->sa.pag, sc->tp, agi_bp);
  827. error = xfs_btree_count_blocks(cur, &blocks);
  828. if (error)
  829. goto err;
  830. xfs_btree_del_cursor(cur, error);
  831. agi->agi_fblocks = cpu_to_be32(blocks);
  832. }
  833. return 0;
  834. err:
  835. xfs_btree_del_cursor(cur, error);
  836. return error;
  837. }
  838. /*
  839. * Record a forwards unlinked chain pointer from agino -> next_agino in our
  840. * staging information.
  841. */
  842. static inline int
  843. xrep_iunlink_store_next(
  844. struct xrep_agi *ragi,
  845. xfs_agino_t agino,
  846. xfs_agino_t next_agino)
  847. {
  848. ASSERT(next_agino != 0);
  849. return xfarray_store(ragi->iunlink_next, agino, &next_agino);
  850. }
  851. /*
  852. * Record a backwards unlinked chain pointer from prev_ino <- agino in our
  853. * staging information.
  854. */
  855. static inline int
  856. xrep_iunlink_store_prev(
  857. struct xrep_agi *ragi,
  858. xfs_agino_t agino,
  859. xfs_agino_t prev_agino)
  860. {
  861. ASSERT(prev_agino != 0);
  862. return xfarray_store(ragi->iunlink_prev, agino, &prev_agino);
  863. }
  864. /*
  865. * Given an @agino, look up the next inode in the iunlink bucket. Returns
  866. * NULLAGINO if we're at the end of the chain, 0 if @agino is not in memory
  867. * like it should be, or a per-AG inode number.
  868. */
  869. static inline xfs_agino_t
  870. xrep_iunlink_next(
  871. struct xfs_scrub *sc,
  872. xfs_agino_t agino)
  873. {
  874. struct xfs_inode *ip;
  875. ip = xfs_iunlink_lookup(sc->sa.pag, agino);
  876. if (!ip)
  877. return 0;
  878. return ip->i_next_unlinked;
  879. }
  880. /*
  881. * Load the inode @agino into memory, set its i_prev_unlinked, and drop the
  882. * inode so it can be inactivated. Returns NULLAGINO if we're at the end of
  883. * the chain or if we should stop walking the chain due to corruption; or a
  884. * per-AG inode number.
  885. */
  886. STATIC xfs_agino_t
  887. xrep_iunlink_reload_next(
  888. struct xrep_agi *ragi,
  889. xfs_agino_t prev_agino,
  890. xfs_agino_t agino)
  891. {
  892. struct xfs_scrub *sc = ragi->sc;
  893. struct xfs_inode *ip;
  894. xfs_ino_t ino;
  895. xfs_agino_t ret = NULLAGINO;
  896. int error;
  897. ino = XFS_AGINO_TO_INO(sc->mp, sc->sa.pag->pag_agno, agino);
  898. error = xchk_iget(ragi->sc, ino, &ip);
  899. if (error)
  900. return ret;
  901. trace_xrep_iunlink_reload_next(ip, prev_agino);
  902. /* If this is a linked inode, stop processing the chain. */
  903. if (VFS_I(ip)->i_nlink != 0) {
  904. xrep_iunlink_store_next(ragi, agino, NULLAGINO);
  905. goto rele;
  906. }
  907. ip->i_prev_unlinked = prev_agino;
  908. ret = ip->i_next_unlinked;
  909. /*
  910. * Drop the inode reference that we just took. We hold the AGI, so
  911. * this inode cannot move off the unlinked list and hence cannot be
  912. * reclaimed.
  913. */
  914. rele:
  915. xchk_irele(sc, ip);
  916. return ret;
  917. }
  918. /*
  919. * Walk an AGI unlinked bucket's list to load incore any unlinked inodes that
  920. * still existed at mount time. This can happen if iunlink processing fails
  921. * during log recovery.
  922. */
  923. STATIC int
  924. xrep_iunlink_walk_ondisk_bucket(
  925. struct xrep_agi *ragi,
  926. unsigned int bucket)
  927. {
  928. struct xfs_scrub *sc = ragi->sc;
  929. struct xfs_agi *agi = sc->sa.agi_bp->b_addr;
  930. xfs_agino_t prev_agino = NULLAGINO;
  931. xfs_agino_t next_agino;
  932. int error = 0;
  933. next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
  934. while (next_agino != NULLAGINO) {
  935. xfs_agino_t agino = next_agino;
  936. if (xchk_should_terminate(ragi->sc, &error))
  937. return error;
  938. trace_xrep_iunlink_walk_ondisk_bucket(sc->sa.pag, bucket,
  939. prev_agino, agino);
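/*
 * If the chain wanders into an inode that doesn't even hash to this
 * bucket, stop trusting the ondisk list at this point.
 */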
  940. if (bucket != agino % XFS_AGI_UNLINKED_BUCKETS)
  941. break;
  942. next_agino = xrep_iunlink_next(sc, agino);
  943. if (!next_agino)
  944. next_agino = xrep_iunlink_reload_next(ragi, prev_agino,
  945. agino);
  946. prev_agino = agino;
  947. }
  948. return 0;
  949. }
  950. /* Decide if this is an unlinked inode in this AG. */
  951. STATIC bool
  952. xrep_iunlink_igrab(
  953. struct xfs_perag *pag,
  954. struct xfs_inode *ip)
  955. {
  956. struct xfs_mount *mp = pag->pag_mount;
  957. if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
  958. return false;
  959. if (!xfs_inode_on_unlinked_list(ip))
  960. return false;
  961. return true;
  962. }
  963. /*
  964. * Mark the given inode in the lookup batch in our unlinked inode bitmap, and
  965. * remember if this inode is the start of the unlinked chain.
  966. */
  967. STATIC int
  968. xrep_iunlink_visit(
  969. struct xrep_agi *ragi,
  970. unsigned int batch_idx)
  971. {
  972. struct xfs_mount *mp = ragi->sc->mp;
  973. struct xfs_inode *ip = ragi->lookup_batch[batch_idx];
  974. xfs_agino_t agino;
  975. unsigned int bucket;
  976. int error;
  977. ASSERT(XFS_INO_TO_AGNO(mp, ip->i_ino) == ragi->sc->sa.pag->pag_agno);
  978. ASSERT(xfs_inode_on_unlinked_list(ip));
  979. agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
  980. bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
  981. trace_xrep_iunlink_visit(ragi->sc->sa.pag, bucket,
  982. ragi->iunlink_heads[bucket], ip);
  983. error = xagino_bitmap_set(&ragi->iunlink_bmp, agino, 1);
  984. if (error)
  985. return error;
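/*
 * This inode thinks it's the head of its chain (it has no previous
 * unlinked inode), so record it as the bucket head unless we already
 * found one.
 */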
  986. if (ip->i_prev_unlinked == NULLAGINO) {
  987. if (ragi->iunlink_heads[bucket] == NULLAGINO)
  988. ragi->iunlink_heads[bucket] = agino;
  989. }
  990. return 0;
  991. }
  992. /*
  993. * Find all incore unlinked inodes so that we can rebuild the unlinked buckets.
  994. * We hold the AGI so there should not be any modifications to the unlinked
  995. * list.
  996. */
  997. STATIC int
  998. xrep_iunlink_mark_incore(
  999. struct xrep_agi *ragi)
  1000. {
  1001. struct xfs_perag *pag = ragi->sc->sa.pag;
  1002. struct xfs_mount *mp = pag->pag_mount;
  1003. uint32_t first_index = 0;
  1004. bool done = false;
  1005. unsigned int nr_found = 0;
  1006. do {
  1007. unsigned int i;
  1008. int error = 0;
  1009. if (xchk_should_terminate(ragi->sc, &error))
  1010. return error;
  1011. rcu_read_lock();
  1012. nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
  1013. (void **)&ragi->lookup_batch, first_index,
  1014. XREP_AGI_LOOKUP_BATCH);
  1015. if (!nr_found) {
  1016. rcu_read_unlock();
  1017. return 0;
  1018. }
  1019. for (i = 0; i < nr_found; i++) {
  1020. struct xfs_inode *ip = ragi->lookup_batch[i];
  1021. if (done || !xrep_iunlink_igrab(pag, ip))
  1022. ragi->lookup_batch[i] = NULL;
  1023. /*
  1024. * Update the index for the next lookup. Catch
  1025. * overflows into the next AG range which can occur if
  1026. * we have inodes in the last block of the AG and we
  1027. * are currently pointing to the last inode.
  1028. *
  1029. * Because we may see inodes that are from the wrong AG
  1030. * due to RCU freeing and reallocation, only update the
  1031. * index if it lies in this AG. It was a race that led
  1032. * us to see this inode, so another lookup from the
  1033. * same index will not find it again.
  1034. */
  1035. if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
  1036. continue;
  1037. first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
  1038. if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
  1039. done = true;
  1040. }
  1041. /* unlock now we've grabbed the inodes. */
  1042. rcu_read_unlock();
  1043. for (i = 0; i < nr_found; i++) {
  1044. if (!ragi->lookup_batch[i])
  1045. continue;
  1046. error = xrep_iunlink_visit(ragi, i);
  1047. if (error)
  1048. return error;
  1049. }
  1050. } while (!done);
  1051. return 0;
  1052. }
  1053. /* Mark all the unlinked ondisk inodes in this inobt record in iunlink_bmp. */
  1054. STATIC int
  1055. xrep_iunlink_mark_ondisk_rec(
  1056. struct xfs_btree_cur *cur,
  1057. const union xfs_btree_rec *rec,
  1058. void *priv)
  1059. {
  1060. struct xfs_inobt_rec_incore irec;
  1061. struct xrep_agi *ragi = priv;
  1062. struct xfs_scrub *sc = ragi->sc;
  1063. struct xfs_mount *mp = cur->bc_mp;
  1064. xfs_agino_t agino;
  1065. unsigned int i;
  1066. int error = 0;
  1067. xfs_inobt_btrec_to_irec(mp, rec, &irec);
  1068. for (i = 0, agino = irec.ir_startino;
  1069. i < XFS_INODES_PER_CHUNK;
  1070. i++, agino++) {
  1071. struct xfs_inode *ip;
  1072. unsigned int len = 1;
  1073. /* Skip free inodes */
  1074. if (XFS_INOBT_MASK(i) & irec.ir_free)
  1075. continue;
  1076. /* Skip inodes we've seen before */
  1077. if (xagino_bitmap_test(&ragi->iunlink_bmp, agino, &len))
  1078. continue;
  1079. /*
  1080. * Skip incore inodes; these were already picked up by
  1081. * the _mark_incore step.
  1082. */
  1083. rcu_read_lock();
  1084. ip = radix_tree_lookup(&sc->sa.pag->pag_ici_root, agino);
  1085. rcu_read_unlock();
  1086. if (ip)
  1087. continue;
  1088. /*
  1089. * Try to look up this inode. If we can't get it, just move
  1090. * on because we haven't actually scrubbed the inobt or the
  1091. * inodes yet.
  1092. */
  1093. error = xchk_iget(ragi->sc,
  1094. XFS_AGINO_TO_INO(mp, sc->sa.pag->pag_agno,
  1095. agino),
  1096. &ip);
  1097. if (error)
  1098. continue;
  1099. trace_xrep_iunlink_reload_ondisk(ip);
  1100. if (VFS_I(ip)->i_nlink == 0)
  1101. error = xagino_bitmap_set(&ragi->iunlink_bmp, agino, 1);
  1102. xchk_irele(sc, ip);
  1103. if (error)
  1104. break;
  1105. }
  1106. return error;
  1107. }
  1108. /*
  1109. * Find ondisk inodes that are unlinked and not in cache, and mark them in
  1110. * iunlink_bmp. We haven't checked the inobt yet, so we don't error out if
  1111. * the btree is corrupt.
  1112. */
  1113. STATIC void
  1114. xrep_iunlink_mark_ondisk(
  1115. struct xrep_agi *ragi)
  1116. {
  1117. struct xfs_scrub *sc = ragi->sc;
  1118. struct xfs_buf *agi_bp = ragi->agi_bp;
  1119. struct xfs_btree_cur *cur;
  1120. int error;
  1121. cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp);
  1122. error = xfs_btree_query_all(cur, xrep_iunlink_mark_ondisk_rec, ragi);
  1123. xfs_btree_del_cursor(cur, error);
  1124. }
  1125. /*
  1126. * Walk an iunlink bucket's inode list. For each inode that should be on this
  1127. * chain, clear its entry in iunlink_bmp because it's ok and we don't need
  1128. * to touch it further.
  1129. */
  1130. STATIC int
  1131. xrep_iunlink_resolve_bucket(
  1132. struct xrep_agi *ragi,
  1133. unsigned int bucket)
  1134. {
  1135. struct xfs_scrub *sc = ragi->sc;
  1136. struct xfs_inode *ip;
  1137. xfs_agino_t prev_agino = NULLAGINO;
  1138. xfs_agino_t next_agino = ragi->iunlink_heads[bucket];
  1139. int error = 0;
  1140. while (next_agino != NULLAGINO) {
  1141. if (xchk_should_terminate(ragi->sc, &error))
  1142. return error;
  1143. /* Find the next inode in the chain. */
  1144. ip = xfs_iunlink_lookup(sc->sa.pag, next_agino);
  1145. if (!ip) {
  1146. /* Inode not incore? Terminate the chain. */
  1147. trace_xrep_iunlink_resolve_uncached(sc->sa.pag,
  1148. bucket, prev_agino, next_agino);
  1149. next_agino = NULLAGINO;
  1150. break;
  1151. }
  1152. if (next_agino % XFS_AGI_UNLINKED_BUCKETS != bucket) {
  1153. /*
  1154. * Inode is in the wrong bucket. Advance the list,
  1155. * but pretend we didn't see this inode.
  1156. */
  1157. trace_xrep_iunlink_resolve_wronglist(sc->sa.pag,
  1158. bucket, prev_agino, next_agino);
  1159. next_agino = ip->i_next_unlinked;
  1160. continue;
  1161. }
  1162. if (!xfs_inode_on_unlinked_list(ip)) {
  1163. /*
  1164. * Incore inode doesn't think this inode is on an
  1165. * unlinked list. This is probably because we reloaded
  1166. * it from disk. Advance the list, but pretend we
  1167. * didn't see this inode; we'll fix that later.
  1168. */
  1169. trace_xrep_iunlink_resolve_nolist(sc->sa.pag,
  1170. bucket, prev_agino, next_agino);
  1171. next_agino = ip->i_next_unlinked;
  1172. continue;
  1173. }
  1174. trace_xrep_iunlink_resolve_ok(sc->sa.pag, bucket, prev_agino,
  1175. next_agino);
  1176. /*
  1177. * Otherwise, this inode's unlinked pointers are ok. Clear it
  1178. * from the unlinked bitmap since we're done with it, and make
  1179. * sure the chain is still correct.
  1180. */
  1181. error = xagino_bitmap_clear(&ragi->iunlink_bmp, next_agino, 1);
  1182. if (error)
  1183. return error;
  1184. /* Remember the previous inode's next pointer. */
  1185. if (prev_agino != NULLAGINO) {
  1186. error = xrep_iunlink_store_next(ragi, prev_agino,
  1187. next_agino);
  1188. if (error)
  1189. return error;
  1190. }
  1191. /* Remember this inode's previous pointer. */
  1192. error = xrep_iunlink_store_prev(ragi, next_agino, prev_agino);
  1193. if (error)
  1194. return error;
  1195. /* Advance the list and remember this inode. */
  1196. prev_agino = next_agino;
  1197. next_agino = ip->i_next_unlinked;
  1198. }
  1199. /* Update the previous inode's next pointer. */
  1200. if (prev_agino != NULLAGINO) {
  1201. error = xrep_iunlink_store_next(ragi, prev_agino, next_agino);
  1202. if (error)
  1203. return error;
  1204. }
  1205. return 0;
  1206. }
  1207. /* Reinsert this unlinked inode into the head of the staged bucket list. */
  1208. STATIC int
  1209. xrep_iunlink_add_to_bucket(
  1210. struct xrep_agi *ragi,
  1211. xfs_agino_t agino)
  1212. {
  1213. xfs_agino_t current_head;
  1214. unsigned int bucket;
  1215. int error;
  1216. bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
  1217. /* Point this inode at the current head of the bucket list. */
  1218. current_head = ragi->iunlink_heads[bucket];
  1219. trace_xrep_iunlink_add_to_bucket(ragi->sc->sa.pag, bucket, agino,
  1220. current_head);
  1221. error = xrep_iunlink_store_next(ragi, agino, current_head);
  1222. if (error)
  1223. return error;
  1224. /* Remember the head inode's previous pointer. */
  1225. if (current_head != NULLAGINO) {
  1226. error = xrep_iunlink_store_prev(ragi, current_head, agino);
  1227. if (error)
  1228. return error;
  1229. }
  1230. ragi->iunlink_heads[bucket] = agino;
  1231. return 0;
  1232. }
  1233. /* Reinsert unlinked inodes into the staged iunlink buckets. */
  1234. STATIC int
  1235. xrep_iunlink_add_lost_inodes(
  1236. uint32_t start,
  1237. uint32_t len,
  1238. void *priv)
  1239. {
  1240. struct xrep_agi *ragi = priv;
  1241. int error;
  1242. for (; len > 0; start++, len--) {
  1243. error = xrep_iunlink_add_to_bucket(ragi, start);
  1244. if (error)
  1245. return error;
  1246. }
  1247. return 0;
  1248. }
  1249. /*
  1250. * Figure out the iunlink bucket values and find inodes that need to be
  1251. * reinserted into the list.
  1252. */
  1253. STATIC int
  1254. xrep_iunlink_rebuild_buckets(
  1255. struct xrep_agi *ragi)
  1256. {
  1257. unsigned int i;
  1258. int error;
  1259. /*
  1260. * Walk the ondisk AGI unlinked list to find inodes that are on the
  1261. * list but aren't in memory. This can happen if a past log recovery
  1262. * tried to clear the iunlinked list but failed. Our scan rebuilds the
  1263. * unlinked list using incore inodes, so we must load and link them
  1264. * properly.
  1265. */
  1266. for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
  1267. error = xrep_iunlink_walk_ondisk_bucket(ragi, i);
  1268. if (error)
  1269. return error;
  1270. }
  1271. /*
  1272. * Record all the incore unlinked inodes in iunlink_bmp that we didn't
  1273. * find by walking the ondisk iunlink buckets. This shouldn't happen,
  1274. * but we can't risk forgetting an inode somewhere.
  1275. */
  1276. error = xrep_iunlink_mark_incore(ragi);
  1277. if (error)
  1278. return error;
  1279. /*
  1280. * If there are ondisk inodes that are unlinked and have not been loaded
  1281. * into cache, record them in iunlink_bmp.
  1282. */
  1283. xrep_iunlink_mark_ondisk(ragi);
  1284. /*
  1285. * Walk each iunlink bucket to (re)construct as much of the incore list
  1286. * as would be correct. For each inode that survives this step, mark
  1287. * it clear in iunlink_bmp; we're done with those inodes.
  1288. */
  1289. for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
  1290. error = xrep_iunlink_resolve_bucket(ragi, i);
  1291. if (error)
  1292. return error;
  1293. }
  1294. /*
  1295. * Any unlinked inodes that we didn't find through the bucket list
  1296. * walk (or that were ignored by the walk) must be inserted into the bucket
  1297. * list. Stage this in memory for now.
  1298. */
  1299. return xagino_bitmap_walk(&ragi->iunlink_bmp,
  1300. xrep_iunlink_add_lost_inodes, ragi);
  1301. }
  1302. /* Update i_next_unlinked for the inode @agino. */
  1303. STATIC int
  1304. xrep_iunlink_relink_next(
  1305. struct xrep_agi *ragi,
  1306. xfarray_idx_t idx,
  1307. xfs_agino_t next_agino)
  1308. {
  1309. struct xfs_scrub *sc = ragi->sc;
  1310. struct xfs_perag *pag = sc->sa.pag;
  1311. struct xfs_inode *ip;
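/*
 * xfarray_iter in xrep_iunlink_commit() hands us a cursor that has already
 * advanced past the record it returned, so (as far as we can tell) the
 * agino this entry describes is idx - 1.
 */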
  1312. xfarray_idx_t agino = idx - 1;
  1313. bool want_rele = false;
  1314. int error = 0;
  1315. ip = xfs_iunlink_lookup(pag, agino);
  1316. if (!ip) {
  1317. xfs_ino_t ino;
  1318. xfs_agino_t prev_agino;
  1319. /*
  1320. * No inode exists in cache. Load it off the disk so that we
  1321. * can reinsert it into the incore unlinked list.
  1322. */
  1323. ino = XFS_AGINO_TO_INO(sc->mp, pag->pag_agno, agino);
  1324. error = xchk_iget(sc, ino, &ip);
  1325. if (error)
  1326. return -EFSCORRUPTED;
  1327. want_rele = true;
  1328. /* Set the backward pointer since this just came off disk. */
  1329. error = xfarray_load(ragi->iunlink_prev, agino, &prev_agino);
  1330. if (error)
  1331. goto out_rele;
  1332. trace_xrep_iunlink_relink_prev(ip, prev_agino);
  1333. ip->i_prev_unlinked = prev_agino;
  1334. }
  1335. /* Update the forward pointer. */
  1336. if (ip->i_next_unlinked != next_agino) {
  1337. error = xfs_iunlink_log_inode(sc->tp, ip, pag, next_agino);
  1338. if (error)
  1339. goto out_rele;
  1340. trace_xrep_iunlink_relink_next(ip, next_agino);
  1341. ip->i_next_unlinked = next_agino;
  1342. }
  1343. out_rele:
  1344. /*
  1345. * The iunlink lookup doesn't igrab because we hold the AGI buffer lock
  1346. * and the inode cannot be reclaimed. However, if we used iget to load
  1347. * a missing inode, we must irele it here.
  1348. */
  1349. if (want_rele)
  1350. xchk_irele(sc, ip);
  1351. return error;
  1352. }
  1353. /* Update i_prev_unlinked for the inode @agino. */
  1354. STATIC int
  1355. xrep_iunlink_relink_prev(
  1356. struct xrep_agi *ragi,
  1357. xfarray_idx_t idx,
  1358. xfs_agino_t prev_agino)
  1359. {
  1360. struct xfs_scrub *sc = ragi->sc;
  1361. struct xfs_perag *pag = sc->sa.pag;
  1362. struct xfs_inode *ip;
  1363. xfarray_idx_t agino = idx - 1;
  1364. bool want_rele = false;
  1365. int error = 0;
  1366. ASSERT(prev_agino != 0);
  1367. ip = xfs_iunlink_lookup(pag, agino);
  1368. if (!ip) {
  1369. xfs_ino_t ino;
  1370. xfs_agino_t next_agino;
  1371. /*
  1372. * No inode exists in cache. Load it off the disk so that we
  1373. * can reinsert it into the incore unlinked list.
  1374. */
  1375. ino = XFS_AGINO_TO_INO(sc->mp, pag->pag_agno, agino);
  1376. error = xchk_iget(sc, ino, &ip);
  1377. if (error)
  1378. return -EFSCORRUPTED;
  1379. want_rele = true;
  1380. /* Set the forward pointer since this just came off disk. */
  1381. error = xfarray_load(ragi->iunlink_next, agino, &next_agino);
  1382. if (error)
  1383. goto out_rele;
  1384. error = xfs_iunlink_log_inode(sc->tp, ip, pag, next_agino);
  1385. if (error)
  1386. goto out_rele;
  1387. trace_xrep_iunlink_relink_next(ip, next_agino);
  1388. ip->i_next_unlinked = next_agino;
  1389. }
  1390. /* Update the backward pointer. */
  1391. if (ip->i_prev_unlinked != prev_agino) {
  1392. trace_xrep_iunlink_relink_prev(ip, prev_agino);
  1393. ip->i_prev_unlinked = prev_agino;
  1394. }
  1395. out_rele:
  1396. /*
  1397. * The iunlink lookup doesn't igrab because we hold the AGI buffer lock
  1398. * and the inode cannot be reclaimed. However, if we used iget to load
  1399. * a missing inode, we must irele it here.
  1400. */
  1401. if (want_rele)
  1402. xchk_irele(sc, ip);
  1403. return error;
  1404. }
  1405. /* Log all the iunlink updates we need to finish regenerating the AGI. */
  1406. STATIC int
  1407. xrep_iunlink_commit(
  1408. struct xrep_agi *ragi)
  1409. {
  1410. struct xfs_agi *agi = ragi->agi_bp->b_addr;
  1411. xfarray_idx_t idx = XFARRAY_CURSOR_INIT;
  1412. xfs_agino_t agino;
  1413. unsigned int i;
  1414. int error;
  1415. /* Fix all the forward links */
  1416. while ((error = xfarray_iter(ragi->iunlink_next, &idx, &agino)) == 1) {
  1417. error = xrep_iunlink_relink_next(ragi, idx, agino);
  1418. if (error)
  1419. return error;
  1420. }
  1421. /* Fix all the back links */
  1422. idx = XFARRAY_CURSOR_INIT;
  1423. while ((error = xfarray_iter(ragi->iunlink_prev, &idx, &agino)) == 1) {
  1424. error = xrep_iunlink_relink_prev(ragi, idx, agino);
  1425. if (error)
  1426. return error;
  1427. }
  1428. /* Copy the staged iunlink buckets to the new AGI. */
  1429. for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
  1430. trace_xrep_iunlink_commit_bucket(ragi->sc->sa.pag, i,
  1431. be32_to_cpu(ragi->old_agi.agi_unlinked[i]),
  1432. ragi->iunlink_heads[i]);
  1433. agi->agi_unlinked[i] = cpu_to_be32(ragi->iunlink_heads[i]);
  1434. }
  1435. return 0;
  1436. }
  1437. /* Trigger reinitialization of the in-core data. */
  1438. STATIC int
  1439. xrep_agi_commit_new(
  1440. struct xrep_agi *ragi)
  1441. {
  1442. struct xfs_scrub *sc = ragi->sc;
  1443. struct xfs_buf *agi_bp = ragi->agi_bp;
  1444. struct xfs_perag *pag;
  1445. struct xfs_agi *agi = agi_bp->b_addr;
  1446. /* Trigger inode count recalculation */
  1447. xfs_force_summary_recalc(sc->mp);
  1448. /* Write this to disk. */
  1449. xfs_trans_buf_set_type(sc->tp, agi_bp, XFS_BLFT_AGI_BUF);
  1450. xfs_trans_log_buf(sc->tp, agi_bp, 0, BBTOB(agi_bp->b_length) - 1);
  1451. /* Now reinitialize the in-core counters if necessary. */
  1452. pag = sc->sa.pag;
  1453. pag->pagi_count = be32_to_cpu(agi->agi_count);
  1454. pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
  1455. set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
  1456. return xrep_roll_ag_trans(sc);
  1457. }
  1458. /* Repair the AGI. */
  1459. int
  1460. xrep_agi(
  1461. struct xfs_scrub *sc)
  1462. {
  1463. struct xrep_agi *ragi;
  1464. struct xfs_mount *mp = sc->mp;
  1465. char *descr;
  1466. unsigned int i;
  1467. int error;
  1468. /* We require the rmapbt to rebuild anything. */
  1469. if (!xfs_has_rmapbt(mp))
  1470. return -EOPNOTSUPP;
  1471. sc->buf = kzalloc(sizeof(struct xrep_agi), XCHK_GFP_FLAGS);
  1472. if (!sc->buf)
  1473. return -ENOMEM;
  1474. ragi = sc->buf;
  1475. ragi->sc = sc;
  1476. ragi->fab[XREP_AGI_INOBT] = (struct xrep_find_ag_btree){
  1477. .rmap_owner = XFS_RMAP_OWN_INOBT,
  1478. .buf_ops = &xfs_inobt_buf_ops,
  1479. .maxlevels = M_IGEO(sc->mp)->inobt_maxlevels,
  1480. };
  1481. ragi->fab[XREP_AGI_FINOBT] = (struct xrep_find_ag_btree){
  1482. .rmap_owner = XFS_RMAP_OWN_INOBT,
  1483. .buf_ops = &xfs_finobt_buf_ops,
  1484. .maxlevels = M_IGEO(sc->mp)->inobt_maxlevels,
  1485. };
  1486. ragi->fab[XREP_AGI_END] = (struct xrep_find_ag_btree){
  1487. .buf_ops = NULL,
  1488. };
  1489. for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
  1490. ragi->iunlink_heads[i] = NULLAGINO;
  1491. xagino_bitmap_init(&ragi->iunlink_bmp);
  1492. sc->buf_cleanup = xrep_agi_buf_cleanup;
  1493. descr = xchk_xfile_ag_descr(sc, "iunlinked next pointers");
  1494. error = xfarray_create(descr, 0, sizeof(xfs_agino_t),
  1495. &ragi->iunlink_next);
  1496. kfree(descr);
  1497. if (error)
  1498. return error;
  1499. descr = xchk_xfile_ag_descr(sc, "iunlinked prev pointers");
  1500. error = xfarray_create(descr, 0, sizeof(xfs_agino_t),
  1501. &ragi->iunlink_prev);
  1502. kfree(descr);
  1503. if (error)
  1504. return error;
  1505. /*
  1506. * Make sure we have the AGI buffer, as scrub might have decided it
  1507. * was corrupt after xfs_ialloc_read_agi failed with -EFSCORRUPTED.
  1508. */
  1509. error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
  1510. XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
  1511. XFS_AGI_DADDR(mp)),
  1512. XFS_FSS_TO_BB(mp, 1), 0, &ragi->agi_bp, NULL);
  1513. if (error)
  1514. return error;
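/*
 * As with the AGF repair above, attach the verifier only after the raw
 * read so that a broken AGI doesn't fail verification before we get a
 * chance to rebuild it.
 */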
  1515. ragi->agi_bp->b_ops = &xfs_agi_buf_ops;
  1516. /* Find the AGI btree roots. */
  1517. error = xrep_agi_find_btrees(ragi);
  1518. if (error)
  1519. return error;
  1520. error = xrep_iunlink_rebuild_buckets(ragi);
  1521. if (error)
  1522. return error;
  1523. /* Last chance to abort before we start committing fixes. */
  1524. if (xchk_should_terminate(sc, &error))
  1525. return error;
  1526. /* Start rewriting the header and implant the btrees we found. */
  1527. xrep_agi_init_header(ragi);
  1528. xrep_agi_set_roots(ragi);
  1529. error = xrep_agi_calc_from_btrees(ragi);
  1530. if (error)
  1531. goto out_revert;
  1532. error = xrep_iunlink_commit(ragi);
  1533. if (error)
  1534. goto out_revert;
  1535. /* Reinitialize in-core state. */
  1536. return xrep_agi_commit_new(ragi);
  1537. out_revert:
  1538. /* Mark the incore AGI state stale and revert the AGI. */
  1539. clear_bit(XFS_AGSTATE_AGI_INIT, &sc->sa.pag->pag_opstate);
  1540. memcpy(ragi->agi_bp->b_addr, &ragi->old_agi, sizeof(struct xfs_agi));
  1541. return error;
  1542. }