bmap.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	int			error;

	error = xchk_get_inode(sc, ip);
	if (error)
		goto out;

	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		struct address_space	*mapping = VFS_I(sc->ip)->i_mapping;

		inode_dio_wait(VFS_I(sc->ip));

		/*
		 * Try to flush all incore state to disk before we examine the
		 * space mappings for the data fork.  Leave accumulated errors
		 * in the mapping for the writer threads to consume.
		 *
		 * On ENOSPC or EIO writeback errors, we continue into the
		 * extent mapping checks because write failures do not
		 * necessarily imply anything about the correctness of the file
		 * metadata.  The metadata and the file data could be on
		 * completely separate devices; a media failure might only
		 * affect a subset of the disk, etc.  We can handle delalloc
		 * extents in the scrubber, so leaving them in memory is fine.
		 */
		error = filemap_fdatawrite(mapping);
		if (!error)
			error = filemap_fdatawait_keep_errors(mapping);
		if (error && (error != -ENOSPC && error != -EIO))
			goto out;
	}

	/* Got the inode, lock it and we're ready to go. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

struct xchk_bmap_info {
	struct xfs_scrub	*sc;
	xfs_fileoff_t		lastoff;	/* offset just past the last extent we saw */
	bool			is_rt;		/* fork maps realtime extents */
	bool			is_shared;	/* extents may be shared (reflink data fork) */
	int			whichfork;	/* which fork we are scrubbing */
};
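
/*
 * Reverse-mapping cross-reference for a single extent: first we locate
 * the rmap record that should back this mapping, then we check that its
 * block range, owner, fork, and unwritten state all agree with the bmbt
 * record we were given.
 */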

/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno,
	uint64_t		owner,
	struct xfs_rmap_irec	*rmap)
{
	xfs_fileoff_t		offset;
	unsigned int		rflags = 0;
	int			has_rmap;
	int			error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;
	if (irec->br_state == XFS_EXT_UNWRITTEN)
		rflags |= XFS_RMAP_UNWRITTEN;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
		if (!xchk_should_check_xref(info->sc, &error,
				&info->sc->sa.rmap_cur))
			return false;
		goto out;
	}

	/*
	 * Otherwise, use the (faster) regular lookup.
	 */
	error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
			offset, rflags, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;
	if (!has_rmap)
		goto out;

	error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;

out:
	if (!has_rmap)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
			irec->br_startoff);
	return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno)
{
	struct xfs_rmap_irec	rmap;
	unsigned long long	rmap_end;
	uint64_t		owner;

	if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check the rmap. */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check the logical offsets if applicable.  CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xchk_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap.  Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    !!(irec->br_state == XFS_EXT_UNWRITTEN) !=
	    !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (!!(info->whichfork == XFS_ATTR_FORK) !=
	    !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}
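
/*
 * Per-extent cross-references.  Both helpers below return early if the
 * CORRUPT flag has already been set so that we don't keep piling
 * cross-reference work onto a mapping we already know is bad.
 */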

/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_extent_xref(
	struct xchk_bmap_info	*info,
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_extent_xref(
	struct xchk_bmap_info	*info,
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		len;
	int			error;

	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

	error = xchk_ag_init(info->sc, agno, &info->sc->sa);
	if (!xchk_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;

	xchk_xref_is_used_space(info->sc, agbno, len);
	xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
	xchk_bmap_xref_rmap(info, irec, agbno);
	switch (info->whichfork) {
	case XFS_DATA_FORK:
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		/* fall through */
	case XFS_ATTR_FORK:
		xchk_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xchk_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

	xchk_ag_free(info->sc, &info->sc->sa);
}
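
/*
 * Note that xchk_bmap_extent() below is reached from two places: the
 * bmbt walk passes a btree cursor, while the incore extent scan passes
 * a NULL cursor, so it cannot assume that a btree block is available.
 */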

/* Scrub a single extent record. */
STATIC int
xchk_bmap_extent(
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	struct xfs_buf		*bp = NULL;
	xfs_filblks_t		end;
	int			error = 0;

	if (cur)
		xfs_btree_get_block(cur, 0, &bp);

	/*
	 * Check for out-of-order extents.  This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents.  We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > MAXEXTLEN)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	end = irec->br_startblock + irec->br_blockcount - 1;
	if (info->is_rt &&
	    (!xfs_verify_rtbno(mp, irec->br_startblock) ||
	     !xfs_verify_rtbno(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    (!xfs_verify_fsbno(mp, irec->br_startblock) ||
	     !xfs_verify_fsbno(mp, end) ||
	     XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
				XFS_FSB_TO_AGNO(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->is_rt)
		xchk_bmap_rt_extent_xref(info, ip, cur, irec);
	else
		xchk_bmap_extent_xref(info, ip, cur, irec);

	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}
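
/*
 * On CRC-enabled filesystems, the bmbt walk below also checks that each
 * btree block below the root names this inode as its owner, since the
 * block verifiers don't do that for us.
 */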

/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	*info = bs->private;
	struct xfs_inode	*ip = bs->cur->bc_private.b.ip;
	struct xfs_buf		*bp = NULL;
	struct xfs_btree_block	*block;
	uint64_t		owner;
	int			i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xchk_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/* Set up the in-core record and scrub it. */
	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	return xchk_bmap_extent(ip, bs->cur, info, &irec);
}

/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
	struct xfs_scrub	*sc,
	int			whichfork,
	struct xchk_bmap_info	*info)
{
	struct xfs_owner_info	oinfo;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_btree_cur	*cur;
	int			error;

	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error);
	return error;
}
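
/*
 * The helpers below walk every rmap record in each AG that points back
 * at this inode and fork, and check that the incore extent list covers
 * the entire range that the rmapbt claims we own.
 */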

struct xchk_bmap_check_rmap_info {
	struct xfs_scrub	*sc;
	int			whichfork;
	struct xfs_iext_cursor	icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_bmbt_irec		irec;
	struct xchk_bmap_check_rmap_info	*sbcri = priv;
	struct xfs_ifork		*ifp;
	struct xfs_scrub		*sc = sbcri->sc;
	bool				have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent record lengths are constrained to 2^21 blocks in length
	 * because of space constraints in the on-disk metadata structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
	while (have_map) {
		if (irec.br_startoff != rec->rm_offset)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_private.a.agno, rec->rm_startblock))
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_blockcount > rec->rm_blockcount)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
		rec->rm_startblock += irec.br_blockcount;
		rec->rm_offset += irec.br_blockcount;
		rec->rm_blockcount -= irec.br_blockcount;
		if (rec->rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return XFS_BTREE_QUERY_RANGE_ABORT;
	return 0;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
	struct xfs_scrub		*sc,
	int				whichfork,
	xfs_agnumber_t			agno)
{
	struct xchk_bmap_check_rmap_info	sbcri;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf;
	int				error;

	error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
	if (!cur) {
		error = -ENOMEM;
		goto out_agf;
	}

	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT)
		error = 0;

	xfs_btree_del_cursor(cur, error);
out_agf:
	xfs_trans_brelse(sc->tp, agf);
	return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	loff_t			size;
	xfs_agnumber_t		agno;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */
	switch (whichfork) {
	case XFS_DATA_FORK:
		size = i_size_read(VFS_I(sc->ip));
		break;
	case XFS_ATTR_FORK:
		size = XFS_IFORK_Q(sc->ip);
		break;
	default:
		size = 0;
		break;
	}
	if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0))
		return 0;

	for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
		error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}

	return 0;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 * Finally, if the filesystem has a reverse-mapping btree, we check
 * that every rmap pointing at this fork is covered by the extent list.
 */
STATIC int
xchk_bmap(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	info = { NULL };
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		endoff;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* Non-existent CoW forks are ignorable. */
		if (!ifp)
			goto out;
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
		if (!ifp)
			goto out_check_rmap;
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_COW_FORK) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xchk_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xchk_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Now try to scrub the in-memory extent list. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(sc->tp, ip, whichfork);
		if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
			goto out;
	}

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			break;
		if (isnullstartblock(irec.br_startblock))
			continue;
		if (irec.br_startoff >= endoff) {
			xchk_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xchk_bmap_extent(ip, NULL, &info, &irec);
		if (error)
			goto out;
	}

out_check_rmap:
	error = xchk_bmap_check_rmaps(sc, whichfork);
	if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}
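
/*
 * Entry points for scrubbing each fork.  The CoW fork only exists on
 * reflink inodes, so asking to scrub it on anything else returns
 * -ENOENT.
 */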

/* Scrub an inode's data fork. */
int
xchk_bmap_data(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
	struct xfs_scrub	*sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xchk_bmap(sc, XFS_COW_FORK);
}