// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/*
 * Set us up to scrub reference count btrees.
 */
int
xchk_setup_ag_refcountbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, false);
}

/* Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely. The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in agbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks. Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr. Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list. All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true. If not, the refcount is incorrect.
 */
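
/*
 * A worked example may help; the numbers here are purely illustrative
 * and not taken from any real filesystem. Suppose the refcount record
 * is (bno=10, len=10, refcount=3), covering agblocks 10-19, and the
 * rmapbt reports these overlapping mappings:
 *
 *     [8, 25)  - covers the whole extent, so $seen becomes 1
 *     [8, 14), [10, 14), [14, 20), [14, 22)  - saved as fragments
 *
 * Then $target_nr = 3 - 1 = 2. Step 2 pulls [8, 14) and [10, 14)
 * (both start at or before agblock 10) into the working set. Step 3
 * removes both (they are the shortest, ending at agblock 14) and
 * replaces them with [14, 20) and [14, 22), which start exactly where
 * the previous pair ended. The fragment list is then empty, and the
 * last extents processed end at agblocks 20 and 22, at or beyond the
 * end of the refcount extent (agblock 20), so the record checks out.
 */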

struct xchk_refcnt_frag {
	struct list_head	list;
	struct xfs_rmap_irec	rm;
};

struct xchk_refcnt_check {
	struct xfs_scrub	*sc;
	struct list_head	fragments;

	/* refcount extent we're examining */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;

	/* number of owners seen */
	xfs_nlink_t		seen;
};

/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the refcountbt says we should have.
 */
STATIC int
xchk_refcountbt_rmap_check(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xchk_refcnt_check	*refchk = priv;
	struct xchk_refcnt_frag		*frag;
	xfs_agblock_t			rm_last;
	xfs_agblock_t			rc_last;
	int				error = 0;

	if (xchk_should_terminate(refchk->sc, &error))
		return error;

	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW stage. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing. If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
				KM_MAYFAIL);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}

/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount. If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 */
STATIC void
xchk_refcountbt_process_rmap_fragments(
	struct xchk_refcnt_check	*refchk)
{
	struct list_head		worklist;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	xfs_agblock_t			bno;
	xfs_agblock_t			rbno;
	xfs_agblock_t			next_rbno;
	xfs_nlink_t			nr;
	xfs_nlink_t			target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;

	/*
	 * There are (refchk->refcount - refchk->seen) references we
	 * haven't found yet. Pull that many off the fragment list and
	 * figure out where the smallest rmap ends (and therefore the
	 * next rmap should start). All the rmaps we pull off should
	 * start at or before the beginning of the refcount record's
	 * range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	nr = 0;
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
			break;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kmem_free(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the fragment
		 * list ran out of items. Therefore, we cannot maintain the
		 * required refcount. Something is wrong, so we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;

done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}
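
/*
 * To make the worklist walk above concrete, here is how the loop state
 * evolves for the illustrative example given before the fragment
 * structures (again, hypothetical numbers): target_nr = 2, so the
 * worklist starts as { [8, 14), [10, 14) } with rbno = 14. The first
 * pass discards both fragments (they end at agblock 14) and pulls
 * [14, 20) and [14, 22) from the fragment list, leaving next_rbno = 20.
 * The fragment list is then empty, so the loop exits with rbno = 20,
 * which is not less than bno + len = 20, and seen is bumped up to
 * refcount.
 */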

/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_refcountbt_xref_rmap(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	xfs_nlink_t			refcount)
{
	struct xchk_refcnt_check	refchk = {
		.sc = sc,
		.bno = bno,
		.len = len,
		.refcount = refcount,
		.seen = 0,
	};
	struct xfs_rmap_irec		low;
	struct xfs_rmap_irec		high;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xchk_refcountbt_rmap_check, &refchk);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xchk_refcountbt_process_rmap_fragments(&refchk);
	if (refcount != refchk.seen)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_refcountbt_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	xfs_nlink_t		refcount)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_xref_is_not_inode_chunk(sc, agbno, len);
	xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
}

/* Scrub a refcountbt record. */
STATIC int
xchk_refcountbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	xfs_agblock_t		*cow_blocks = bs->private;
	xfs_agnumber_t		agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;
	bool			has_cowflag;
	int			error = 0;

	bno = be32_to_cpu(rec->refc.rc_startblock);
	len = be32_to_cpu(rec->refc.rc_blockcount);
	refcount = be32_to_cpu(rec->refc.rc_refcount);

	/* Only CoW records can have refcount == 1. */
	has_cowflag = (bno & XFS_REFC_COW_START);
	if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	if (has_cowflag)
		(*cow_blocks) += len;

	/* Check the extent. */
	bno &= ~XFS_REFC_COW_START;
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (refcount == 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_refcountbt_xref(bs->sc, bno, len, refcount);

	return error;
}
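
/*
 * Note on the encoding checked above: CoW staging extents are stored in
 * the refcount btree with XFS_REFC_COW_START (the high bit of the
 * agblock space, per xfs_format.h) ORed into rc_startblock. As a
 * hypothetical example, a raw rc_startblock of 0x80000014 decodes to
 * agblock 0x14 (20) in the CoW staging area, and such a record must
 * carry a refcount of exactly 1.
 */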

/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
	struct xfs_scrub	*sc,
	struct xfs_owner_info	*oinfo,
	xfs_filblks_t		cow_blocks)
{
	xfs_extlen_t		refcbt_blocks = 0;
	xfs_filblks_t		blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
			&blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
			&blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the refcount btree for some AG. */
int
xchk_refcountbt(
	struct xfs_scrub	*sc)
{
	struct xfs_owner_info	oinfo;
	xfs_agblock_t		cow_blocks = 0;
	int			error;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
			&oinfo, &cow_blocks);
	if (error)
		return error;

	xchk_refcount_xref_rmap(sc, &oinfo, cow_blocks);

	return 0;
}

/* xref check that a cow staging extent is marked in the refcountbt. */
void
xchk_xref_is_cow_staging(
	struct xfs_scrub		*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_refcount_irec	rc;
	bool				has_cowflag;
	int				has_refcount;
	int				error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	/* Find the CoW staging extent. */
	error = xfs_refcount_lookup_le(sc->sa.refc_cur,
			agbno + XFS_REFC_COW_START, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	/* CoW flag must be set, refcount must be 1. */
	has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
	if (!has_cowflag || rc.rc_refcount != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);

	/* Must be at least as long as what was passed in. */
	if (rc.rc_blockcount < len)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
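
/*
 * The lookup above keys on agbno + XFS_REFC_COW_START because, as noted
 * near xchk_refcountbt_rec(), staging records store the CoW flag bit in
 * rc_startblock itself; a lookup on the bare agbno would land in the
 * shared-extent keyspace and miss the staging record entirely.
 */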

/*
 * xref check that the extent is not shared. Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_shared(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	bool			shared;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (shared)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}