// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* btree scrubbing */

/*
 * Check for btree operation errors. See the section about handling
 * operational errors in common.c.
 */
static bool
__xchk_btree_process_error(
	struct xfs_scrub *sc,
	struct xfs_btree_cur *cur,
	int level,
	int *error,
	__u32 errflag,
	void *ret_ip)
{
	if (*error == 0)
		return true;

	switch (*error) {
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			trace_xchk_ifork_btree_op_error(sc, cur, level,
					*error, ret_ip);
		else
			trace_xchk_btree_op_error(sc, cur, level,
					*error, ret_ip);
		break;
	}
	return false;
}

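/*
 * Handle an operational error seen while scrubbing this btree; metadata
 * corruption errors are recorded with the CORRUPT flag.
 */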
bool
xchk_btree_process_error(
	struct xfs_scrub *sc,
	struct xfs_btree_cur *cur,
	int level,
	int *error)
{
	return __xchk_btree_process_error(sc, cur, level, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

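/*
 * Handle an operational error seen while cross-referencing this btree with
 * other metadata; corruption errors are recorded with the XFAIL flag.
 */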
bool
xchk_btree_xref_process_error(
	struct xfs_scrub *sc,
	struct xfs_btree_cur *cur,
	int level,
	int *error)
{
	return __xchk_btree_process_error(sc, cur, level, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Record btree block corruption. */
static void
__xchk_btree_set_corrupt(
	struct xfs_scrub *sc,
	struct xfs_btree_cur *cur,
	int level,
	__u32 errflag,
	void *ret_ip)
{
	sc->sm->sm_flags |= errflag;

	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
		trace_xchk_ifork_btree_error(sc, cur, level,
				ret_ip);
	else
		trace_xchk_btree_error(sc, cur, level,
				ret_ip);
}

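/* Record corruption in the btree being scrubbed. */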
void
xchk_btree_set_corrupt(
	struct xfs_scrub *sc,
	struct xfs_btree_cur *cur,
	int level)
{
	__xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
			__return_address);
}

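/* Record corruption found while cross-referencing with another btree. */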
void
xchk_btree_xref_set_corrupt(
	struct xfs_scrub *sc,
	struct xfs_btree_cur *cur,
	int level)
{
	__xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
			__return_address);
}

/*
 * Make sure this record is in order and doesn't stray outside of the parent
 * keys.
 */
STATIC void
xchk_btree_rec(
	struct xchk_btree *bs)
{
	struct xfs_btree_cur *cur = bs->cur;
	union xfs_btree_rec *rec;
	union xfs_btree_key key;
	union xfs_btree_key hkey;
	union xfs_btree_key *keyp;
	struct xfs_btree_block *block;
	struct xfs_btree_block *keyblock;
	struct xfs_buf *bp;

	block = xfs_btree_get_block(cur, 0, &bp);
	rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);

	trace_xchk_btree_rec(bs->sc, cur, 0);

	/* If this isn't the first record, are they in order? */
	if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
		xchk_btree_set_corrupt(bs->sc, cur, 0);
	bs->firstrec = false;
	memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);

	if (cur->bc_nlevels == 1)
		return;

	/* Is this at least as large as the parent low key? */
	cur->bc_ops->init_key_from_rec(&key, rec);
	keyblock = xfs_btree_get_block(cur, 1, &bp);
	keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Is this no larger than the parent high key? */
	cur->bc_ops->init_high_key_from_rec(&hkey, rec);
	keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);
}

/*
 * Make sure this key is in order and doesn't stray outside of the parent
 * keys.
 */
STATIC void
xchk_btree_key(
	struct xchk_btree *bs,
	int level)
{
	struct xfs_btree_cur *cur = bs->cur;
	union xfs_btree_key *key;
	union xfs_btree_key *keyp;
	struct xfs_btree_block *block;
	struct xfs_btree_block *keyblock;
	struct xfs_buf *bp;

	block = xfs_btree_get_block(cur, level, &bp);
	key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);

	trace_xchk_btree_key(bs->sc, cur, level);

	/* If this isn't the first key, are they in order? */
	if (!bs->firstkey[level] &&
	    !cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
		xchk_btree_set_corrupt(bs->sc, cur, level);
	bs->firstkey[level] = false;
	memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);

	if (level + 1 >= cur->bc_nlevels)
		return;

	/* Is this at least as large as the parent low key? */
	keyblock = xfs_btree_get_block(cur, level + 1, &bp);
	keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, level);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Is this no larger than the parent high key? */
	key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
	keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, level);
}

/*
 * Check a btree pointer. Returns true if it's ok to use this pointer.
 * Callers do not need to set the corrupt flag.
 */
static bool
xchk_btree_ptr_ok(
	struct xchk_btree *bs,
	int level,
	union xfs_btree_ptr *ptr)
{
	bool res;

	/* A btree rooted in an inode has no block pointer to the root. */
	if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == bs->cur->bc_nlevels)
		return true;

	/* Otherwise, check the pointers. */
	if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
		res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level);
	else
		res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
	if (!res)
		xchk_btree_set_corrupt(bs->sc, bs->cur, level);

	return res;
}

/* Check that a btree block's sibling matches what we expect. */
STATIC int
xchk_btree_block_check_sibling(
	struct xchk_btree *bs,
	int level,
	int direction,
	union xfs_btree_ptr *sibling)
{
	struct xfs_btree_cur *cur = bs->cur;
	struct xfs_btree_block *pblock;
	struct xfs_buf *pbp;
	struct xfs_btree_cur *ncur = NULL;
	union xfs_btree_ptr *pp;
	int success;
	int error;

	error = xfs_btree_dup_cursor(cur, &ncur);
	if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error) ||
	    !ncur)
		return error;

	/*
	 * If the pointer is null, we shouldn't be able to move the upper
	 * level pointer anywhere.
	 */
	if (xfs_btree_ptr_is_null(cur, sibling)) {
		if (direction > 0)
			error = xfs_btree_increment(ncur, level + 1, &success);
		else
			error = xfs_btree_decrement(ncur, level + 1, &success);
		if (error == 0 && success)
			xchk_btree_set_corrupt(bs->sc, cur, level);
		error = 0;
		goto out;
	}

	/* Increment upper level pointer. */
	if (direction > 0)
		error = xfs_btree_increment(ncur, level + 1, &success);
	else
		error = xfs_btree_decrement(ncur, level + 1, &success);
	if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error))
		goto out;
	if (!success) {
		xchk_btree_set_corrupt(bs->sc, cur, level + 1);
		goto out;
	}

	/* Compare upper level pointer to sibling pointer. */
	pblock = xfs_btree_get_block(ncur, level + 1, &pbp);
	pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
	if (!xchk_btree_ptr_ok(bs, level + 1, pp))
		goto out;
	if (pbp)
		xchk_buffer_recheck(bs->sc, pbp);

	if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
		xchk_btree_set_corrupt(bs->sc, cur, level);

out:
	xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR);
	return error;
}

/* Check the siblings of a btree block. */
STATIC int
xchk_btree_block_check_siblings(
	struct xchk_btree *bs,
	struct xfs_btree_block *block)
{
	struct xfs_btree_cur *cur = bs->cur;
	union xfs_btree_ptr leftsib;
	union xfs_btree_ptr rightsib;
	int level;
	int error = 0;

	xfs_btree_get_sibling(cur, block, &leftsib, XFS_BB_LEFTSIB);
	xfs_btree_get_sibling(cur, block, &rightsib, XFS_BB_RIGHTSIB);
	level = xfs_btree_get_level(block);

	/* Root block should never have siblings. */
	if (level == cur->bc_nlevels - 1) {
		if (!xfs_btree_ptr_is_null(cur, &leftsib) ||
		    !xfs_btree_ptr_is_null(cur, &rightsib))
			xchk_btree_set_corrupt(bs->sc, cur, level);
		goto out;
	}

	/*
	 * Do the left & right sibling pointers match the adjacent
	 * parent level pointers?
	 * (These functions absorb error codes for us.)
	 */
	error = xchk_btree_block_check_sibling(bs, level, -1, &leftsib);
	if (error)
		return error;
	error = xchk_btree_block_check_sibling(bs, level, 1, &rightsib);
	if (error)
		return error;

out:
	return error;
}

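/*
 * Deferred block owner check. The daddr and tree level of the block are
 * saved here so that ownership can be verified after the scan, once it is
 * safe to use the bnobt/rmapbt cursors again.
 */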
struct check_owner {
	struct list_head list;
	xfs_daddr_t daddr;
	int level;
};

/*
 * Make sure this btree block isn't in the free list and that there's
 * an rmap record for it.
 */
STATIC int
xchk_btree_check_block_owner(
	struct xchk_btree *bs,
	int level,
	xfs_daddr_t daddr)
{
	xfs_agnumber_t agno;
	xfs_agblock_t agbno;
	xfs_btnum_t btnum;
	bool init_sa;
	int error = 0;

	if (!bs->cur)
		return 0;

	btnum = bs->cur->bc_btnum;
	agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
	agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);

	init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
	if (init_sa) {
		error = xchk_ag_init(bs->sc, agno, &bs->sc->sa);
		if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
				level, &error))
			return error;
	}

	xchk_xref_is_used_space(bs->sc, agbno, 1);
	/*
	 * The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
	 * have to nullify it (to shut down further block owner checks) if
	 * self-xref encounters problems.
	 */
	if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
		bs->cur = NULL;

	xchk_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
	if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
		bs->cur = NULL;

	if (init_sa)
		xchk_ag_free(bs->sc, &bs->sc->sa);

	return error;
}

/* Check the owner of a btree block. */
STATIC int
xchk_btree_check_owner(
	struct xchk_btree *bs,
	int level,
	struct xfs_buf *bp)
{
	struct xfs_btree_cur *cur = bs->cur;
	struct check_owner *co;

	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
		return 0;

	/*
	 * We want to cross-reference each btree block with the bnobt
	 * and the rmapbt. We cannot cross-reference the bnobt or
	 * rmapbt while scanning the bnobt or rmapbt, respectively,
	 * because we cannot alter the cursor and we'd prefer not to
	 * duplicate cursors. Therefore, save the buffer daddr for
	 * later scanning.
	 */
	if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
		co = kmem_alloc(sizeof(struct check_owner),
				KM_MAYFAIL);
		if (!co)
			return -ENOMEM;
		co->level = level;
		co->daddr = XFS_BUF_ADDR(bp);
		list_add_tail(&co->list, &bs->to_check);
		return 0;
	}

	return xchk_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
}

/*
 * Check that this btree block has at least minrecs records or is one of the
 * special blocks that don't require that.
 */
STATIC void
xchk_btree_check_minrecs(
	struct xchk_btree *bs,
	int level,
	struct xfs_btree_block *block)
{
	struct xfs_btree_cur *cur = bs->cur;
	unsigned int root_level = cur->bc_nlevels - 1;
	unsigned int numrecs = be16_to_cpu(block->bb_numrecs);

	/* At least minrecs records means the block is ok. */
	if (numrecs >= cur->bc_ops->get_minrecs(cur, level))
		return;

	/*
	 * For btrees rooted in the inode, it's possible that the root block
	 * contents spilled into a regular ondisk block because there wasn't
	 * enough space in the inode root. The number of records in that
	 * child block might be less than the standard minrecs, but that's ok
	 * provided that there's only one direct child of the root.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 2) {
		struct xfs_btree_block *root_block;
		struct xfs_buf *root_bp;
		int root_maxrecs;

		root_block = xfs_btree_get_block(cur, root_level, &root_bp);
		root_maxrecs = cur->bc_ops->get_dmaxrecs(cur, root_level);
		if (be16_to_cpu(root_block->bb_numrecs) != 1 ||
		    numrecs <= root_maxrecs)
			xchk_btree_set_corrupt(bs->sc, cur, level);
		return;
	}

	/*
	 * Otherwise, only the root level is allowed to have fewer than minrecs
	 * records or keyptrs.
	 */
	if (level < root_level)
		xchk_btree_set_corrupt(bs->sc, cur, level);
}

/*
 * Grab and scrub a btree block given a btree pointer. Returns block
 * and buffer pointers (if applicable) if they're ok to use.
 */
STATIC int
xchk_btree_get_block(
	struct xchk_btree *bs,
	int level,
	union xfs_btree_ptr *pp,
	struct xfs_btree_block **pblock,
	struct xfs_buf **pbp)
{
	xfs_failaddr_t failed_at;
	int error;

	*pblock = NULL;
	*pbp = NULL;
	error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock);
	if (!xchk_btree_process_error(bs->sc, bs->cur, level, &error) ||
	    !*pblock)
		return error;

	xfs_btree_get_block(bs->cur, level, pbp);
	if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
		failed_at = __xfs_btree_check_lblock(bs->cur, *pblock,
				level, *pbp);
	else
		failed_at = __xfs_btree_check_sblock(bs->cur, *pblock,
				level, *pbp);
	if (failed_at) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, level);
		return 0;
	}
	if (*pbp)
		xchk_buffer_recheck(bs->sc, *pbp);

	xchk_btree_check_minrecs(bs, level, *pblock);

	/*
	 * Check the block's owner; this function absorbs error codes
	 * for us.
	 */
	error = xchk_btree_check_owner(bs, level, *pbp);
	if (error)
		return error;

	/*
	 * Check the block's siblings; this function absorbs error codes
	 * for us.
	 */
	return xchk_btree_block_check_siblings(bs, *pblock);
}

/*
 * Check that the low and high keys of this block match the keys stored
 * in the parent block.
 */
STATIC void
xchk_btree_block_keys(
	struct xchk_btree *bs,
	int level,
	struct xfs_btree_block *block)
{
	union xfs_btree_key block_keys;
	struct xfs_btree_cur *cur = bs->cur;
	union xfs_btree_key *high_bk;
	union xfs_btree_key *parent_keys;
	union xfs_btree_key *high_pk;
	struct xfs_btree_block *parent_block;
	struct xfs_buf *bp;

	if (level >= cur->bc_nlevels - 1)
		return;

	/* Calculate the keys for this block. */
	xfs_btree_get_keys(cur, block, &block_keys);

	/* Obtain the parent's copy of the keys for this block. */
	parent_block = xfs_btree_get_block(cur, level + 1, &bp);
	parent_keys = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1],
			parent_block);

	if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Get high keys */
	high_bk = xfs_btree_high_key_from_key(cur, &block_keys);
	high_pk = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1],
			parent_block);

	if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);
}

/*
 * Visit all nodes and leaves of a btree. Check that all pointers and
 * records are in order, that the keys reflect the records, and use a callback
 * so that the caller can verify individual records.
 */
int
xchk_btree(
	struct xfs_scrub *sc,
	struct xfs_btree_cur *cur,
	xchk_btree_rec_fn scrub_fn,
	struct xfs_owner_info *oinfo,
	void *private)
{
	struct xchk_btree bs = { NULL };
	union xfs_btree_ptr ptr;
	union xfs_btree_ptr *pp;
	union xfs_btree_rec *recp;
	struct xfs_btree_block *block;
	int level;
	struct xfs_buf *bp;
	struct check_owner *co;
	struct check_owner *n;
	int i;
	int error = 0;

	/* Initialize scrub state */
	bs.cur = cur;
	bs.scrub_rec = scrub_fn;
	bs.oinfo = oinfo;
	bs.firstrec = true;
	bs.private = private;
	bs.sc = sc;
	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++)
		bs.firstkey[i] = true;
	INIT_LIST_HEAD(&bs.to_check);

	/* Don't try to check a tree with a height we can't handle. */
	if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
		xchk_btree_set_corrupt(sc, cur, 0);
		goto out;
	}

	/*
	 * Load the root of the btree. The helper function absorbs
	 * error codes for us.
	 */
	level = cur->bc_nlevels - 1;
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	if (!xchk_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
		goto out;
	error = xchk_btree_get_block(&bs, level, &ptr, &block, &bp);
	if (error || !block)
		goto out;

	cur->bc_ptrs[level] = 1;
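
	/*
	 * Walk the btree iteratively: bc_ptrs[level] tracks the 1-based
	 * position within the block at each level; when it runs past
	 * bb_numrecs we pop back up towards the root.
	 */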
	while (level < cur->bc_nlevels) {
		block = xfs_btree_get_block(cur, level, &bp);

		if (level == 0) {
			/* End of leaf, pop back towards the root. */
			if (cur->bc_ptrs[level] >
			    be16_to_cpu(block->bb_numrecs)) {
				xchk_btree_block_keys(&bs, level, block);
				if (level < cur->bc_nlevels - 1)
					cur->bc_ptrs[level + 1]++;
				level++;
				continue;
			}

			/* Records in order for scrub? */
			xchk_btree_rec(&bs);

			/* Call out to the record checker. */
			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
			error = bs.scrub_rec(&bs, recp);
			if (error)
				break;
			if (xchk_should_terminate(sc, &error) ||
			    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
				break;

			cur->bc_ptrs[level]++;
			continue;
		}

		/* End of node, pop back towards the root. */
		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
			xchk_btree_block_keys(&bs, level, block);
			if (level < cur->bc_nlevels - 1)
				cur->bc_ptrs[level + 1]++;
			level++;
			continue;
		}

		/* Keys in order for scrub? */
		xchk_btree_key(&bs, level);

		/* Drill another level deeper. */
		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
		if (!xchk_btree_ptr_ok(&bs, level, pp)) {
			cur->bc_ptrs[level]++;
			continue;
		}
		level--;
		error = xchk_btree_get_block(&bs, level, pp, &block, &bp);
		if (error || !block)
			goto out;

		cur->bc_ptrs[level] = 1;
	}

out:
	/* Process deferred owner checks on btree blocks. */
	list_for_each_entry_safe(co, n, &bs.to_check, list) {
		if (!error && bs.cur)
			error = xchk_btree_check_block_owner(&bs,
					co->level, co->daddr);
		list_del(&co->list);
		kmem_free(co);
	}

	return error;
}