dir.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2017 Oracle. All Rights Reserved.
  4. * Author: Darrick J. Wong <darrick.wong@oracle.com>
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_trans_resv.h"
  11. #include "xfs_mount.h"
  12. #include "xfs_defer.h"
  13. #include "xfs_btree.h"
  14. #include "xfs_bit.h"
  15. #include "xfs_log_format.h"
  16. #include "xfs_trans.h"
  17. #include "xfs_sb.h"
  18. #include "xfs_inode.h"
  19. #include "xfs_icache.h"
  20. #include "xfs_itable.h"
  21. #include "xfs_da_format.h"
  22. #include "xfs_da_btree.h"
  23. #include "xfs_dir2.h"
  24. #include "xfs_dir2_priv.h"
  25. #include "xfs_ialloc.h"
  26. #include "scrub/xfs_scrub.h"
  27. #include "scrub/scrub.h"
  28. #include "scrub/common.h"
  29. #include "scrub/trace.h"
  30. #include "scrub/dabtree.h"
  31. /* Set us up to scrub directories. */
  32. int
  33. xchk_setup_directory(
  34. struct xfs_scrub *sc,
  35. struct xfs_inode *ip)
  36. {
  37. return xchk_setup_inode_contents(sc, ip, 0);
  38. }
/* Directories */

/* Scrub a directory entry. */

/*
 * Context handed to the VFS readdir machinery; xchk_dir_actor recovers
 * this struct from the embedded dir_context via container_of.
 */
struct xchk_dir_ctx {
	/* VFS fill-directory iterator */
	struct dir_context	dir_iter;

	/* Scrub state for the directory being checked. */
	struct xfs_scrub	*sc;
};
/* Check that an inode's mode matches a given DT_ type. */
STATIC int
xchk_dir_check_ftype(
	struct xchk_dir_ctx	*sdc,
	xfs_fileoff_t		offset,
	xfs_ino_t		inum,
	int			dtype)
{
	struct xfs_mount	*mp = sdc->sc->mp;
	struct xfs_inode	*ip;
	int			ino_dtype;
	int			error = 0;

	if (!xfs_sb_version_hasftype(&mp->m_sb)) {
		/*
		 * Filesystems without the ftype feature don't store file
		 * type in dirents, so readdir should only ever report
		 * DT_UNKNOWN (or DT_DIR for the "." / ".." entries).
		 * Anything else is corruption.
		 */
		if (dtype != DT_UNKNOWN && dtype != DT_DIR)
			xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
					offset);
		goto out;
	}

	/*
	 * Grab the inode pointed to by the dirent.  We release the
	 * inode before we cancel the scrub transaction.  Since we
	 * don't know a priori that releasing the inode won't trigger
	 * eofblocks cleanup (which allocates what would be a nested
	 * transaction), we can't use DONTCACHE here because DONTCACHE
	 * inodes can trigger immediate inactive cleanup of the inode.
	 */
	error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip);
	if (!xchk_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
			&error))
		goto out;

	/* Convert mode to the DT_* values that dir_emit uses. */
	ino_dtype = xfs_dir3_get_dtype(mp,
			xfs_mode_to_ftype(VFS_I(ip)->i_mode));
	if (ino_dtype != dtype)
		xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
	xfs_irele(ip);
out:
	return error;
}
/*
 * Scrub a single directory entry.
 *
 * We use the VFS directory iterator (i.e. readdir) to call this
 * function for every directory entry in a directory.  Once we're here,
 * we check the inode number to make sure it's sane, then we check that
 * we can look up this filename.  Finally, we check the ftype.
 */
STATIC int
xchk_dir_actor(
	struct dir_context	*dir_iter,
	const char		*name,
	int			namelen,
	loff_t			pos,
	u64			ino,
	unsigned		type)
{
	struct xfs_mount	*mp;
	struct xfs_inode	*ip;
	struct xchk_dir_ctx	*sdc;
	struct xfs_name		xname;
	xfs_ino_t		lookup_ino;
	xfs_dablk_t		offset;
	int			error = 0;

	sdc = container_of(dir_iter, struct xchk_dir_ctx, dir_iter);
	ip = sdc->sc->ip;
	mp = ip->i_mount;

	/* Map the readdir cookie to the dablk of the data block it's in. */
	offset = xfs_dir2_db_to_da(mp->m_dir_geo,
			xfs_dir2_dataptr_to_db(mp->m_dir_geo, pos));

	/* Does this inode number make sense? */
	if (!xfs_verify_dir_ino(mp, ino)) {
		xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
		goto out;
	}

	if (!strncmp(".", name, namelen)) {
		/* If this is "." then check that the inum matches the dir. */
		if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
			xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
					offset);
		if (ino != ip->i_ino)
			xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
					offset);
	} else if (!strncmp("..", name, namelen)) {
		/*
		 * If this is ".." in the root inode, check that the inum
		 * matches this dir.
		 */
		if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
			xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
					offset);
		if (ip->i_ino == mp->m_sb.sb_rootino && ino != ip->i_ino)
			xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
					offset);
	}

	/* Verify that we can look up this name by hash. */
	xname.name = name;
	xname.len = namelen;
	xname.type = XFS_DIR3_FT_UNKNOWN;

	error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
	/* ENOENT means the hash lookup failed and the dir is corrupt */
	if (error == -ENOENT)
		error = -EFSCORRUPTED;
	if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
			&error))
		goto out;
	/* The lookup must resolve to the same inode the dirent named. */
	if (lookup_ino != ino) {
		xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
		goto out;
	}

	/* Verify the file type.  This function absorbs error codes. */
	error = xchk_dir_check_ftype(sdc, offset, lookup_ino, type);
	if (error)
		goto out;
out:
	/*
	 * A negative error code returned here is supposed to cause the
	 * dir_emit caller (xfs_readdir) to abort the directory iteration
	 * and return zero to xchk_directory.
	 */
	if (error == 0 && sdc->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -EFSCORRUPTED;
	return error;
}
/* Scrub a directory btree record. */
STATIC int
xchk_dir_rec(
	struct xchk_da_btree		*ds,
	int				level,
	void				*rec)
{
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_dir2_leaf_entry	*ent = rec;
	struct xfs_inode		*dp = ds->dargs.dp;
	struct xfs_dir2_data_entry	*dent;
	struct xfs_buf			*bp;
	char				*p, *endp;
	xfs_ino_t			ino;
	xfs_dablk_t			rec_bno;
	xfs_dir2_db_t			db;
	xfs_dir2_data_aoff_t		off;
	xfs_dir2_dataptr_t		ptr;
	xfs_dahash_t			calc_hash;
	xfs_dahash_t			hash;
	unsigned int			tag;
	int				error;

	/* Check the hash of the entry. */
	error = xchk_da_btree_hash(ds, level, &ent->hashval);
	if (error)
		goto out;

	/* Valid hash pointer? */
	ptr = be32_to_cpu(ent->address);
	if (ptr == 0)
		return 0;

	/* Find the directory entry's location. */
	db = xfs_dir2_dataptr_to_db(mp->m_dir_geo, ptr);
	off = xfs_dir2_dataptr_to_off(mp->m_dir_geo, ptr);
	rec_bno = xfs_dir2_db_to_da(mp->m_dir_geo, db);

	/* Data blocks must live below the leaf region of the dir fork. */
	if (rec_bno >= mp->m_dir_geo->leafblk) {
		xchk_da_set_corrupt(ds, level);
		goto out;
	}
	error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno, -2, &bp);
	if (!xchk_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
			&error))
		goto out;
	if (!bp) {
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
		goto out;
	}
	xchk_buffer_recheck(ds->sc, bp);

	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out_relse;

	dent = (struct xfs_dir2_data_entry *)(((char *)bp->b_addr) + off);

	/*
	 * Make sure we got a real directory entry: walk the data block's
	 * entries and unused regions; the walk must land exactly on the
	 * address the leaf record pointed at.
	 */
	p = (char *)mp->m_dir_inode_ops->data_entry_p(bp->b_addr);
	endp = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
	if (!endp) {
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
		goto out_relse;
	}
	while (p < endp) {
		struct xfs_dir2_data_entry	*dep;
		struct xfs_dir2_data_unused	*dup;

		dup = (struct xfs_dir2_data_unused *)p;
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			p += be16_to_cpu(dup->length);
			continue;
		}
		dep = (struct xfs_dir2_data_entry *)p;
		if (dep == dent)
			break;
		p += mp->m_dir_inode_ops->data_entsize(dep->namelen);
	}
	/* Walked off the end without hitting dent? Bad pointer. */
	if (p >= endp) {
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
		goto out_relse;
	}

	/* Retrieve the entry, sanity check it, and compare hashes. */
	ino = be64_to_cpu(dent->inumber);
	hash = be32_to_cpu(ent->hashval);
	tag = be16_to_cpup(dp->d_ops->data_entry_tag_p(dent));
	/* The entry's back-pointer tag must equal its block offset. */
	if (!xfs_verify_dir_ino(mp, ino) || tag != off)
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
	if (dent->namelen == 0) {
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
		goto out_relse;
	}

	/* The leaf's stored hash must match the hash of the name. */
	calc_hash = xfs_da_hashname(dent->name, dent->namelen);
	if (calc_hash != hash)
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);

out_relse:
	xfs_trans_brelse(ds->dargs.trans, bp);
out:
	return error;
}
  260. /*
  261. * Is this unused entry either in the bestfree or smaller than all of
  262. * them? We've already checked that the bestfrees are sorted longest to
  263. * shortest, and that there aren't any bogus entries.
  264. */
  265. STATIC void
  266. xchk_directory_check_free_entry(
  267. struct xfs_scrub *sc,
  268. xfs_dablk_t lblk,
  269. struct xfs_dir2_data_free *bf,
  270. struct xfs_dir2_data_unused *dup)
  271. {
  272. struct xfs_dir2_data_free *dfp;
  273. unsigned int dup_length;
  274. dup_length = be16_to_cpu(dup->length);
  275. /* Unused entry is shorter than any of the bestfrees */
  276. if (dup_length < be16_to_cpu(bf[XFS_DIR2_DATA_FD_COUNT - 1].length))
  277. return;
  278. for (dfp = &bf[XFS_DIR2_DATA_FD_COUNT - 1]; dfp >= bf; dfp--)
  279. if (dup_length == be16_to_cpu(dfp->length))
  280. return;
  281. /* Unused entry should be in the bestfrees but wasn't found. */
  282. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  283. }
/* Check free space info in a directory data block. */
STATIC int
xchk_directory_data_bestfree(
	struct xfs_scrub		*sc,
	xfs_dablk_t			lblk,
	bool				is_block)
{
	struct xfs_dir2_data_unused	*dup;
	struct xfs_dir2_data_free	*dfp;
	struct xfs_buf			*bp;
	struct xfs_dir2_data_free	*bf;
	struct xfs_mount		*mp = sc->mp;
	const struct xfs_dir_ops	*d_ops;
	char				*ptr;
	char				*endptr;
	u16				tag;
	unsigned int			nr_bestfrees = 0;
	unsigned int			nr_frees = 0;
	unsigned int			smallest_bestfree;
	int				newlen;
	int				offset;
	int				error;

	d_ops = sc->ip->d_ops;

	if (is_block) {
		/* dir block format: the one data block must be at offset 0 */
		if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET))
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
		error = xfs_dir3_block_read(sc->tp, sc->ip, &bp);
	} else {
		/* dir data format */
		error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, -1, &bp);
	}
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
		goto out;
	xchk_buffer_recheck(sc, bp);

	/* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out_buf;

	/* Do the bestfrees correspond to actual free space? */
	bf = d_ops->data_bestfree_p(bp->b_addr);
	smallest_bestfree = UINT_MAX;
	for (dfp = &bf[0]; dfp < &bf[XFS_DIR2_DATA_FD_COUNT]; dfp++) {
		offset = be16_to_cpu(dfp->offset);
		/* Zero offset means an unused bestfree slot. */
		if (offset == 0)
			continue;
		if (offset >= mp->m_dir_geo->blksize) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out_buf;
		}
		dup = (struct xfs_dir2_data_unused *)(bp->b_addr + offset);
		tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));

		/* bestfree doesn't match the entry it points at? */
		if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG) ||
		    be16_to_cpu(dup->length) != be16_to_cpu(dfp->length) ||
		    tag != ((char *)dup - (char *)bp->b_addr)) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out_buf;
		}

		/* bestfree records should be ordered largest to smallest */
		if (smallest_bestfree < be16_to_cpu(dfp->length)) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out_buf;
		}

		smallest_bestfree = be16_to_cpu(dfp->length);
		nr_bestfrees++;
	}

	/* Make sure the bestfrees are actually the best free spaces. */
	ptr = (char *)d_ops->data_entry_p(bp->b_addr);
	endptr = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);

	/* Iterate the entries, stopping when we hit or go past the end. */
	while (ptr < endptr) {
		dup = (struct xfs_dir2_data_unused *)ptr;
		/* Skip real entries */
		if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG)) {
			struct xfs_dir2_data_entry	*dep;

			dep = (struct xfs_dir2_data_entry *)ptr;
			newlen = d_ops->data_entsize(dep->namelen);
			/* Nonpositive size would make the walk loop forever. */
			if (newlen <= 0) {
				xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
						lblk);
				goto out_buf;
			}
			ptr += newlen;
			continue;
		}

		/* Spot check this free entry */
		tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
		if (tag != ((char *)dup - (char *)bp->b_addr)) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out_buf;
		}

		/*
		 * Either this entry is a bestfree or it's smaller than
		 * any of the bestfrees.
		 */
		xchk_directory_check_free_entry(sc, lblk, bf, dup);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			goto out_buf;

		/* Move on. */
		newlen = be16_to_cpu(dup->length);
		if (newlen <= 0) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out_buf;
		}
		ptr += newlen;
		/* Only count free regions that fit within the block. */
		if (ptr <= endptr)
			nr_frees++;
	}

	/* We're required to fill all the space. */
	if (ptr != endptr)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);

	/* Did we see at least as many free slots as there are bestfrees? */
	if (nr_frees < nr_bestfrees)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
out_buf:
	xfs_trans_brelse(sc->tp, bp);
out:
	return error;
}
  403. /*
  404. * Does the free space length in the free space index block ($len) match
  405. * the longest length in the directory data block's bestfree array?
  406. * Assume that we've already checked that the data block's bestfree
  407. * array is in order.
  408. */
  409. STATIC void
  410. xchk_directory_check_freesp(
  411. struct xfs_scrub *sc,
  412. xfs_dablk_t lblk,
  413. struct xfs_buf *dbp,
  414. unsigned int len)
  415. {
  416. struct xfs_dir2_data_free *dfp;
  417. dfp = sc->ip->d_ops->data_bestfree_p(dbp->b_addr);
  418. if (len != be16_to_cpu(dfp->length))
  419. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  420. if (len > 0 && be16_to_cpu(dfp->offset) == 0)
  421. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  422. }
  423. /* Check free space info in a directory leaf1 block. */
  424. STATIC int
  425. xchk_directory_leaf1_bestfree(
  426. struct xfs_scrub *sc,
  427. struct xfs_da_args *args,
  428. xfs_dablk_t lblk)
  429. {
  430. struct xfs_dir3_icleaf_hdr leafhdr;
  431. struct xfs_dir2_leaf_entry *ents;
  432. struct xfs_dir2_leaf_tail *ltp;
  433. struct xfs_dir2_leaf *leaf;
  434. struct xfs_buf *dbp;
  435. struct xfs_buf *bp;
  436. const struct xfs_dir_ops *d_ops = sc->ip->d_ops;
  437. struct xfs_da_geometry *geo = sc->mp->m_dir_geo;
  438. __be16 *bestp;
  439. __u16 best;
  440. __u32 hash;
  441. __u32 lasthash = 0;
  442. __u32 bestcount;
  443. unsigned int stale = 0;
  444. int i;
  445. int error;
  446. /* Read the free space block. */
  447. error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, -1, &bp);
  448. if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
  449. goto out;
  450. xchk_buffer_recheck(sc, bp);
  451. leaf = bp->b_addr;
  452. d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
  453. ents = d_ops->leaf_ents_p(leaf);
  454. ltp = xfs_dir2_leaf_tail_p(geo, leaf);
  455. bestcount = be32_to_cpu(ltp->bestcount);
  456. bestp = xfs_dir2_leaf_bests_p(ltp);
  457. if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
  458. struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr;
  459. if (hdr3->pad != cpu_to_be32(0))
  460. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  461. }
  462. /*
  463. * There should be as many bestfree slots as there are dir data
  464. * blocks that can fit under i_size.
  465. */
  466. if (bestcount != xfs_dir2_byte_to_db(geo, sc->ip->i_d.di_size)) {
  467. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  468. goto out;
  469. }
  470. /* Is the leaf count even remotely sane? */
  471. if (leafhdr.count > d_ops->leaf_max_ents(geo)) {
  472. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  473. goto out;
  474. }
  475. /* Leaves and bests don't overlap in leaf format. */
  476. if ((char *)&ents[leafhdr.count] > (char *)bestp) {
  477. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  478. goto out;
  479. }
  480. /* Check hash value order, count stale entries. */
  481. for (i = 0; i < leafhdr.count; i++) {
  482. hash = be32_to_cpu(ents[i].hashval);
  483. if (i > 0 && lasthash > hash)
  484. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  485. lasthash = hash;
  486. if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
  487. stale++;
  488. }
  489. if (leafhdr.stale != stale)
  490. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  491. if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
  492. goto out;
  493. /* Check all the bestfree entries. */
  494. for (i = 0; i < bestcount; i++, bestp++) {
  495. best = be16_to_cpu(*bestp);
  496. if (best == NULLDATAOFF)
  497. continue;
  498. error = xfs_dir3_data_read(sc->tp, sc->ip,
  499. i * args->geo->fsbcount, -1, &dbp);
  500. if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
  501. &error))
  502. break;
  503. xchk_directory_check_freesp(sc, lblk, dbp, best);
  504. xfs_trans_brelse(sc->tp, dbp);
  505. if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
  506. goto out;
  507. }
  508. out:
  509. return error;
  510. }
  511. /* Check free space info in a directory freespace block. */
  512. STATIC int
  513. xchk_directory_free_bestfree(
  514. struct xfs_scrub *sc,
  515. struct xfs_da_args *args,
  516. xfs_dablk_t lblk)
  517. {
  518. struct xfs_dir3_icfree_hdr freehdr;
  519. struct xfs_buf *dbp;
  520. struct xfs_buf *bp;
  521. __be16 *bestp;
  522. __u16 best;
  523. unsigned int stale = 0;
  524. int i;
  525. int error;
  526. /* Read the free space block */
  527. error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
  528. if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
  529. goto out;
  530. xchk_buffer_recheck(sc, bp);
  531. if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
  532. struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
  533. if (hdr3->pad != cpu_to_be32(0))
  534. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  535. }
  536. /* Check all the entries. */
  537. sc->ip->d_ops->free_hdr_from_disk(&freehdr, bp->b_addr);
  538. bestp = sc->ip->d_ops->free_bests_p(bp->b_addr);
  539. for (i = 0; i < freehdr.nvalid; i++, bestp++) {
  540. best = be16_to_cpu(*bestp);
  541. if (best == NULLDATAOFF) {
  542. stale++;
  543. continue;
  544. }
  545. error = xfs_dir3_data_read(sc->tp, sc->ip,
  546. (freehdr.firstdb + i) * args->geo->fsbcount,
  547. -1, &dbp);
  548. if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
  549. &error))
  550. break;
  551. xchk_directory_check_freesp(sc, lblk, dbp, best);
  552. xfs_trans_brelse(sc->tp, dbp);
  553. }
  554. if (freehdr.nused + stale != freehdr.nvalid)
  555. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  556. out:
  557. return error;
  558. }
  559. /* Check free space information in directories. */
  560. STATIC int
  561. xchk_directory_blocks(
  562. struct xfs_scrub *sc)
  563. {
  564. struct xfs_bmbt_irec got;
  565. struct xfs_da_args args;
  566. struct xfs_ifork *ifp;
  567. struct xfs_mount *mp = sc->mp;
  568. xfs_fileoff_t leaf_lblk;
  569. xfs_fileoff_t free_lblk;
  570. xfs_fileoff_t lblk;
  571. struct xfs_iext_cursor icur;
  572. xfs_dablk_t dabno;
  573. bool found;
  574. int is_block = 0;
  575. int error;
  576. /* Ignore local format directories. */
  577. if (sc->ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
  578. sc->ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
  579. return 0;
  580. ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
  581. lblk = XFS_B_TO_FSB(mp, XFS_DIR2_DATA_OFFSET);
  582. leaf_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_LEAF_OFFSET);
  583. free_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_FREE_OFFSET);
  584. /* Is this a block dir? */
  585. args.dp = sc->ip;
  586. args.geo = mp->m_dir_geo;
  587. args.trans = sc->tp;
  588. error = xfs_dir2_isblock(&args, &is_block);
  589. if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
  590. goto out;
  591. /* Iterate all the data extents in the directory... */
  592. found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
  593. while (found && !(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) {
  594. /* Block directories only have a single block at offset 0. */
  595. if (is_block &&
  596. (got.br_startoff > 0 ||
  597. got.br_blockcount != args.geo->fsbcount)) {
  598. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
  599. got.br_startoff);
  600. break;
  601. }
  602. /* No more data blocks... */
  603. if (got.br_startoff >= leaf_lblk)
  604. break;
  605. /*
  606. * Check each data block's bestfree data.
  607. *
  608. * Iterate all the fsbcount-aligned block offsets in
  609. * this directory. The directory block reading code is
  610. * smart enough to do its own bmap lookups to handle
  611. * discontiguous directory blocks. When we're done
  612. * with the extent record, re-query the bmap at the
  613. * next fsbcount-aligned offset to avoid redundant
  614. * block checks.
  615. */
  616. for (lblk = roundup((xfs_dablk_t)got.br_startoff,
  617. args.geo->fsbcount);
  618. lblk < got.br_startoff + got.br_blockcount;
  619. lblk += args.geo->fsbcount) {
  620. error = xchk_directory_data_bestfree(sc, lblk,
  621. is_block);
  622. if (error)
  623. goto out;
  624. }
  625. dabno = got.br_startoff + got.br_blockcount;
  626. lblk = roundup(dabno, args.geo->fsbcount);
  627. found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
  628. }
  629. if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
  630. goto out;
  631. /* Look for a leaf1 block, which has free info. */
  632. if (xfs_iext_lookup_extent(sc->ip, ifp, leaf_lblk, &icur, &got) &&
  633. got.br_startoff == leaf_lblk &&
  634. got.br_blockcount == args.geo->fsbcount &&
  635. !xfs_iext_next_extent(ifp, &icur, &got)) {
  636. if (is_block) {
  637. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  638. goto out;
  639. }
  640. error = xchk_directory_leaf1_bestfree(sc, &args,
  641. leaf_lblk);
  642. if (error)
  643. goto out;
  644. }
  645. if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
  646. goto out;
  647. /* Scan for free blocks */
  648. lblk = free_lblk;
  649. found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
  650. while (found && !(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) {
  651. /*
  652. * Dirs can't have blocks mapped above 2^32.
  653. * Single-block dirs shouldn't even be here.
  654. */
  655. lblk = got.br_startoff;
  656. if (lblk & ~0xFFFFFFFFULL) {
  657. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  658. goto out;
  659. }
  660. if (is_block) {
  661. xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
  662. goto out;
  663. }
  664. /*
  665. * Check each dir free block's bestfree data.
  666. *
  667. * Iterate all the fsbcount-aligned block offsets in
  668. * this directory. The directory block reading code is
  669. * smart enough to do its own bmap lookups to handle
  670. * discontiguous directory blocks. When we're done
  671. * with the extent record, re-query the bmap at the
  672. * next fsbcount-aligned offset to avoid redundant
  673. * block checks.
  674. */
  675. for (lblk = roundup((xfs_dablk_t)got.br_startoff,
  676. args.geo->fsbcount);
  677. lblk < got.br_startoff + got.br_blockcount;
  678. lblk += args.geo->fsbcount) {
  679. error = xchk_directory_free_bestfree(sc, &args,
  680. lblk);
  681. if (error)
  682. goto out;
  683. }
  684. dabno = got.br_startoff + got.br_blockcount;
  685. lblk = roundup(dabno, args.geo->fsbcount);
  686. found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
  687. }
  688. out:
  689. return error;
  690. }
/* Scrub a whole directory. */
int
xchk_directory(
	struct xfs_scrub	*sc)
{
	struct xchk_dir_ctx	sdc = {
		.dir_iter.actor = xchk_dir_actor,
		.dir_iter.pos = 0,
		.sc = sc,
	};
	size_t			bufsize;
	loff_t			oldpos;
	int			error = 0;

	if (!S_ISDIR(VFS_I(sc->ip)->i_mode))
		return -ENOENT;

	/* Plausible size? */
	if (sc->ip->i_d.di_size < xfs_dir2_sf_hdr_size(0)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		goto out;
	}

	/* Check directory tree structure */
	error = xchk_da_btree(sc, XFS_DATA_FORK, xchk_dir_rec, NULL);
	if (error)
		return error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return error;

	/* Check the freespace. */
	error = xchk_directory_blocks(sc);
	if (error)
		return error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return error;

	/*
	 * Check that every dirent we see can also be looked up by hash.
	 * Userspace usually asks for a 32k buffer, so we will too.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE,
			sc->ip->i_d.di_size);

	/*
	 * Look up every name in this directory by hash.
	 *
	 * Use the xfs_readdir function to call xchk_dir_actor on
	 * every directory entry in this directory.  In _actor, we check
	 * the name, inode number, and ftype (if applicable) of the
	 * entry.  xfs_readdir uses the VFS filldir functions to provide
	 * iteration context.
	 *
	 * The VFS grabs a read or write lock via i_rwsem before it reads
	 * or writes to a directory.  If we've gotten this far we've
	 * already obtained IOLOCK_EXCL, which (since 4.10) is the same as
	 * getting a write lock on i_rwsem.  Therefore, it is safe for us
	 * to drop the ILOCK here in order to reuse the _readdir and
	 * _dir_lookup routines, which do their own ILOCK locking.
	 */
	oldpos = 0;
	sc->ilock_flags &= ~XFS_ILOCK_EXCL;
	xfs_iunlock(sc->ip, XFS_ILOCK_EXCL);
	while (true) {
		error = xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter, bufsize);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			goto out;
		/* No forward progress means the whole dir has been walked. */
		if (oldpos == sdc.dir_iter.pos)
			break;
		oldpos = sdc.dir_iter.pos;
	}

out:
	return error;
}