attr.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
  4. * Author: Darrick J. Wong <djwong@kernel.org>
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_trans_resv.h"
  11. #include "xfs_mount.h"
  12. #include "xfs_log_format.h"
  13. #include "xfs_trans.h"
  14. #include "xfs_inode.h"
  15. #include "xfs_da_format.h"
  16. #include "xfs_da_btree.h"
  17. #include "xfs_attr.h"
  18. #include "xfs_attr_leaf.h"
  19. #include "xfs_attr_sf.h"
  20. #include "xfs_parent.h"
  21. #include "scrub/scrub.h"
  22. #include "scrub/common.h"
  23. #include "scrub/dabtree.h"
  24. #include "scrub/attr.h"
  25. #include "scrub/listxattr.h"
  26. #include "scrub/repair.h"
  27. /* Free the buffers linked from the xattr buffer. */
  28. static void
  29. xchk_xattr_buf_cleanup(
  30. void *priv)
  31. {
  32. struct xchk_xattr_buf *ab = priv;
  33. kvfree(ab->freemap);
  34. ab->freemap = NULL;
  35. kvfree(ab->usedmap);
  36. ab->usedmap = NULL;
  37. kvfree(ab->value);
  38. ab->value = NULL;
  39. ab->value_sz = 0;
  40. kvfree(ab->name);
  41. ab->name = NULL;
  42. }
  43. /*
  44. * Allocate the free space bitmap if we're trying harder; there are leaf blocks
  45. * in the attr fork; or we can't tell if there are leaf blocks.
  46. */
  47. static inline bool
  48. xchk_xattr_want_freemap(
  49. struct xfs_scrub *sc)
  50. {
  51. struct xfs_ifork *ifp;
  52. if (sc->flags & XCHK_TRY_HARDER)
  53. return true;
  54. if (!sc->ip)
  55. return true;
  56. ifp = xfs_ifork_ptr(sc->ip, XFS_ATTR_FORK);
  57. if (!ifp)
  58. return false;
  59. return xfs_ifork_has_extents(ifp);
  60. }
/*
 * Allocate enough memory to hold an attr value and attr block bitmaps,
 * reallocating the buffer if necessary.  Buffer contents are not preserved
 * across a reallocation.
 *
 * Returns 0 or -ENOMEM.  The bitmaps, the repair name buffer, and the
 * cleanup hook are set up only on the first call; later calls only grow
 * the value buffer.
 */
int
xchk_setup_xattr_buf(
	struct xfs_scrub	*sc,
	size_t			value_size)
{
	size_t			bmp_sz;
	struct xchk_xattr_buf	*ab = sc->buf;
	void			*new_val;

	/* One bit per byte of an attr block, rounded up to whole longs. */
	bmp_sz = sizeof(long) * BITS_TO_LONGS(sc->mp->m_attr_geo->blksize);

	/* Already initialized?  Only the value buffer may need to grow. */
	if (ab)
		goto resize_value;

	ab = kvzalloc(sizeof(struct xchk_xattr_buf), XCHK_GFP_FLAGS);
	if (!ab)
		return -ENOMEM;
	sc->buf = ab;
	/*
	 * Register the cleanup hook before any partial allocation below so
	 * that an -ENOMEM mid-way still gets everything freed.
	 */
	sc->buf_cleanup = xchk_xattr_buf_cleanup;

	ab->usedmap = kvmalloc(bmp_sz, XCHK_GFP_FLAGS);
	if (!ab->usedmap)
		return -ENOMEM;

	if (xchk_xattr_want_freemap(sc)) {
		ab->freemap = kvmalloc(bmp_sz, XCHK_GFP_FLAGS);
		if (!ab->freemap)
			return -ENOMEM;
	}

	/* Repair needs a scratch buffer big enough for any xattr name. */
	if (xchk_could_repair(sc)) {
		ab->name = kvmalloc(XATTR_NAME_MAX + 1, XCHK_GFP_FLAGS);
		if (!ab->name)
			return -ENOMEM;
	}

resize_value:
	/* Existing value buffer is already big enough; keep it. */
	if (ab->value_sz >= value_size)
		return 0;

	/* Free first so value_sz stays consistent if the alloc fails. */
	if (ab->value) {
		kvfree(ab->value);
		ab->value = NULL;
		ab->value_sz = 0;
	}

	new_val = kvmalloc(value_size, XCHK_GFP_FLAGS);
	if (!new_val)
		return -ENOMEM;

	ab->value = new_val;
	ab->value_sz = value_size;
	return 0;
}
  110. /* Set us up to scrub an inode's extended attributes. */
  111. int
  112. xchk_setup_xattr(
  113. struct xfs_scrub *sc)
  114. {
  115. int error;
  116. if (xchk_could_repair(sc)) {
  117. error = xrep_setup_xattr(sc);
  118. if (error)
  119. return error;
  120. }
  121. /*
  122. * We failed to get memory while checking attrs, so this time try to
  123. * get all the memory we're ever going to need. Allocate the buffer
  124. * without the inode lock held, which means we can sleep.
  125. */
  126. if (sc->flags & XCHK_TRY_HARDER) {
  127. error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX);
  128. if (error)
  129. return error;
  130. }
  131. return xchk_setup_inode_contents(sc, 0);
  132. }
  133. /* Extended Attributes */
/*
 * Check that an extended attribute key can be looked up by hash.
 *
 * We use the extended attribute walk helper to call this function for every
 * attribute key in an inode.  Once we're here, we load the attribute value to
 * see if any errors happen, or if we get more or less data than we expected.
 */
static int
xchk_xattr_actor(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip,
	unsigned int		attr_flags,
	const unsigned char	*name,
	unsigned int		namelen,
	const void		*value,
	unsigned int		valuelen,
	void			*priv)
{
	struct xfs_da_args	args = {
		.attr_filter	= attr_flags & XFS_ATTR_NSP_ONDISK_MASK,
		.geo		= sc->mp->m_attr_geo,
		.whichfork	= XFS_ATTR_FORK,
		.dp		= ip,
		.name		= name,
		.namelen	= namelen,
		.trans		= sc->tp,
		.valuelen	= valuelen,
		.owner		= ip->i_ino,
	};
	struct xchk_xattr_buf	*ab;
	int			error = 0;

	ab = sc->buf;

	if (xchk_should_terminate(sc, &error))
		return error;

	/* Flag bits that aren't defined on disk are corruption. */
	if (attr_flags & ~XFS_ATTR_ONDISK_MASK) {
		xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
		return -ECANCELED;
	}

	if (attr_flags & XFS_ATTR_INCOMPLETE) {
		/* Incomplete attr key, just mark the inode for preening. */
		xchk_ino_set_preen(sc, ip->i_ino);
		return 0;
	}

	/* Does this name make sense? */
	if (!xfs_attr_namecheck(attr_flags, name, namelen)) {
		xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
		return -ECANCELED;
	}

	/* Check parent pointer record. */
	if ((attr_flags & XFS_ATTR_PARENT) &&
	    !xfs_parent_valuecheck(sc->mp, value, valuelen)) {
		xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
		return -ECANCELED;
	}

	/*
	 * Try to allocate enough memory to extract the attr value.  If that
	 * doesn't work, return -EDEADLOCK as a signal to try again with a
	 * maximally sized buffer.
	 */
	error = xchk_setup_xattr_buf(sc, valuelen);
	if (error == -ENOMEM)
		error = -EDEADLOCK;
	if (error)
		return error;

	/*
	 * Parent pointers are matched on attr name and value, so we must
	 * supply the xfs_parent_rec here when confirming that the dabtree
	 * indexing works correctly.
	 */
	if (attr_flags & XFS_ATTR_PARENT)
		memcpy(ab->value, value, valuelen);

	args.value = ab->value;

	/*
	 * Get the attr value to ensure that lookup can find this attribute
	 * through the dabtree indexing and that remote value retrieval also
	 * works correctly.
	 */
	xfs_attr_sethash(&args);
	error = xfs_attr_get_ilocked(&args);
	/* ENODATA means the hash lookup failed and the attr is bad */
	if (error == -ENODATA)
		error = -EFSCORRUPTED;
	if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, args.blkno,
			&error))
		return error;
	/* Retrieved value length must match what the walker reported. */
	if (args.valuelen != valuelen)
		xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);

	return 0;
}
  223. /*
  224. * Mark a range [start, start+len) in this map. Returns true if the
  225. * region was free, and false if there's a conflict or a problem.
  226. *
  227. * Within a char, the lowest bit of the char represents the byte with
  228. * the smallest address
  229. */
  230. bool
  231. xchk_xattr_set_map(
  232. struct xfs_scrub *sc,
  233. unsigned long *map,
  234. unsigned int start,
  235. unsigned int len)
  236. {
  237. unsigned int mapsize = sc->mp->m_attr_geo->blksize;
  238. bool ret = true;
  239. if (start >= mapsize)
  240. return false;
  241. if (start + len > mapsize) {
  242. len = mapsize - start;
  243. ret = false;
  244. }
  245. if (find_next_bit(map, mapsize, start) < start + len)
  246. ret = false;
  247. bitmap_set(map, start, len);
  248. return ret;
  249. }
  250. /*
  251. * Check the leaf freemap from the usage bitmap. Returns false if the
  252. * attr freemap has problems or points to used space.
  253. */
  254. STATIC bool
  255. xchk_xattr_check_freemap(
  256. struct xfs_scrub *sc,
  257. struct xfs_attr3_icleaf_hdr *leafhdr)
  258. {
  259. struct xchk_xattr_buf *ab = sc->buf;
  260. unsigned int mapsize = sc->mp->m_attr_geo->blksize;
  261. int i;
  262. /* Construct bitmap of freemap contents. */
  263. bitmap_zero(ab->freemap, mapsize);
  264. for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
  265. if (!xchk_xattr_set_map(sc, ab->freemap,
  266. leafhdr->freemap[i].base,
  267. leafhdr->freemap[i].size))
  268. return false;
  269. }
  270. /* Look for bits that are set in freemap and are marked in use. */
  271. return !bitmap_intersects(ab->freemap, ab->usedmap, mapsize);
  272. }
/*
 * Check this leaf entry's relations to everything else.
 * Adds the number of bytes used for the name/value data to *usedbytes
 * (but only if the entry checks out).
 */
STATIC void
xchk_xattr_entry(
	struct xchk_da_btree		*ds,
	int				level,
	char				*buf_end,
	struct xfs_attr_leafblock	*leaf,
	struct xfs_attr3_icleaf_hdr	*leafhdr,
	struct xfs_attr_leaf_entry	*ent,
	int				idx,
	unsigned int			*usedbytes,
	__u32				*last_hashval)
{
	struct xfs_mount		*mp = ds->state->mp;
	struct xchk_xattr_buf		*ab = ds->sc->buf;
	char				*name_end;
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote *rentry;
	unsigned int			nameidx;
	unsigned int			namesize;

	/* Padding must be zeroed on disk. */
	if (ent->pad2 != 0)
		xchk_da_set_corrupt(ds, level);

	/* Hash values in order? */
	if (be32_to_cpu(ent->hashval) < *last_hashval)
		xchk_da_set_corrupt(ds, level);
	*last_hashval = be32_to_cpu(ent->hashval);

	/* Name/value data must live between firstused and end of block. */
	nameidx = be16_to_cpu(ent->nameidx);
	if (nameidx < leafhdr->firstused ||
	    nameidx >= mp->m_attr_geo->blksize) {
		xchk_da_set_corrupt(ds, level);
		return;
	}

	/* Check the name information. */
	if (ent->flags & XFS_ATTR_LOCAL) {
		lentry = xfs_attr3_leaf_name_local(leaf, idx);
		namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
				be16_to_cpu(lentry->valuelen));
		name_end = (char *)lentry + namesize;
		if (lentry->namelen == 0)
			xchk_da_set_corrupt(ds, level);
	} else {
		rentry = xfs_attr3_leaf_name_remote(leaf, idx);
		namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
		name_end = (char *)rentry + namesize;
		if (rentry->namelen == 0 || rentry->valueblk == 0)
			xchk_da_set_corrupt(ds, level);
	}
	/* Name/value region must not run off the end of the block. */
	if (name_end > buf_end)
		xchk_da_set_corrupt(ds, level);

	/* Name/value region must not overlap any other entry's region. */
	if (!xchk_xattr_set_map(ds->sc, ab->usedmap, nameidx, namesize))
		xchk_da_set_corrupt(ds, level);

	/* Count these bytes only if the entry looked sane. */
	if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		*usedbytes += namesize;
}
/* Scrub an attribute leaf. */
STATIC int
xchk_xattr_block(
	struct xchk_da_btree	*ds,
	int			level)
{
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_da_state_blk		*blk = &ds->state->path.blk[level];
	struct xfs_buf			*bp = blk->bp;
	xfs_dablk_t			*last_checked = ds->private;
	struct xfs_attr_leafblock	*leaf = bp->b_addr;
	struct xfs_attr_leaf_entry	*ent;
	struct xfs_attr_leaf_entry	*entries;
	struct xchk_xattr_buf		*ab = ds->sc->buf;
	char				*buf_end;
	size_t				off;
	__u32				last_hashval = 0;
	unsigned int			usedbytes = 0;
	unsigned int			hdrsize;
	int				i;

	/* Skip the block if we already checked it for a previous record. */
	if (*last_checked == blk->blkno)
		return 0;

	*last_checked = blk->blkno;
	bitmap_zero(ab->usedmap, mp->m_attr_geo->blksize);

	/* Check all the padding. */
	if (xfs_has_crc(ds->sc->mp)) {
		struct xfs_attr3_leafblock	*leaf3 = bp->b_addr;

		if (leaf3->hdr.pad1 != 0 || leaf3->hdr.pad2 != 0 ||
		    leaf3->hdr.info.hdr.pad != 0)
			xchk_da_set_corrupt(ds, level);
	} else {
		if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
			xchk_da_set_corrupt(ds, level);
	}

	/* Check the leaf header */
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
	hdrsize = xfs_attr3_leaf_hdr_size(leaf);

	/*
	 * Empty xattr leaf blocks mapped at block 0 are probably a byproduct
	 * of a race between setxattr and a log shutdown.  Anywhere else in
	 * the attr fork is a corruption.
	 */
	if (leafhdr.count == 0) {
		if (blk->blkno == 0)
			xchk_da_set_preen(ds, level);
		else
			xchk_da_set_corrupt(ds, level);
	}
	/* Header accounting fields must fit within the block. */
	if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
		xchk_da_set_corrupt(ds, level);
	if (leafhdr.firstused > mp->m_attr_geo->blksize)
		xchk_da_set_corrupt(ds, level);
	/* Name/value data cannot start inside the header. */
	if (leafhdr.firstused < hdrsize)
		xchk_da_set_corrupt(ds, level);
	/* Mark the header bytes as in use. */
	if (!xchk_xattr_set_map(ds->sc, ab->usedmap, 0, hdrsize))
		xchk_da_set_corrupt(ds, level);
	/* Holes in the nameval area only need compaction, not repair. */
	if (leafhdr.holes)
		xchk_da_set_preen(ds, level);

	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	entries = xfs_attr3_leaf_entryp(leaf);
	/* The entry table must not overlap the name/value data. */
	if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
		xchk_da_set_corrupt(ds, level);

	buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
	for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
		/* Mark the leaf entry itself. */
		off = (char *)ent - (char *)leaf;
		if (!xchk_xattr_set_map(ds->sc, ab->usedmap, off,
				sizeof(xfs_attr_leaf_entry_t))) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}

		/* Check the entry and nameval. */
		xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
				ent, i, &usedbytes, &last_hashval);

		if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			goto out;
	}

	/* Freemap must agree with the used-space bitmap we just built. */
	if (!xchk_xattr_check_freemap(ds->sc, &leafhdr))
		xchk_da_set_corrupt(ds, level);

	/* Header's usedbytes accounting must match what we counted. */
	if (leafhdr.usedbytes != usedbytes)
		xchk_da_set_corrupt(ds, level);

out:
	return 0;
}
/* Scrub an attribute btree record. */
STATIC int
xchk_xattr_rec(
	struct xchk_da_btree		*ds,
	int				level)
{
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_da_state_blk		*blk = &ds->state->path.blk[level];
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote *rentry;
	struct xfs_buf			*bp;
	struct xfs_attr_leaf_entry	*ent;
	xfs_dahash_t			calc_hash;
	xfs_dahash_t			hash;
	int				nameidx;
	int				hdrsize;
	int				error;

	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);

	ent = xfs_attr3_leaf_entryp(blk->bp->b_addr) + blk->index;

	/* Check the whole block, if necessary. */
	error = xchk_xattr_block(ds, level);
	if (error)
		goto out;
	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Check the hash of the entry. */
	error = xchk_da_btree_hash(ds, level, &ent->hashval);
	if (error)
		goto out;

	/* Find the attr entry's location. */
	bp = blk->bp;
	hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
	nameidx = be16_to_cpu(ent->nameidx);
	if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
		xchk_da_set_corrupt(ds, level);
		goto out;
	}

	/* Retrieve the entry and check it. */
	hash = be32_to_cpu(ent->hashval);
	/* Flag bits that aren't defined on disk are corruption. */
	if (ent->flags & ~XFS_ATTR_ONDISK_MASK) {
		xchk_da_set_corrupt(ds, level);
		return 0;
	}
	if (!xfs_attr_check_namespace(ent->flags)) {
		xchk_da_set_corrupt(ds, level);
		return 0;
	}
	if (ent->flags & XFS_ATTR_LOCAL) {
		lentry = (struct xfs_attr_leaf_name_local *)
				(((char *)bp->b_addr) + nameidx);
		if (lentry->namelen <= 0) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}
		calc_hash = xfs_attr_hashval(mp, ent->flags, lentry->nameval,
				lentry->namelen,
				lentry->nameval + lentry->namelen,
				be16_to_cpu(lentry->valuelen));
	} else {
		rentry = (struct xfs_attr_leaf_name_remote *)
				(((char *)bp->b_addr) + nameidx);
		if (rentry->namelen <= 0) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}
		/* Parent pointers are never stored as remote values. */
		if (ent->flags & XFS_ATTR_PARENT) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}
		calc_hash = xfs_attr_hashval(mp, ent->flags, rentry->name,
				rentry->namelen, NULL,
				be32_to_cpu(rentry->valuelen));
	}
	/* Stored hash must match what we compute from the name/value. */
	if (calc_hash != hash)
		xchk_da_set_corrupt(ds, level);

out:
	return error;
}
/* Check space usage of shortform attrs. */
STATIC int
xchk_xattr_check_sf(
	struct xfs_scrub		*sc)
{
	struct xchk_xattr_buf		*ab = sc->buf;
	struct xfs_ifork		*ifp = &sc->ip->i_af;
	struct xfs_attr_sf_hdr		*sf = ifp->if_data;
	struct xfs_attr_sf_entry	*sfe = xfs_attr_sf_firstentry(sf);
	struct xfs_attr_sf_entry	*next;
	unsigned char			*end = ifp->if_data + ifp->if_bytes;
	int				i;
	int				error = 0;

	/* Track every byte of the fork so we can detect overlapping data. */
	bitmap_zero(ab->usedmap, ifp->if_bytes);
	xchk_xattr_set_map(sc, ab->usedmap, 0, sizeof(*sf));

	/* The header alone must fit inside the fork data. */
	if ((unsigned char *)sfe > end) {
		xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
		return 0;
	}

	for (i = 0; i < sf->count; i++) {
		unsigned char		*name = sfe->nameval;
		unsigned char		*value = &sfe->nameval[sfe->namelen];

		if (xchk_should_terminate(sc, &error))
			return error;

		/* The entry (incl. name/value) must not run off the fork. */
		next = xfs_attr_sf_nextentry(sfe);
		if ((unsigned char *)next > end) {
			xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
			break;
		}

		/*
		 * Shortform entries do not set LOCAL or INCOMPLETE, so the
		 * only valid flag bits here are for namespaces.
		 */
		if (sfe->flags & ~XFS_ATTR_NSP_ONDISK_MASK) {
			xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
			break;
		}

		/* Fixed-size entry header must not overlap anything else. */
		if (!xchk_xattr_set_map(sc, ab->usedmap,
				(char *)sfe - (char *)sf,
				sizeof(struct xfs_attr_sf_entry))) {
			xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
			break;
		}

		/* Name bytes must not overlap anything else. */
		if (!xchk_xattr_set_map(sc, ab->usedmap,
				(char *)name - (char *)sf,
				sfe->namelen)) {
			xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
			break;
		}

		/* Value bytes must not overlap anything else. */
		if (!xchk_xattr_set_map(sc, ab->usedmap,
				(char *)value - (char *)sf,
				sfe->valuelen)) {
			xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
			break;
		}
		sfe = next;
	}

	return 0;
}
/* Scrub the extended attribute metadata. */
int
xchk_xattr(
	struct xfs_scrub		*sc)
{
	xfs_dablk_t			last_checked = -1U;
	int				error = 0;

	if (!xfs_inode_hasattr(sc->ip))
		return -ENOENT;

	/* Allocate memory for xattr checking. */
	error = xchk_setup_xattr_buf(sc, 0);
	if (error == -ENOMEM)
		return -EDEADLOCK;	/* retry with XCHK_TRY_HARDER set */
	if (error)
		return error;

	/* Check the physical structure of the xattr. */
	if (sc->ip->i_af.if_format == XFS_DINODE_FMT_LOCAL)
		error = xchk_xattr_check_sf(sc);
	else
		error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
				&last_checked);
	if (error)
		return error;

	/* Structure is bad; don't bother with the name/hash walk. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/*
	 * Look up every xattr in this file by name and hash.
	 *
	 * The VFS only locks i_rwsem when modifying attrs, so keep all
	 * three locks held because that's the only way to ensure we're
	 * the only thread poking into the da btree.  We traverse the da
	 * btree while holding a leaf buffer locked for the xattr name
	 * iteration, which doesn't really follow the usual buffer
	 * locking order.
	 */
	error = xchk_xattr_walk(sc, sc->ip, xchk_xattr_actor, NULL, NULL);
	if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
		return error;

	return 0;
}