xfs_attr_list.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
#include "xfs_dir2.h"
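
/*
 * Sort comparison routine for shortform attribute entries: order by hash
 * value, breaking ties by the original entry number so the sort is stable.
 */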
STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	xfs_attr_sf_sort_t *sa, *sb;

	sa = (xfs_attr_sf_sort_t *)a;
	sb = (xfs_attr_sf_sort_t *)b;
	if (sa->hash < sb->hash) {
		return -1;
	} else if (sa->hash > sb->hash) {
		return 1;
	} else {
		return sa->entno - sb->entno;
	}
}
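
/*
 * True if the cursor is still in its initial state, i.e. no previous
 * attr_list() call has stored a resume position in it.
 */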
#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))

/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entry's hashval and sort them before
 * we can begin returning them to the user.
 */
static int
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_sf_sort_t *sbuf, *sbp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_inode_t *dp;
	int sbsize, nsbuf, count, i;

	ASSERT(context != NULL);
	dp = context->dp;
	ASSERT(dp != NULL);
	ASSERT(dp->i_afp != NULL);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	ASSERT(sf != NULL);
	if (!sf->hdr.count)
		return 0;
	cursor = context->cursor;
	ASSERT(cursor != NULL);

	trace_xfs_attr_list_sf(context);

	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
	 */
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
			context->put_listent(context,
					     sfe->flags,
					     sfe->nameval,
					     (int)sfe->namelen,
					     (int)sfe->valuelen);
			/*
			 * Either search callback finished early or
			 * didn't fit it all in the buffer after all.
			 */
			if (context->seen_enough)
				break;
			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return 0;
	}

	/* do no more for a search callback */
	if (context->bufsize == 0)
		return 0;

	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->hdr.count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);

	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from each of them into the sort buffer.
	 */
	nsbuf = 0;
	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe,
					     sizeof(*sfe));
			kmem_free(sbuf);
			return -EFSCORRUPTED;
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		sbp++;
		nsbuf++;
	}

	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);

	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf) {
		kmem_free(sbuf);
		return 0;
	}

	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		context->put_listent(context,
				     sbp->flags,
				     sbp->name,
				     sbp->namelen,
				     sbp->valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}

	kmem_free(sbuf);
	return 0;
}

/*
 * We didn't find the block & hash mentioned in the cursor state, so
 * walk down the attr btree looking for the hash.
 */
STATIC int
xfs_attr_node_list_lookup(
	struct xfs_attr_list_context	*context,
	struct attrlist_cursor_kern	*cursor,
	struct xfs_buf			**pbp)
{
	struct xfs_da3_icnode_hdr	nodehdr;
	struct xfs_da_intnode		*node;
	struct xfs_da_node_entry	*btree;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	struct xfs_trans		*tp = context->tp;
	struct xfs_buf			*bp;
	int				i;
	int				error = 0;
	unsigned int			expected_level = 0;
	uint16_t			magic;

	ASSERT(*pbp == NULL);
	cursor->blkno = 0;
	for (;;) {
		error = xfs_da3_node_read(tp, dp, cursor->blkno, -1, &bp,
				XFS_ATTR_FORK);
		if (error)
			return error;
		node = bp->b_addr;
		magic = be16_to_cpu(node->hdr.info.magic);
		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC)
			break;
		if (magic != XFS_DA_NODE_MAGIC &&
		    magic != XFS_DA3_NODE_MAGIC) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					node, sizeof(*node));
			goto out_corruptbuf;
		}

		dp->d_ops->node_hdr_from_disk(&nodehdr, node);

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
			goto out_corruptbuf;

		/* Check the level from the root node. */
		if (cursor->blkno == 0)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level)
			goto out_corruptbuf;
		else
			expected_level--;

		btree = dp->d_ops->node_tree_p(node);
		for (i = 0; i < nodehdr.count; btree++, i++) {
			if (cursor->hashval <= be32_to_cpu(btree->hashval)) {
				cursor->blkno = be32_to_cpu(btree->before);
				trace_xfs_attr_list_node_descend(context,
						btree);
				break;
			}
		}
		xfs_trans_brelse(tp, bp);

		if (i == nodehdr.count)
			return 0;

		/* We can't point back to the root. */
		if (cursor->blkno == 0)
			return -EFSCORRUPTED;
	}

	if (expected_level != 0)
		goto out_corruptbuf;

	*pbp = bp;
	return 0;

out_corruptbuf:
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}
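
/*
 * Copy out attribute entries for attr_list() when the attribute fork is in
 * node (btree) format: revalidate the leaf block recorded in the cursor (or
 * look one up from the root), then walk the leaf blocks forward, copying
 * entries until the output buffer fills or the last leaf has been processed.
 */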
STATIC int
xfs_attr_node_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_da_intnode		*node;
	struct xfs_buf			*bp;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	int				error;

	trace_xfs_attr_node_list(context);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(context->tp, dp, cursor->blkno, -1,
					  &bp, XFS_ATTR_FORK);
		if ((error != 0) && (error != -EFSCORRUPTED))
			return error;
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
							     &leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
					    entries[leafhdr.count - 1].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
					    entries[0].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				}
				break;
			default:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
			}
		}
	}

	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		error = xfs_attr_node_list_lookup(context, cursor, &bp);
		if (error || !bp)
			return error;
	}
	ASSERT(bp != NULL);

	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
	for (;;) {
		leaf = bp->b_addr;
		xfs_attr3_leaf_list_int(bp, context);
		xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
		if (context->seen_enough || leafhdr.forw == 0)
			break;
		cursor->blkno = leafhdr.forw;
		xfs_trans_brelse(context->tp, bp);
		error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno, -1, &bp);
		if (error)
			return error;
	}
	xfs_trans_brelse(context->tp, bp);
	return 0;
}

/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
 */
void
xfs_attr3_leaf_list_int(
	struct xfs_buf			*bp,
	struct xfs_attr_list_context	*context)
{
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*entry;
	int				i;
	struct xfs_mount		*mp = context->dp->i_mount;

	trace_xfs_attr_list_leaf(context);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &entries[0];
		for (i = 0; i < ichdr.count; entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == ichdr.count) {
			trace_xfs_attr_list_notfound(context);
			return;
		}
	} else {
		entry = &entries[0];
		i = 0;
	}
	context->resynch = 0;

	/*
	 * We have found our place, start copying out the new attributes.
	 */
	for (; i < ichdr.count; entry++, i++) {
		char *name;
		int namelen, valuelen;

		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		if ((entry->flags & XFS_ATTR_INCOMPLETE) &&
		    !(context->flags & ATTR_INCOMPLETE))
			continue;		/* skip incomplete entries */

		if (entry->flags & XFS_ATTR_LOCAL) {
			xfs_attr_leaf_name_local_t *name_loc;

			name_loc = xfs_attr3_leaf_name_local(leaf, i);
			name = name_loc->nameval;
			namelen = name_loc->namelen;
			valuelen = be16_to_cpu(name_loc->valuelen);
		} else {
			xfs_attr_leaf_name_remote_t *name_rmt;

			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			name = name_rmt->name;
			namelen = name_rmt->namelen;
			valuelen = be32_to_cpu(name_rmt->valuelen);
		}

		context->put_listent(context, entry->flags,
				     name, namelen, valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
	return;
}

/*
 * Copy out attribute entries for attr_list(), for leaf attribute lists.
 */
STATIC int
xfs_attr_leaf_list(xfs_attr_list_context_t *context)
{
	int		error;
	struct xfs_buf	*bp;

	trace_xfs_attr_leaf_list(context);

	context->cursor->blkno = 0;
	error = xfs_attr3_leaf_read(context->tp, context->dp, 0, -1, &bp);
	if (error)
		return error;

	xfs_attr3_leaf_list_int(bp, context);
	xfs_trans_brelse(context->tp, bp);
	return 0;
}
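
/*
 * List the extended attributes of an inode whose ILOCK is already held by
 * the caller, dispatching to the shortform, single-leaf, or node-format
 * routine according to the format of the attribute fork.
 */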
int
xfs_attr_list_int_ilocked(
	struct xfs_attr_list_context	*context)
{
	struct xfs_inode		*dp = context->dp;

	ASSERT(xfs_isilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	if (!xfs_inode_hasattr(dp))
		return 0;
	else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
		return xfs_attr_shortform_list(context);
	else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
		return xfs_attr_leaf_list(context);
	return xfs_attr_node_list(context);
}
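
/*
 * Wrapper around xfs_attr_list_int_ilocked() that checks for a forced
 * shutdown and takes/drops the attr ILOCK around the listing.
 */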
int
xfs_attr_list_int(
	xfs_attr_list_context_t	*context)
{
	int			error;
	xfs_inode_t		*dp = context->dp;
	uint			lock_mode;

	XFS_STATS_INC(dp->i_mount, xs_attr_list);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	lock_mode = xfs_ilock_attr_map_shared(dp);
	error = xfs_attr_list_int_ilocked(context);
	xfs_iunlock(dp, lock_mode);
	return error;
}

#define	ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
	(((struct attrlist_ent *) 0)->a_name - (char *) 0)

#define	ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(uint32_t)-1) \
	 & ~(sizeof(uint32_t)-1))
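
/*
 * The caller's buffer is filled from both ends: the struct attrlist header
 * and its growing al_offset[] array sit at the front, while the variable
 * size attrlist_ent entries are packed downward from the end of the buffer
 * (tracked by context->firstu).  When the two regions would meet, al_more
 * is set and listing stops.
 */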
/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later;
 * we may be reading them directly out of a user buffer.
 */
STATIC void
xfs_attr_put_listent(
	xfs_attr_list_context_t *context,
	int		flags,
	unsigned char	*name,
	int		namelen,
	int		valuelen)
{
	struct attrlist *alist = (struct attrlist *)context->alist;
	attrlist_ent_t *aep;
	int arraytop;

	ASSERT(!context->seen_enough);
	ASSERT(!(context->flags & ATTR_KERNOVAL));
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (((context->flags & ATTR_SECURE) == 0) !=
	    ((flags & XFS_ATTR_SECURE) == 0))
		return;
	if (((context->flags & ATTR_ROOT) == 0) !=
	    ((flags & XFS_ATTR_ROOT) == 0))
		return;

	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);
	context->firstu -= ATTR_ENTSIZE(namelen);
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return;
	}

	aep = (attrlist_ent_t *)&context->alist[context->firstu];
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
	return;
}

/*
 * Generate a list of extended attribute names and optionally
 * also value lengths.  The names and lengths are packed into the
 * caller's buffer via xfs_attr_put_listent(); the function itself
 * returns zero on success or a negative error code.
 */
int
xfs_attr_list(
	xfs_inode_t	*dp,
	char		*buffer,
	int		bufsize,
	int		flags,
	attrlist_cursor_kern_t *cursor)
{
	xfs_attr_list_context_t context;
	struct attrlist *alist;
	int error;

	/*
	 * Validate the cursor.
	 */
	if (cursor->pad1 || cursor->pad2)
		return -EINVAL;
	if ((cursor->initted == 0) &&
	    (cursor->hashval || cursor->blkno || cursor->offset))
		return -EINVAL;

	/* Only internal consumers can retrieve incomplete attrs. */
	if (flags & ATTR_INCOMPLETE)
		return -EINVAL;

	/*
	 * Check for a properly aligned buffer.
	 */
	if (((long)buffer) & (sizeof(int)-1))
		return -EFAULT;
	if (flags & ATTR_KERNOVAL)
		bufsize = 0;

	/*
	 * Initialize the output buffer.
	 */
	memset(&context, 0, sizeof(context));
	context.dp = dp;
	context.cursor = cursor;
	context.resynch = 1;
	context.flags = flags;
	context.alist = buffer;
	context.bufsize = (bufsize & ~(sizeof(int)-1));	/* align */
	context.firstu = context.bufsize;
	context.put_listent = xfs_attr_put_listent;

	alist = (struct attrlist *)context.alist;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list_int(&context);
	ASSERT(error <= 0);
	return error;
}