  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
  4. * All Rights Reserved.
  5. */
#ifndef __XFS_BTREE_H__
#define __XFS_BTREE_H__

/* Forward declarations for structures defined in other headers. */
struct xfs_buf;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
struct xfs_ifork;
struct xfs_perag;
/*
 * Generic key, ptr and record wrapper structures.
 *
 * These are disk format structures, and are converted where necessary
 * by the btree specific code that needs to interpret them.
 */

/*
 * On-disk btree block pointer, big-endian.  Which form is in use is
 * selected by xfs_btree_ops.ptr_len (XFS_BTREE_SHORT_PTR_LEN vs.
 * XFS_BTREE_LONG_PTR_LEN below).
 */
union xfs_btree_ptr {
	__be32			s;	/* short form ptr */
	__be64			l;	/* long form ptr */
};
/*
 * The in-core btree key.  Overlapping btrees actually store two keys
 * per pointer, so we reserve enough memory to hold both.  The __*bigkey
 * items should never be accessed directly.
 */
union xfs_btree_key {
	struct xfs_bmbt_key		bmbt;
	xfs_bmdr_key_t			bmbr;	/* bmbt root block */
	xfs_alloc_key_t			alloc;
	struct xfs_inobt_key		inobt;
	struct xfs_rmap_key		rmap;
	struct xfs_rmap_key		__rmap_bigkey[2];
	struct xfs_refcount_key		refc;
};
/* Generic btree record wrapper: one variant per btree flavour. */
union xfs_btree_rec {
	struct xfs_bmbt_rec		bmbt;
	xfs_bmdr_rec_t			bmbr;	/* bmbt root block */
	struct xfs_alloc_rec		alloc;
	struct xfs_inobt_rec		inobt;
	struct xfs_rmap_rec		rmap;
	struct xfs_refcount_rec		refc;
};
/*
 * This nonsense is to make -wlint happy.
 */
#define	XFS_LOOKUP_EQ	((xfs_lookup_t)XFS_LOOKUP_EQi)
#define	XFS_LOOKUP_LE	((xfs_lookup_t)XFS_LOOKUP_LEi)
#define	XFS_LOOKUP_GE	((xfs_lookup_t)XFS_LOOKUP_GEi)

struct xfs_btree_ops;

/* Look up the btree block magic value for @ops on mount @mp. */
uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops);
/*
 * For logging record fields: bitmask identifying btree block header fields,
 * translated into byte ranges by xfs_btree_offsets() for transaction logging.
 */
#define	XFS_BB_MAGIC		(1u << 0)
#define	XFS_BB_LEVEL		(1u << 1)
#define	XFS_BB_NUMRECS		(1u << 2)
#define	XFS_BB_LEFTSIB		(1u << 3)
#define	XFS_BB_RIGHTSIB		(1u << 4)
#define	XFS_BB_BLKNO		(1u << 5)	/* below: CRC headers only */
#define	XFS_BB_LSN		(1u << 6)
#define	XFS_BB_UUID		(1u << 7)
#define	XFS_BB_OWNER		(1u << 8)
/* Non-CRC headers have 5 loggable fields (bits 0-4); CRC headers have 9. */
#define	XFS_BB_NUM_BITS		5
#define	XFS_BB_ALL_BITS		((1u << XFS_BB_NUM_BITS) - 1)
#define	XFS_BB_NUM_BITS_CRC	9
#define	XFS_BB_ALL_BITS_CRC	((1u << XFS_BB_NUM_BITS_CRC) - 1)
/*
 * Generic stats interface: bump/add to the per-btree-type counter for @stat,
 * located at bc_ops->statoff plus the __XBTS_* offset for that statistic.
 */
#define	XFS_BTREE_STATS_INC(cur, stat)	\
	XFS_STATS_INC_OFF((cur)->bc_mp, \
		(cur)->bc_ops->statoff + __XBTS_ ## stat)
#define	XFS_BTREE_STATS_ADD(cur, stat, val)	\
	XFS_STATS_ADD_OFF((cur)->bc_mp, \
		(cur)->bc_ops->statoff + __XBTS_ ## stat, val)
  79. enum xbtree_key_contig {
  80. XBTREE_KEY_GAP = 0,
  81. XBTREE_KEY_CONTIGUOUS,
  82. XBTREE_KEY_OVERLAP,
  83. };
  84. /*
  85. * Decide if these two numeric btree key fields are contiguous, overlapping,
  86. * or if there's a gap between them. @x should be the field from the high
  87. * key and @y should be the field from the low key.
  88. */
  89. static inline enum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y)
  90. {
  91. x++;
  92. if (x < y)
  93. return XBTREE_KEY_GAP;
  94. if (x == y)
  95. return XBTREE_KEY_CONTIGUOUS;
  96. return XBTREE_KEY_OVERLAP;
  97. }
/* Sizes of the two on-disk btree pointer formats. */
#define XFS_BTREE_LONG_PTR_LEN		(sizeof(__be64))
#define XFS_BTREE_SHORT_PTR_LEN		(sizeof(__be32))

/* Where the root of this btree lives (see bc_ino/bc_ag/bc_mem below). */
enum xfs_btree_type {
	XFS_BTREE_TYPE_AG,
	XFS_BTREE_TYPE_INODE,
	XFS_BTREE_TYPE_MEM,
};
/*
 * Operations table describing one btree flavour; a cursor references its
 * table through bc_ops.
 */
struct xfs_btree_ops {
	const char		*name;

	/* Type of btree - AG-rooted, inode-rooted, or in-memory */
	enum xfs_btree_type	type;

	/* XFS_BTGEO_* flags that determine the geometry of the btree */
	unsigned int		geom_flags;

	/* size of the key, pointer, and record structures */
	size_t			key_len;
	size_t			ptr_len;
	size_t			rec_len;

	/* LRU refcount to set on each btree buffer created */
	unsigned int		lru_refs;

	/* offset of btree stats array */
	unsigned int		statoff;

	/* sick mask for health reporting (only for XFS_BTREE_TYPE_AG) */
	unsigned int		sick_mask;

	/* cursor operations */
	struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
	void	(*update_cursor)(struct xfs_btree_cur *src,
				 struct xfs_btree_cur *dst);

	/* update btree root pointer */
	void	(*set_root)(struct xfs_btree_cur *cur,
			    const union xfs_btree_ptr *nptr, int level_change);

	/* block allocation / freeing */
	int	(*alloc_block)(struct xfs_btree_cur *cur,
			       const union xfs_btree_ptr *start_bno,
			       union xfs_btree_ptr *new_bno,
			       int *stat);
	int	(*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);

	/* min/max records per block at the given level */
	int	(*get_minrecs)(struct xfs_btree_cur *cur, int level);
	int	(*get_maxrecs)(struct xfs_btree_cur *cur, int level);

	/* records on disk.  Matter for the root in inode case. */
	int	(*get_dmaxrecs)(struct xfs_btree_cur *cur, int level);

	/* init values of btree structures */
	void	(*init_key_from_rec)(union xfs_btree_key *key,
				     const union xfs_btree_rec *rec);
	void	(*init_rec_from_cur)(struct xfs_btree_cur *cur,
				     union xfs_btree_rec *rec);
	void	(*init_ptr_from_cur)(struct xfs_btree_cur *cur,
				     union xfs_btree_ptr *ptr);
	void	(*init_high_key_from_rec)(union xfs_btree_key *key,
					  const union xfs_btree_rec *rec);

	/* difference between key value and cursor value */
	int64_t	(*key_diff)(struct xfs_btree_cur *cur,
			    const union xfs_btree_key *key);

	/*
	 * Difference between key2 and key1 -- positive if key1 > key2,
	 * negative if key1 < key2, and zero if equal.  If the @mask parameter
	 * is non NULL, each key field to be used in the comparison must
	 * contain a nonzero value.
	 */
	int64_t	(*diff_two_keys)(struct xfs_btree_cur *cur,
				 const union xfs_btree_key *key1,
				 const union xfs_btree_key *key2,
				 const union xfs_btree_key *mask);

	/* buffer verifier/log ops for this btree's blocks */
	const struct xfs_buf_ops	*buf_ops;

	/* check that k1 is lower than k2 */
	int	(*keys_inorder)(struct xfs_btree_cur *cur,
				const union xfs_btree_key *k1,
				const union xfs_btree_key *k2);

	/* check that r1 is lower than r2 */
	int	(*recs_inorder)(struct xfs_btree_cur *cur,
				const union xfs_btree_rec *r1,
				const union xfs_btree_rec *r2);

	/*
	 * Are these two btree keys immediately adjacent?
	 *
	 * Given two btree keys @key1 and @key2, decide if it is impossible for
	 * there to be a third btree key K satisfying the relationship
	 * @key1 < K < @key2.  To determine if two btree records are
	 * immediately adjacent, @key1 should be the high key of the first
	 * record and @key2 should be the low key of the second record.
	 * If the @mask parameter is non NULL, each key field to be used in the
	 * comparison must contain a nonzero value.
	 */
	enum xbtree_key_contig (*keys_contiguous)(struct xfs_btree_cur *cur,
			       const union xfs_btree_key *key1,
			       const union xfs_btree_key *key2,
			       const union xfs_btree_key *mask);
};
/* btree geometry flags */
#define XFS_BTGEO_OVERLAPPING	(1U << 0)	/* overlapping intervals */

/* In-core btree record: one incore variant per btree flavour. */
union xfs_btree_irec {
	struct xfs_alloc_rec_incore	a;
	struct xfs_bmbt_irec		b;
	struct xfs_inobt_rec_incore	i;
	struct xfs_rmap_irec		r;
	struct xfs_refcount_irec	rc;
};
/* Per-level cursor state: which buffer and which entry the cursor is on. */
struct xfs_btree_level {
	/* buffer pointer */
	struct xfs_buf		*bp;

	/* key/record number */
	uint16_t		ptr;

	/* readahead info */
#define XFS_BTCUR_LEFTRA	(1 << 0) /* left sibling has been read-ahead */
#define XFS_BTCUR_RIGHTRA	(1 << 1) /* right sibling has been read-ahead */
	uint16_t		ra;
};
/*
 * Btree cursor structure.
 * This collects all information needed by the btree code in one place.
 */
struct xfs_btree_cur
{
	struct xfs_trans	*bc_tp;	/* transaction we're in, if any */
	struct xfs_mount	*bc_mp;	/* file system mount struct */
	const struct xfs_btree_ops *bc_ops;
	struct kmem_cache	*bc_cache; /* cursor cache */
	unsigned int		bc_flags; /* btree features - below */
	union xfs_btree_irec	bc_rec;	/* current insert/search record value */
	uint8_t			bc_nlevels; /* number of levels in the tree */
	uint8_t			bc_maxlevels; /* maximum levels for this btree type */

	/* per-type information; which member is valid follows bc_ops->type */
	union {
		struct {
			struct xfs_inode	*ip;
			short			forksize;
			char			whichfork;
			struct xbtree_ifakeroot	*ifake;	/* for staging cursor */
		} bc_ino;
		struct {
			struct xfs_perag	*pag;
			struct xfs_buf		*agbp;
			struct xbtree_afakeroot	*afake;	/* for staging cursor */
		} bc_ag;
		struct {
			struct xfbtree		*xfbtree;
			struct xfs_perag	*pag;
		} bc_mem;
	};

	/* per-format private data */
	union {
		struct {
			int		allocated;
		} bc_bmap;	/* bmapbt */
		struct {
			unsigned int	nr_ops;		/* # record updates */
			unsigned int	shape_changes;	/* # of extent splits */
		} bc_refc;	/* refcountbt */
	};

	/* Must be at the end of the struct! */
	struct xfs_btree_level	bc_levels[];
};
/*
 * Compute the size of a btree cursor that can handle a btree of a given
 * height.  The bc_levels array handles node and leaf blocks, so its size
 * is exactly nlevels.
 */
static inline size_t
xfs_btree_cur_sizeof(unsigned int nlevels)
{
	/* struct_size_t() sizes the struct plus the flexible bc_levels[]. */
	return struct_size_t(struct xfs_btree_cur, bc_levels, nlevels);
}
/* cursor state flags */
/*
 * The root of this btree is a fakeroot structure so that we can stage a btree
 * rebuild without leaving it accessible via primary metadata.  The ops struct
 * is dynamically allocated and must be freed when the cursor is deleted.
 */
#define XFS_BTREE_STAGING		(1U << 0)

/* We are converting a delalloc reservation (only for bmbt btrees) */
#define XFS_BTREE_BMBT_WASDEL		(1U << 1)

/* For extent swap, ignore owner check in verifier (only for bmbt btrees) */
#define XFS_BTREE_BMBT_INVALID_OWNER	(1U << 2)

/* Cursor is active (only for allocbt btrees) */
#define XFS_BTREE_ALLOCBT_ACTIVE	(1U << 3)

/* Values for the "error" argument of xfs_btree_del_cursor(). */
#define	XFS_BTREE_NOERROR	0
#define	XFS_BTREE_ERROR		1

/*
 * Convert from buffer to btree block header.
 */
#define	XFS_BUF_TO_BLOCK(bp)	((struct xfs_btree_block *)((bp)->b_addr))
/* Low-level btree block header and pointer validation. */
xfs_failaddr_t __xfs_btree_check_block(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, int level, struct xfs_buf *bp);
int __xfs_btree_check_ptr(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, int index, int level);

/*
 * Check that block header is ok.
 */
int
xfs_btree_check_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* generic btree block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp);	/* buffer containing block, if any */

/*
 * Delete the btree cursor.
 */
void
xfs_btree_del_cursor(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			error);	/* del because of error */

/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 */
int					/* error */
xfs_btree_dup_cursor(
	struct xfs_btree_cur	*cur,	/* input cursor */
	struct xfs_btree_cur	**ncur);/* output cursor */

/*
 * Compute first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets.
 */
void
xfs_btree_offsets(
	uint32_t		fields,	/* bitmask of fields */
	const short		*offsets,/* table of field offsets */
	int			nbits,	/* number of bits to inspect */
	int			*first,	/* output: first byte offset */
	int			*last);	/* output: last byte offset */

/*
 * Initialise a new btree block header
 */
void xfs_btree_init_buf(struct xfs_mount *mp, struct xfs_buf *bp,
		const struct xfs_btree_ops *ops, __u16 level, __u16 numrecs,
		__u64 owner);
void xfs_btree_init_block(struct xfs_mount *mp,
		struct xfs_btree_block *buf, const struct xfs_btree_ops *ops,
		__u16 level, __u16 numrecs, __u64 owner);
/*
 * Common btree core entry points.
 */
int xfs_btree_increment(struct xfs_btree_cur *, int, int *);
int xfs_btree_decrement(struct xfs_btree_cur *, int, int *);
int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *);
int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *);
int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *);
int xfs_btree_insert(struct xfs_btree_cur *, int *);
int xfs_btree_delete(struct xfs_btree_cur *, int *);
int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *);
int xfs_btree_change_owner(struct xfs_btree_cur *cur, uint64_t new_owner,
			   struct list_head *buffer_list);

/*
 * btree block CRC helpers
 */
void xfs_btree_fsblock_calc_crc(struct xfs_buf *);
bool xfs_btree_fsblock_verify_crc(struct xfs_buf *);
void xfs_btree_agblock_calc_crc(struct xfs_buf *);
bool xfs_btree_agblock_verify_crc(struct xfs_buf *);

/*
 * Internal btree helpers also used by xfs_bmap.c.
 */
void xfs_btree_log_block(struct xfs_btree_cur *, struct xfs_buf *, uint32_t);
void xfs_btree_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, int);

/*
 * Helpers.
 */
  355. static inline int xfs_btree_get_numrecs(const struct xfs_btree_block *block)
  356. {
  357. return be16_to_cpu(block->bb_numrecs);
  358. }
  359. static inline void xfs_btree_set_numrecs(struct xfs_btree_block *block,
  360. uint16_t numrecs)
  361. {
  362. block->bb_numrecs = cpu_to_be16(numrecs);
  363. }
  364. static inline int xfs_btree_get_level(const struct xfs_btree_block *block)
  365. {
  366. return be16_to_cpu(block->bb_level);
  367. }
/*
 * Min and max functions for extlen, agblock, fileoff, and filblks types.
 */
#define	XFS_EXTLEN_MIN(a,b)	min_t(xfs_extlen_t, (a), (b))
#define	XFS_EXTLEN_MAX(a,b)	max_t(xfs_extlen_t, (a), (b))
#define	XFS_AGBLOCK_MIN(a,b)	min_t(xfs_agblock_t, (a), (b))
#define	XFS_AGBLOCK_MAX(a,b)	max_t(xfs_agblock_t, (a), (b))
#define	XFS_FILEOFF_MIN(a,b)	min_t(xfs_fileoff_t, (a), (b))
#define	XFS_FILEOFF_MAX(a,b)	max_t(xfs_fileoff_t, (a), (b))
#define	XFS_FILBLKS_MIN(a,b)	min_t(xfs_filblks_t, (a), (b))
#define	XFS_FILBLKS_MAX(a,b)	max_t(xfs_filblks_t, (a), (b))

/* Block verifiers; return the failure address, or NULL if the block is ok. */
xfs_failaddr_t xfs_btree_agblock_v5hdr_verify(struct xfs_buf *bp);
xfs_failaddr_t xfs_btree_agblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);
xfs_failaddr_t xfs_btree_fsblock_v5hdr_verify(struct xfs_buf *bp,
		uint64_t owner);
xfs_failaddr_t xfs_btree_fsblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);
xfs_failaddr_t xfs_btree_memblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);

/* Btree geometry computations. */
unsigned int xfs_btree_compute_maxlevels(const unsigned int *limits,
		unsigned long long records);
unsigned long long xfs_btree_calc_size(const unsigned int *limits,
		unsigned long long records);
unsigned int xfs_btree_space_to_height(const unsigned int *limits,
		unsigned long long blocks);
/*
 * Return codes for the query range iterator function are 0 to continue
 * iterating, and non-zero to stop iterating.  Any non-zero value will be
 * passed up to the _query_range caller.  The special value -ECANCELED can be
 * used to stop iteration, because _query_range never generates that error
 * code on its own.
 */
typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
		const union xfs_btree_rec *rec, void *priv);

/* Call @fn for every record between @low_rec and @high_rec, inclusive. */
int xfs_btree_query_range(struct xfs_btree_cur *cur,
		const union xfs_btree_irec *low_rec,
		const union xfs_btree_irec *high_rec,
		xfs_btree_query_range_fn fn, void *priv);
int xfs_btree_query_all(struct xfs_btree_cur *cur, xfs_btree_query_range_fn fn,
		void *priv);

typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
		void *data);
/* Visit record blocks. */
#define XFS_BTREE_VISIT_RECORDS		(1 << 0)
/* Visit leaf blocks. */
#define XFS_BTREE_VISIT_LEAVES		(1 << 1)
/* Visit all blocks. */
#define XFS_BTREE_VISIT_ALL		(XFS_BTREE_VISIT_RECORDS | \
					 XFS_BTREE_VISIT_LEAVES)
int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
		xfs_btree_visit_blocks_fn fn, unsigned int flags, void *data);

int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_filblks_t *blocks);

/* Accessors for entry @n (1-based) within a btree block. */
union xfs_btree_rec *xfs_btree_rec_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_key *xfs_btree_key_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_key *xfs_btree_high_key_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_ptr *xfs_btree_ptr_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);

int xfs_btree_lookup_get_block(struct xfs_btree_cur *cur, int level,
		const union xfs_btree_ptr *pp, struct xfs_btree_block **blkp);
struct xfs_btree_block *xfs_btree_get_block(struct xfs_btree_cur *cur,
		int level, struct xfs_buf **bpp);
bool xfs_btree_ptr_is_null(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr);
int64_t xfs_btree_diff_two_ptrs(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *a,
		const union xfs_btree_ptr *b);
void xfs_btree_get_sibling(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block,
		union xfs_btree_ptr *ptr, int lr);
void xfs_btree_get_keys(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, union xfs_btree_key *key);
union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
		union xfs_btree_key *key);
typedef bool (*xfs_btree_key_gap_fn)(struct xfs_btree_cur *cur,
		const union xfs_btree_key *key1,
		const union xfs_btree_key *key2);

int xfs_btree_has_records(struct xfs_btree_cur *cur,
		const union xfs_btree_irec *low,
		const union xfs_btree_irec *high,
		const union xfs_btree_key *mask,
		enum xbtree_recpacking *outcome);

bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
struct xfs_ifork *xfs_btree_ifork_ptr(struct xfs_btree_cur *cur);
  455. /* Key comparison helpers */
  456. static inline bool
  457. xfs_btree_keycmp_lt(
  458. struct xfs_btree_cur *cur,
  459. const union xfs_btree_key *key1,
  460. const union xfs_btree_key *key2)
  461. {
  462. return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) < 0;
  463. }
  464. static inline bool
  465. xfs_btree_keycmp_gt(
  466. struct xfs_btree_cur *cur,
  467. const union xfs_btree_key *key1,
  468. const union xfs_btree_key *key2)
  469. {
  470. return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) > 0;
  471. }
  472. static inline bool
  473. xfs_btree_keycmp_eq(
  474. struct xfs_btree_cur *cur,
  475. const union xfs_btree_key *key1,
  476. const union xfs_btree_key *key2)
  477. {
  478. return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) == 0;
  479. }
  480. static inline bool
  481. xfs_btree_keycmp_le(
  482. struct xfs_btree_cur *cur,
  483. const union xfs_btree_key *key1,
  484. const union xfs_btree_key *key2)
  485. {
  486. return !xfs_btree_keycmp_gt(cur, key1, key2);
  487. }
  488. static inline bool
  489. xfs_btree_keycmp_ge(
  490. struct xfs_btree_cur *cur,
  491. const union xfs_btree_key *key1,
  492. const union xfs_btree_key *key2)
  493. {
  494. return !xfs_btree_keycmp_lt(cur, key1, key2);
  495. }
  496. static inline bool
  497. xfs_btree_keycmp_ne(
  498. struct xfs_btree_cur *cur,
  499. const union xfs_btree_key *key1,
  500. const union xfs_btree_key *key2)
  501. {
  502. return !xfs_btree_keycmp_eq(cur, key1, key2);
  503. }
  504. /* Masked key comparison helpers */
  505. static inline bool
  506. xfs_btree_masked_keycmp_lt(
  507. struct xfs_btree_cur *cur,
  508. const union xfs_btree_key *key1,
  509. const union xfs_btree_key *key2,
  510. const union xfs_btree_key *mask)
  511. {
  512. return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) < 0;
  513. }
  514. static inline bool
  515. xfs_btree_masked_keycmp_gt(
  516. struct xfs_btree_cur *cur,
  517. const union xfs_btree_key *key1,
  518. const union xfs_btree_key *key2,
  519. const union xfs_btree_key *mask)
  520. {
  521. return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) > 0;
  522. }
  523. static inline bool
  524. xfs_btree_masked_keycmp_ge(
  525. struct xfs_btree_cur *cur,
  526. const union xfs_btree_key *key1,
  527. const union xfs_btree_key *key2,
  528. const union xfs_btree_key *mask)
  529. {
  530. return !xfs_btree_masked_keycmp_lt(cur, key1, key2, mask);
  531. }
  532. /* Does this cursor point to the last block in the given level? */
  533. static inline bool
  534. xfs_btree_islastblock(
  535. struct xfs_btree_cur *cur,
  536. int level)
  537. {
  538. struct xfs_btree_block *block;
  539. struct xfs_buf *bp;
  540. block = xfs_btree_get_block(cur, level, &bp);
  541. if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
  542. return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
  543. return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
  544. }
/* Set @ptr to the null sentinel for this cursor's pointer format. */
void xfs_btree_set_ptr_null(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *ptr);

/* Get (without reading) / read the btree block that @ptr points to. */
int xfs_btree_get_buf_block(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, struct xfs_btree_block **block,
		struct xfs_buf **bpp);
int xfs_btree_read_buf_block(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, int flags,
		struct xfs_btree_block **block, struct xfs_buf **bpp);

/* Set @block's left (lr == 0) or right sibling pointer. */
void xfs_btree_set_sibling(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, const union xfs_btree_ptr *ptr,
		int lr);

void xfs_btree_init_block_cur(struct xfs_btree_cur *cur,
		struct xfs_buf *bp, int level, int numrecs);

/* Bulk copies of in-core pointer/key arrays. */
void xfs_btree_copy_ptrs(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *dst_ptr,
		const union xfs_btree_ptr *src_ptr, int numptrs);
void xfs_btree_copy_keys(struct xfs_btree_cur *cur,
		union xfs_btree_key *dst_key,
		const union xfs_btree_key *src_key, int numkeys);

void xfs_btree_init_ptr_from_cur(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *ptr);
  566. static inline struct xfs_btree_cur *
  567. xfs_btree_alloc_cursor(
  568. struct xfs_mount *mp,
  569. struct xfs_trans *tp,
  570. const struct xfs_btree_ops *ops,
  571. uint8_t maxlevels,
  572. struct kmem_cache *cache)
  573. {
  574. struct xfs_btree_cur *cur;
  575. ASSERT(ops->ptr_len == XFS_BTREE_LONG_PTR_LEN ||
  576. ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN);
  577. /* BMBT allocations can come through from non-transactional context. */
  578. cur = kmem_cache_zalloc(cache,
  579. GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
  580. cur->bc_ops = ops;
  581. cur->bc_tp = tp;
  582. cur->bc_mp = mp;
  583. cur->bc_maxlevels = maxlevels;
  584. cur->bc_cache = cache;
  585. return cur;
  586. }
/* Set up / tear down the kmem caches for all btree cursor types. */
int __init xfs_btree_init_cur_caches(void);
void xfs_btree_destroy_cur_caches(void);

/* Move the cursor to the leftmost record at its current level. */
int xfs_btree_goto_left_edge(struct xfs_btree_cur *cur);
  590. /* Does this level of the cursor point to the inode root (and not a block)? */
  591. static inline bool
  592. xfs_btree_at_iroot(
  593. const struct xfs_btree_cur *cur,
  594. int level)
  595. {
  596. return cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
  597. level == cur->bc_nlevels - 1;
  598. }
  599. #endif /* __XFS_BTREE_H__ */