extent-io-tree.h (7.8 KB)
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef BTRFS_EXTENT_IO_TREE_H
  3. #define BTRFS_EXTENT_IO_TREE_H
  4. #include <linux/rbtree.h>
  5. #include <linux/spinlock.h>
  6. #include <linux/refcount.h>
  7. #include <linux/list.h>
  8. #include <linux/wait.h>
  9. #include "misc.h"
  10. struct extent_changeset;
  11. struct btrfs_fs_info;
  12. struct btrfs_inode;
/*
 * Bits for the extent state.
 *
 * ENUM_BIT() comes from "misc.h"; presumably it expands each entry to a
 * distinct power-of-two flag so the values can be OR-ed into a u32 bitmask
 * (see struct extent_state::state) — confirm against misc.h.  The relative
 * order of the entries therefore fixes the bit positions.
 */
enum {
	ENUM_BIT(EXTENT_DIRTY),
	ENUM_BIT(EXTENT_UPTODATE),
	ENUM_BIT(EXTENT_LOCKED),
	ENUM_BIT(EXTENT_DIO_LOCKED),
	ENUM_BIT(EXTENT_NEW),
	ENUM_BIT(EXTENT_DELALLOC),
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range. Should not be cleared during page release
	 * and page invalidation (if there is an ordered extent in flight),
	 * that is left for the ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * When an ordered extent successfully completes for a region marked as
	 * a new delalloc range, use this flag when clearing a new delalloc
	 * range to indicate that the VFS' inode number of bytes should be
	 * incremented and the inode's new delalloc bytes decremented, in an
	 * atomic way to prevent races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),
	/*
	 * This must be last.
	 *
	 * Bit not representing a state but a request for NOWAIT semantics,
	 * e.g. when allocating memory, and must be masked out from the other
	 * bits.
	 */
	ENUM_BIT(EXTENT_NOWAIT)
};
/* Reservation-clearing bits that imply space accounting on clear. */
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)

/*
 * Control bits: requests to the bit-manipulation helpers rather than range
 * state proper (accounting, inode byte updates, wholesale state removal).
 */
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

/* Both locking flavors: the regular lock and the direct-IO lock. */
#define EXTENT_LOCK_BITS	(EXTENT_LOCKED | EXTENT_DIO_LOCKED)
/*
 * Redefined bits above which are used only in the device allocation tree,
 * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
 * manipulation functions.
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
/* All bits the device allocation tree uses. */
#define CHUNK_STATE_MASK	(CHUNK_ALLOCATED | \
				 CHUNK_TRIMMED)
/*
 * Identity of an extent_io_tree, stored in extent_io_tree::owner (set by
 * extent_io_tree_init()).  Used to tell apart the different trees, e.g. for
 * trace points and the fs_info/inode union interpretation.
 */
enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};
/* A tree of extent_state records describing bit state over byte ranges. */
struct extent_io_tree {
	/* Root of the rb-tree of struct extent_state nodes. */
	struct rb_root state;
	/*
	 * The fs_info is needed for trace points, a tree attached to an inode
	 * needs the inode.
	 *
	 * owner == IO_TREE_INODE_IO - then inode is valid and fs_info can be
	 * accessed as inode->root->fs_info
	 */
	union {
		struct btrfs_fs_info *fs_info;
		struct btrfs_inode *inode;
	};
	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;
	/* NOTE(review): presumably guards @state — confirm in extent-io-tree.c. */
	spinlock_t lock;
};
/* One node of an extent_io_tree: a contiguous range and its EXTENT_* bits. */
struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */

	/* Waiters on this range — presumably for EXTENT_NEED_WAIT/lock bits. */
	wait_queue_head_t wq;
	refcount_t refs;
	/* Bitmask of EXTENT_* bits currently set on [start, end]. */
	u32 state;
#ifdef CONFIG_BTRFS_DEBUG
	/* Links the state into a global leak-detection list. */
	struct list_head leak_list;
#endif
};
/*
 * Accessors resolving the owner union; the inode variants are meaningful only
 * for trees attached to an inode (see the union comment in extent_io_tree).
 */
struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree);
const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree);
const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner);
void extent_io_tree_release(struct extent_io_tree *tree);

/*
 * Low-level locking primitives taking explicit lock bit(s); most callers use
 * the lock_extent()/try_lock_extent() (and *_dio_*) wrappers below.
 */
int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		  struct extent_state **cached);
bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		       struct extent_state **cached);
  127. static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
  128. struct extent_state **cached)
  129. {
  130. return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
  131. }
  132. static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start,
  133. u64 end, struct extent_state **cached)
  134. {
  135. return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached);
  136. }
/* Slab cache for struct extent_state; created once at module init. */
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig,
		     struct extent_state **cached_state);

/* Drop one reference; frees the state when the refcount hits zero. */
void free_extent_state(struct extent_state *state);
bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
		    struct extent_state *cached_state);
bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
/* Clear bits and record the affected ranges in @changeset. */
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
/* Workhorse behind the clear_extent_*()/unlock_*() wrappers below. */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached,
		       struct extent_changeset *changeset);
  152. static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
  153. u64 end, u32 bits,
  154. struct extent_state **cached)
  155. {
  156. return __clear_extent_bit(tree, start, end, bits, cached, NULL);
  157. }
  158. static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
  159. struct extent_state **cached)
  160. {
  161. return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL);
  162. }
  163. static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
  164. u64 end, u32 bits)
  165. {
  166. return clear_extent_bit(tree, start, end, bits, NULL);
  167. }
/* Set bits and record the affected ranges in @changeset. */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state);
  172. static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
  173. u64 end, struct extent_state **cached_state)
  174. {
  175. return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
  176. cached_state, NULL);
  177. }
  178. static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
  179. u64 end, struct extent_state **cached)
  180. {
  181. return clear_extent_bit(tree, start, end,
  182. EXTENT_DIRTY | EXTENT_DELALLOC |
  183. EXTENT_DO_ACCOUNTING, cached);
  184. }
/* Atomically set @bits and clear @clear_bits on [start, end]. */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);
/* Search helpers; *start_ret/*end_ret receive the located range. */
bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			   u64 *start_ret, u64 *end_ret, u32 bits,
			   struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
  198. static inline int lock_dio_extent(struct extent_io_tree *tree, u64 start,
  199. u64 end, struct extent_state **cached)
  200. {
  201. return __lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
  202. }
  203. static inline bool try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
  204. u64 end, struct extent_state **cached)
  205. {
  206. return __try_lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
  207. }
  208. static inline int unlock_dio_extent(struct extent_io_tree *tree, u64 start,
  209. u64 end, struct extent_state **cached)
  210. {
  211. return __clear_extent_bit(tree, start, end, EXTENT_DIO_LOCKED, cached, NULL);
  212. }
  213. #endif /* BTRFS_EXTENT_IO_TREE_H */