btrfs_inode.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_INODE_H
#define BTRFS_INODE_H

#include <linux/hash.h>
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
#include "delayed-inode.h"

/*
 * ordered_data_close is set by truncate when a file that used
 * to have good data has been truncated to zero. When it is set
 * the btrfs file release call will add this inode to the
 * ordered operations list so that we make sure to flush out any
 * new data the application may have written before commit.
 */
enum {
	BTRFS_INODE_ORDERED_DATA_CLOSE = 0,
	BTRFS_INODE_DUMMY,
	BTRFS_INODE_IN_DEFRAG,
	BTRFS_INODE_HAS_ASYNC_EXTENT,
	BTRFS_INODE_NEEDS_FULL_SYNC,
	BTRFS_INODE_COPY_EVERYTHING,
	BTRFS_INODE_IN_DELALLOC_LIST,
	BTRFS_INODE_READDIO_NEED_LOCK,
	BTRFS_INODE_HAS_PROPS,
	BTRFS_INODE_SNAPSHOT_FLUSH,
};
/* in memory btrfs inode */
struct btrfs_inode {
	/* which subvolume this inode belongs to */
	struct btrfs_root *root;

	/* key used to find this inode on disk. This is used by the code
	 * to read in roots of subvolumes
	 */
	struct btrfs_key location;

	/*
	 * Lock for counters and all fields used to determine if the inode is in
	 * the log or not (last_trans, last_sub_trans, last_log_commit,
	 * logged_trans).
	 */
	spinlock_t lock;

	/* the extent_tree has caches of all the extent mappings to disk */
	struct extent_map_tree extent_tree;

	/* the io_tree does range state (DIRTY, LOCKED etc) */
	struct extent_io_tree io_tree;

	/* special utility tree used to record which mirrors have already been
	 * tried when checksums fail for a given block
	 */
	struct extent_io_tree io_failure_tree;

	/* held while logging the inode in tree-log.c */
	struct mutex log_mutex;

	/* held while doing delalloc reservations */
	struct mutex delalloc_mutex;

	/* used to order data wrt metadata */
	struct btrfs_ordered_inode_tree ordered_tree;

	/* list of all the delalloc inodes in the FS. There are times we need
	 * to write all the delalloc pages to disk, and this list is used
	 * to walk them all.
	 */
	struct list_head delalloc_inodes;

	/* node for the red-black tree that links inodes in subvolume root */
	struct rb_node rb_node;

	unsigned long runtime_flags;

	/* Keep track of who's O_SYNC/fsyncing currently */
	atomic_t sync_writers;

	/* full 64 bit generation number, struct vfs_inode doesn't have a big
	 * enough field for this.
	 */
	u64 generation;

	/*
	 * transid of the trans_handle that last modified this inode
	 */
	u64 last_trans;

	/*
	 * transid that last logged this inode
	 */
	u64 logged_trans;

	/*
	 * log transid when this inode was last modified
	 */
	int last_sub_trans;

	/* a local copy of root's last_log_commit */
	int last_log_commit;

	/* total number of bytes pending delalloc, used by stat to calc the
	 * real block usage of the file
	 */
	u64 delalloc_bytes;

	/*
	 * Total number of bytes pending delalloc that fall within a file
	 * range that is either a hole or beyond EOF (and no prealloc extent
	 * exists in the range). This is always <= delalloc_bytes.
	 */
	u64 new_delalloc_bytes;

	/*
	 * total number of bytes pending defrag, used by stat to check whether
	 * it needs COW.
	 */
	u64 defrag_bytes;

	/*
	 * the size of the file stored in the metadata on disk. data=ordered
	 * means the in-memory i_size might be larger than the size on disk
	 * because not all the blocks are written yet.
	 */
	u64 disk_i_size;

	/*
	 * if this is a directory then index_cnt is the counter for the index
	 * number for new files that are created
	 */
	u64 index_cnt;

	/* Cache the directory index number to speed the dir/file remove */
	u64 dir_index;

	/* the fsync log has some corner cases that mean we have to check
	 * directories to see if any unlinks have been done before
	 * the directory was logged. See tree-log.c for all the
	 * details
	 */
	u64 last_unlink_trans;

	/*
	 * Track the transaction id of the last transaction used to create a
	 * hard link for the inode. This is used by the log tree (fsync).
	 */
	u64 last_link_trans;

	/*
	 * Number of bytes outstanding that are going to need csums. This is
	 * used in ENOSPC accounting.
	 */
	u64 csum_bytes;

	/* flags field from the on disk inode */
	u32 flags;
	/*
	 * Counters to keep track of the number of extent items we may use due
	 * to delalloc and such. outstanding_extents is the number of extent
	 * items we think we'll end up using, and reserved_extents is the number
	 * of extent items we've reserved metadata for.
	 */
	unsigned outstanding_extents;
	struct btrfs_block_rsv block_rsv;

	/*
	 * Cached values of inode properties
	 */
	unsigned prop_compress;		/* per-file compression algorithm */
	/*
	 * Force compression on the file using the defrag ioctl, could be
	 * different from prop_compress and takes precedence if set
	 */
	unsigned defrag_compress;

	struct btrfs_delayed_node *delayed_node;

	/* File creation time. */
	struct timespec64 i_otime;

	/* Hook into fs_info->delayed_iputs */
	struct list_head delayed_iput;

	/*
	 * To avoid races between lockless (i_mutex not held) direct IO writes
	 * and concurrent fsync requests. Direct IO writes must acquire read
	 * access on this semaphore for creating an extent map and its
	 * corresponding ordered extent. The fast fsync path must acquire write
	 * access on this semaphore before it collects ordered extents and
	 * extent maps.
	 */
	struct rw_semaphore dio_sem;

	struct inode vfs_inode;
};
extern unsigned char btrfs_filetype_table[];

static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
{
	return container_of(inode, struct btrfs_inode, vfs_inode);
}
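/*
 * A minimal usage sketch (the helper name below is hypothetical): because
 * vfs_inode is embedded inside struct btrfs_inode, code that only holds the
 * VFS "struct inode *" can reach the btrfs-specific fields through BTRFS_I():
 *
 *	static u64 example_disk_i_size(struct inode *inode)
 *	{
 *		return BTRFS_I(inode)->disk_i_size;
 *	}
 */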
static inline unsigned long btrfs_inode_hash(u64 objectid,
					     const struct btrfs_root *root)
{
	u64 h = objectid ^ (root->objectid * GOLDEN_RATIO_PRIME);

#if BITS_PER_LONG == 32
	h = (h >> 32) ^ (h & 0xffffffff);
#endif

	return (unsigned long)h;
}
static inline void btrfs_insert_inode_hash(struct inode *inode)
{
	unsigned long h = btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root);

	__insert_inode_hash(inode, h);
}

static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
	u64 ino = inode->location.objectid;

	/*
	 * !ino: btree_inode
	 * type == BTRFS_ROOT_ITEM_KEY: subvol dir
	 */
	if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY)
		ino = inode->vfs_inode.i_ino;
	return ino;
}
static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
{
	i_size_write(&inode->vfs_inode, size);
	inode->disk_i_size = size;
}

static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;

	if (root == root->fs_info->tree_root &&
	    btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
		return true;
	if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
		return true;
	return false;
}

static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
						 int mod)
{
	lockdep_assert_held(&inode->lock);
	inode->outstanding_extents += mod;
	if (btrfs_is_free_space_inode(inode))
		return;
	trace_btrfs_inode_mod_outstanding_extents(inode->root, btrfs_ino(inode),
						  mod);
}
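/*
 * A minimal usage sketch, assuming a caller that just reserved metadata for
 * one new extent item; the spin_lock pairing is what the
 * lockdep_assert_held() above expects:
 *
 *	spin_lock(&inode->lock);
 *	btrfs_mod_outstanding_extents(inode, 1);
 *	spin_unlock(&inode->lock);
 */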
static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
	int ret = 0;

	spin_lock(&inode->lock);
	if (inode->logged_trans == generation &&
	    inode->last_sub_trans <= inode->last_log_commit &&
	    inode->last_sub_trans <= inode->root->last_log_commit) {
		/*
		 * After a ranged fsync we might have left some extent maps
		 * (that fall outside the fsync's range). So return false
		 * here if the list isn't empty, to make sure btrfs_log_inode()
		 * will be called and process those extent maps.
		 */
		smp_mb();
		if (list_empty(&inode->extent_tree.modified_extents))
			ret = 1;
	}
	spin_unlock(&inode->lock);
	return ret;
}
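/*
 * A minimal usage sketch, assuming an fsync-style caller that wants to skip
 * logging when nothing changed since the inode was last logged (the real
 * fsync path performs additional checks):
 *
 *	if (btrfs_inode_in_log(inode, fs_info->generation))
 *		return 0;	// already safely in the log tree
 */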
#define BTRFS_DIO_ORIG_BIO_SUBMITTED	0x1

struct btrfs_dio_private {
	struct inode *inode;
	unsigned long flags;
	u64 logical_offset;
	u64 disk_bytenr;
	u64 bytes;
	void *private;

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	/* orig_bio is our btrfs_io_bio */
	struct bio *orig_bio;

	/* dio_bio came from fs/direct-io.c */
	struct bio *dio_bio;

	/*
	 * The original bio may be split to several sub-bios, this is
	 * done during endio of sub-bios
	 */
	blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
			blk_status_t);
};
/*
 * Disable DIO read nolock optimization, so new dio readers will be forced
 * to grab i_mutex. It is used to avoid the endless truncate due to
 * nonlocked dio read.
 */
static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode)
{
	set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
	smp_mb();
}

static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode)
{
	smp_mb__before_atomic();
	clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
}
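/*
 * A minimal usage sketch, assuming a truncate-style caller that must keep
 * lockless DIO readers out while it shrinks the file:
 *
 *	btrfs_inode_block_unlocked_dio(inode);
 *	inode_dio_wait(&inode->vfs_inode);	// drain in-flight direct IO
 *	// ... shrink the file ...
 *	btrfs_inode_resume_unlocked_dio(inode);
 */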
static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u32 csum, u32 csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;

	/* Output minus objectid, which is more meaningful */
	if (root->objectid >= BTRFS_LAST_FREE_OBJECTID)
		btrfs_warn_rl(root->fs_info,
	"csum failed root %lld ino %lld off %llu csum 0x%08x expected csum 0x%08x mirror %d",
			root->objectid, btrfs_ino(inode),
			logical_start, csum, csum_expected, mirror_num);
	else
		btrfs_warn_rl(root->fs_info,
	"csum failed root %llu ino %llu off %llu csum 0x%08x expected csum 0x%08x mirror %d",
			root->objectid, btrfs_ino(inode),
			logical_start, csum, csum_expected, mirror_num);
}

#endif