/* dm-core.h */
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Internal header file _only_ for device mapper core
  4. *
  5. * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
  6. *
  7. * This file is released under the LGPL.
  8. */
  9. #ifndef DM_CORE_INTERNAL_H
  10. #define DM_CORE_INTERNAL_H
  11. #include <linux/kthread.h>
  12. #include <linux/ktime.h>
  13. #include <linux/blk-mq.h>
  14. #include <linux/blk-crypto-profile.h>
  15. #include <linux/jump_label.h>
  16. #include <trace/events/block.h>
  17. #include "dm.h"
  18. #include "dm-ima.h"
/* NOTE(review): presumably the cap for the reserved-ios module parameter — confirm in dm.c */
#define DM_RESERVED_MAX_IOS 1024
/* Hard limits on table sizes accepted from user space (table load ioctl). */
#define DM_MAX_TARGETS 1048576
#define DM_MAX_TARGET_PARAMS 1024
struct dm_io;

/*
 * Pairs a kobject with a completion so a waiter can be signalled on its
 * release (see dm_get_completion_from_kobject() below).
 */
struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};
  27. /*
  28. * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
  29. * DM targets must _not_ deference a mapped_device or dm_table to directly
  30. * access their members!
  31. */
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;	/* clone bios */
	struct bio_set io_bs;	/* NOTE(review): presumably front-padded per DM_IO_BIO_OFFSET — confirm in dm-table.c */
};
/*
 * Per-device DM core state. Targets must not dereference this directly
 * (see the note at the top of this header).
 */
struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;	/* DMF_* bits, defined below */

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * requeue work context is needed for cloning one new bio
	 * to represent the dm_io to be requeued, since each
	 * dm_io may point to the original bio from FS.
	 */
	struct work_struct requeue_work;
	struct dm_io *requeue_list;	/* singly linked via dm_io.next */

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned int internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;	/* synchronizes map dereference vs. table swap */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	void *zone_revalidate_map;
	struct task_struct *revalidate_map_task;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};
/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
#define DMF_QUEUE_STOPPED 10

/* NOTE(review): presumably called when a target/device rejects these ops — confirm in dm.c */
void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
  133. static inline sector_t dm_get_size(struct mapped_device *md)
  134. {
  135. return get_capacity(md->disk);
  136. }
  137. static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
  138. {
  139. return &md->stats;
  140. }
/* Static-branch keys (jump labels) gating optional fast-path features. */
DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);
  144. static inline bool dm_emulate_zone_append(struct mapped_device *md)
  145. {
  146. if (blk_queue_is_zoned(md->queue))
  147. return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
  148. return false;
  149. }
#define DM_TABLE_MAX_DEPTH 16

/*
 * An immutable mapping table; looked up via the btree index below to find
 * the target for a given sector.
 */
struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;	/* highest sector of each target, parallel to targets[] */
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	/* set if all the targets in the table have "flush_bypasses_map" set */
	bool flush_bypasses_map:1;

	/*
	 * Indicates the rw permissions for the new logical device. This
	 * should be a combination of BLK_OPEN_READ and BLK_OPEN_WRITE.
	 */
	blk_mode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;
	struct rw_semaphore devices_lock;

	/* events get handed up using this callback */
	void (*event_fn)(void *data);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};
  183. static inline struct dm_target *dm_table_get_target(struct dm_table *t,
  184. unsigned int index)
  185. {
  186. BUG_ON(index >= t->num_targets);
  187. return t->targets + index;
  188. }
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;		/* expected to hold DM_TIO_MAGIC */
	blk_short_t flags;		/* DM_TIO_* bits, see enum below */
	unsigned int target_bio_nr;
	struct dm_io *io;		/* owning dm_io */
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;		/* must remain last: bio data tail-extends this struct */
};
/* Byte offset of the embedded clone bio within struct dm_target_io. */
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
/* Byte offset of the clone bio measured from the start of struct dm_io. */
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
/*
 * dm_target_io flags: bit numbers used with dm_tio_flagged() /
 * dm_tio_set_flag() below.
 */
enum {
	DM_TIO_INSIDE_DM_IO,	/* this tio is the one embedded in struct dm_io */
	DM_TIO_IS_DUPLICATE_BIO
};
  213. static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
  214. {
  215. return (tio->flags & (1U << bit)) != 0;
  216. }
  217. static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
  218. {
  219. tio->flags |= (1U << bit);
  220. }
  221. static inline bool dm_tio_is_normal(struct dm_target_io *tio)
  222. {
  223. return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
  224. !dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
  225. }
/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;		/* expected to hold DM_IO_MAGIC */
	blk_short_t flags;		/* DM_IO_* bits, see enum below */
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;		/* link for md->requeue_list */
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	/* The three fields represent mapped part of original bio */
	struct bio *orig_bio;
	unsigned int sector_offset;	/* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;	/* must remain the last member */
};
/*
 * dm_io flags: bit numbers used with dm_io_flagged() / dm_io_set_flag()
 * below.
 */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT,
	DM_IO_BLK_STAT
};
  257. static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
  258. {
  259. return (io->flags & (1U << bit)) != 0;
  260. }
  261. static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
  262. {
  263. io->flags |= (1U << bit);
  264. }
  265. void dm_io_rewind(struct dm_io *io, struct bio_set *bs);
  266. static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
  267. {
  268. return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
  269. }
  270. unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);
  271. static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
  272. {
  273. return !maxlen || strlen(result) + 1 >= maxlen;
  274. }
/* Global event counter/waitqueue shared across all mapped devices. */
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
/* Bump dm_global_event_nr and wake dm_global_eventq waiters. */
void dm_issue_global_event(void);
  278. #endif