dm-target.c 5.6 KB

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2001 Sistina Software (UK) Limited
  4. *
  5. * This file is released under the GPL.
  6. */
  7. #include "dm-core.h"
  8. #include <linux/module.h>
  9. #include <linux/init.h>
  10. #include <linux/kmod.h>
  11. #include <linux/bio.h>
  12. #include <linux/dax.h>
  13. #define DM_MSG_PREFIX "target"
  14. static LIST_HEAD(_targets);
  15. static DECLARE_RWSEM(_lock);
  16. static inline struct target_type *__find_target_type(const char *name)
  17. {
  18. struct target_type *tt;
  19. list_for_each_entry(tt, &_targets, list)
  20. if (!strcmp(name, tt->name))
  21. return tt;
  22. return NULL;
  23. }
  24. static struct target_type *get_target_type(const char *name)
  25. {
  26. struct target_type *tt;
  27. down_read(&_lock);
  28. tt = __find_target_type(name);
  29. if (tt && !try_module_get(tt->module))
  30. tt = NULL;
  31. up_read(&_lock);
  32. return tt;
  33. }
/* Best-effort request to load the module providing target "name" ("dm-<name>"). */
static void load_module(const char *name)
{
	request_module("dm-%s", name);
}
  38. struct target_type *dm_get_target_type(const char *name)
  39. {
  40. struct target_type *tt = get_target_type(name);
  41. if (!tt) {
  42. load_module(name);
  43. tt = get_target_type(name);
  44. }
  45. return tt;
  46. }
/* Drop the module reference taken by dm_get_target_type(). */
void dm_put_target_type(struct target_type *tt)
{
	/* _lock keeps the drop ordered against target (un)registration. */
	down_read(&_lock);
	module_put(tt->module);
	up_read(&_lock);
}
  53. int dm_target_iterate(void (*iter_func)(struct target_type *tt,
  54. void *param), void *param)
  55. {
  56. struct target_type *tt;
  57. down_read(&_lock);
  58. list_for_each_entry(tt, &_targets, list)
  59. iter_func(tt, param);
  60. up_read(&_lock);
  61. return 0;
  62. }
  63. int dm_register_target(struct target_type *tt)
  64. {
  65. int rv = 0;
  66. down_write(&_lock);
  67. if (__find_target_type(tt->name)) {
  68. DMERR("%s: '%s' target already registered",
  69. __func__, tt->name);
  70. rv = -EEXIST;
  71. } else {
  72. list_add(&tt->list, &_targets);
  73. }
  74. up_write(&_lock);
  75. return rv;
  76. }
  77. EXPORT_SYMBOL(dm_register_target);
  78. void dm_unregister_target(struct target_type *tt)
  79. {
  80. down_write(&_lock);
  81. if (!__find_target_type(tt->name)) {
  82. DMCRIT("Unregistering unrecognised target: %s", tt->name);
  83. BUG();
  84. }
  85. list_del(&tt->list);
  86. up_write(&_lock);
  87. }
  88. EXPORT_SYMBOL(dm_unregister_target);
  89. /*
  90. * io-err: always fails an io, useful for bringing
  91. * up LVs that have holes in them.
  92. */
/*
 * Per-target context, allocated only when the "error" target was given
 * dm-linear style "<device> <start>" arguments.
 */
struct io_err_c {
	struct dm_dev *dev;	/* backing device from dm_get_device() */
	sector_t start;		/* mapping start sector on @dev */
};
  97. static int io_err_get_args(struct dm_target *tt, unsigned int argc, char **args)
  98. {
  99. unsigned long long start;
  100. struct io_err_c *ioec;
  101. char dummy;
  102. int ret;
  103. ioec = kmalloc(sizeof(*ioec), GFP_KERNEL);
  104. if (!ioec) {
  105. tt->error = "Cannot allocate io_err context";
  106. return -ENOMEM;
  107. }
  108. ret = -EINVAL;
  109. if (sscanf(args[1], "%llu%c", &start, &dummy) != 1 ||
  110. start != (sector_t)start) {
  111. tt->error = "Invalid device sector";
  112. goto bad;
  113. }
  114. ioec->start = start;
  115. ret = dm_get_device(tt, args[0], dm_table_get_mode(tt->table), &ioec->dev);
  116. if (ret) {
  117. tt->error = "Device lookup failed";
  118. goto bad;
  119. }
  120. tt->private = ioec;
  121. return 0;
  122. bad:
  123. kfree(ioec);
  124. return ret;
  125. }
  126. static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
  127. {
  128. /*
  129. * If we have arguments, assume it is the path to the backing
  130. * block device and its mapping start sector (same as dm-linear).
  131. * In this case, get the device so that we can get its limits.
  132. */
  133. if (argc == 2) {
  134. int ret = io_err_get_args(tt, argc, args);
  135. if (ret)
  136. return ret;
  137. }
  138. /*
  139. * Return error for discards instead of -EOPNOTSUPP
  140. */
  141. tt->num_discard_bios = 1;
  142. tt->discards_supported = true;
  143. return 0;
  144. }
  145. static void io_err_dtr(struct dm_target *tt)
  146. {
  147. struct io_err_c *ioec = tt->private;
  148. if (ioec) {
  149. dm_put_device(tt, ioec->dev);
  150. kfree(ioec);
  151. }
  152. }
/* bio-based map: fail every bio. */
static int io_err_map(struct dm_target *tt, struct bio *bio)
{
	return DM_MAPIO_KILL;
}
/* request-based map: fail every request. */
static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return DM_MAPIO_KILL;
}
/* Nothing to release: io_err_clone_and_map_rq() never produces a clone. */
static void io_err_release_clone_rq(struct request *clone,
				    union map_info *map_context)
{
}
#ifdef CONFIG_BLK_DEV_ZONED
/* Translate a target-relative sector to a sector on the backing device. */
static sector_t io_err_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct io_err_c *ioec = ti->private;

	return ioec->start + dm_target_offset(ti, bi_sector);
}

/* Forward zone reporting to the backing device. */
static int io_err_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct io_err_c *ioec = ti->private;

	/*
	 * This should never be called when we do not have a backing device,
	 * as that means the target is not a zoned one.
	 */
	if (WARN_ON_ONCE(!ioec))
		return -EIO;

	return dm_report_zones(ioec->dev->bdev, ioec->start,
			       io_err_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define io_err_report_zones NULL
#endif
  190. static int io_err_iterate_devices(struct dm_target *ti,
  191. iterate_devices_callout_fn fn, void *data)
  192. {
  193. struct io_err_c *ioec = ti->private;
  194. if (!ioec)
  195. return 0;
  196. return fn(ti, ioec->dev, ioec->start, ti->len, data);
  197. }
/* Advertise discard support so discards reach the target (and then fail). */
static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	limits->max_hw_discard_sectors = UINT_MAX;
	limits->discard_granularity = 512;
}
/* DAX access always fails on an error target. */
static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	return -EIO;
}
/*
 * The built-in "error" target: fails all I/O.  DM_TARGET_WILDCARD lets it
 * stand in for any target name; DM_TARGET_ZONED_HM allows use on
 * host-managed zoned devices (zone reporting is forwarded to the
 * backing device when one is configured).
 */
static struct target_type error_target = {
	.name = "error",
	.version = {1, 7, 0},
	.features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM,
	.ctr = io_err_ctr,
	.dtr = io_err_dtr,
	.map = io_err_map,
	.clone_and_map_rq = io_err_clone_and_map_rq,
	.release_clone_rq = io_err_release_clone_rq,
	.iterate_devices = io_err_iterate_devices,
	.io_hints = io_err_io_hints,
	.direct_access = io_err_dax_direct_access,
	.report_zones = io_err_report_zones,
};
/* Register the built-in "error" target at boot. */
int __init dm_target_init(void)
{
	return dm_register_target(&error_target);
}
/* Unregister the built-in "error" target on module teardown. */
void dm_target_exit(void)
{
	dm_unregister_target(&error_target);
}