/* md-linear.c (8.8 KB) */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * linear.c : Multiple Devices driver for Linux Copyright (C) 1994-96 Marc
  4. * ZYNGIER <zyngier@ufr-info-p7.ibp.fr> or <maz@gloups.fdn.fr>
  5. */
  6. #include <linux/blkdev.h>
  7. #include <linux/raid/md_u.h>
  8. #include <linux/seq_file.h>
  9. #include <linux/module.h>
  10. #include <linux/slab.h>
  11. #include <trace/events/block.h>
  12. #include "md.h"
/* One entry per member device: the rdev plus the exclusive upper bound
 * (in array sectors) of the range this member covers. */
struct dev_info {
	struct md_rdev *rdev;
	sector_t end_sector;	/* cumulative end offset in array sectors */
};

/*
 * Per-array configuration.  A new conf is built and published with
 * rcu_assign_pointer() on hot-add, and the old one retired with
 * kfree_rcu(), so readers always see a self-consistent snapshot.
 */
struct linear_conf {
	struct rcu_head rcu;		/* for kfree_rcu() of a replaced conf */
	sector_t array_sectors;		/* total usable size of the array */
	/* a copy of mddev->raid_disks */
	int raid_disks;
	struct dev_info disks[] __counted_by(raid_disks);
};
/*
 * find which device holds a particular offset
 */
static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
{
	int lo, mid, hi;
	struct linear_conf *conf;

	lo = 0;
	hi = mddev->raid_disks - 1;
	conf = mddev->private;

	/*
	 * Binary Search over the cumulative end_sector table: find the
	 * first (lowest-indexed) entry whose end_sector is strictly
	 * greater than @sector, i.e. the member containing @sector.
	 */
	while (hi > lo) {
		mid = (hi + lo) / 2;
		if (sector < conf->disks[mid].end_sector)
			hi = mid;
		else
			lo = mid + 1;
	}
	return conf->disks + lo;
}
  46. static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
  47. {
  48. struct linear_conf *conf;
  49. sector_t array_sectors;
  50. conf = mddev->private;
  51. WARN_ONCE(sectors || raid_disks,
  52. "%s does not support generic reshape\n", __func__);
  53. array_sectors = conf->array_sectors;
  54. return array_sectors;
  55. }
/*
 * Stack the member devices' queue limits onto the md gendisk's queue.
 * When a chunk size ("rounding") is configured, cap request sizes to
 * one chunk and advertise it as the minimum I/O size.
 * Returns 0 on success or a negative errno.
 */
static int linear_set_limits(struct mddev *mddev)
{
	struct queue_limits lim;
	int err;

	md_init_stacking_limits(&lim);
	lim.max_hw_sectors = mddev->chunk_sectors;
	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
	lim.io_min = mddev->chunk_sectors << 9;	/* sectors -> bytes */
	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
	if (err)
		return err;

	return queue_limits_set(mddev->gendisk->queue, &lim);
}
/*
 * Build a linear_conf describing @raid_disks member devices: validate
 * the disk numbering, round each member's size down to a whole number
 * of chunks (if a chunk size is set), compute the cumulative
 * end_sector table consumed by which_dev(), and stack queue limits.
 * Returns the new conf, or an ERR_PTR() on failure.
 */
static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
{
	struct linear_conf *conf;
	struct md_rdev *rdev;
	int ret = -EINVAL;
	int cnt;
	int i;

	conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	/*
	 * conf->raid_disks is a copy of mddev->raid_disks.  The reason to
	 * keep a copy of mddev->raid_disks in struct linear_conf is that
	 * mddev->raid_disks may not be consistent with the number of
	 * entries in conf->disks[] while it is updated in linear_add()
	 * and an old conf is still being read.  Here conf->raid_disks is
	 * always consistent with the number of entries in this conf's
	 * disks[] array, and mddev->private is updated with
	 * rcu_assign_pointer() in linear_add(), so such a race is
	 * avoided.
	 */
	conf->raid_disks = raid_disks;
	cnt = 0;
	conf->array_sectors = 0;

	rdev_for_each(rdev, mddev) {
		int j = rdev->raid_disk;
		struct dev_info *disk = conf->disks + j;
		sector_t sectors;

		/* Each slot 0..raid_disks-1 must be filled exactly once. */
		if (j < 0 || j >= raid_disks || disk->rdev) {
			pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
				mdname(mddev));
			goto out;
		}

		disk->rdev = rdev;

		/* Round each member down to a whole number of chunks. */
		if (mddev->chunk_sectors) {
			sectors = rdev->sectors;
			sector_div(sectors, mddev->chunk_sectors);
			rdev->sectors = sectors * mddev->chunk_sectors;
		}

		conf->array_sectors += rdev->sectors;
		cnt++;
	}
	if (cnt != raid_disks) {
		pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
			mdname(mddev));
		goto out;
	}

	/*
	 * Here we calculate the device offsets: disks[i].end_sector is
	 * the exclusive upper bound of member i, in array sectors.
	 */
	conf->disks[0].end_sector = conf->disks[0].rdev->sectors;

	for (i = 1; i < raid_disks; i++)
		conf->disks[i].end_sector =
			conf->disks[i-1].end_sector +
			conf->disks[i].rdev->sectors;

	if (!mddev_is_dm(mddev)) {
		ret = linear_set_limits(mddev);
		if (ret)
			goto out;
	}

	return conf;

out:
	kfree(conf);
	return ERR_PTR(ret);
}
/*
 * Personality ->run(): assemble the array at start time.  Builds the
 * conf, publishes it in mddev->private and sets the array capacity.
 * Returns 0 on success or a negative errno; on integrity-registration
 * failure the conf is freed and mddev->private cleared again.
 */
static int linear_run(struct mddev *mddev)
{
	struct linear_conf *conf;
	int ret;

	/* linear has no redundancy, so a write-intent bitmap makes no sense. */
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	conf = linear_conf(mddev, mddev->raid_disks);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	mddev->private = conf;
	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));

	ret = md_integrity_register(mddev);
	if (ret) {
		kfree(conf);
		mddev->private = NULL;
	}
	return ret;
}
static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
{
	/* Adding a drive to a linear array allows the array to grow.
	 * It is permitted if the new drive has a matching superblock
	 * already on it, with raid_disk equal to raid_disks.
	 * It is achieved by creating a new linear_private_data structure
	 * and swapping it in in-place of the current one.
	 * The current one is never freed until the array is stopped.
	 * This avoids races.
	 */
	struct linear_conf *newconf, *oldconf;

	/* Only a drive pre-labelled as the next member slot may be added. */
	if (rdev->saved_raid_disk != mddev->raid_disks)
		return -EINVAL;

	rdev->raid_disk = rdev->saved_raid_disk;
	rdev->saved_raid_disk = -1;

	newconf = linear_conf(mddev, mddev->raid_disks + 1);
	if (IS_ERR(newconf))
		return PTR_ERR(newconf);

	/* newconf->raid_disks already keeps a copy of the increased
	 * value of mddev->raid_disks; WARN_ONCE() is just used to make
	 * sure of this.  It is possible that oldconf is still referenced
	 * by an RCU reader, therefore kfree_rcu() is used so oldconf is
	 * only freed once no one uses it anymore.
	 */
	oldconf = rcu_dereference_protected(mddev->private,
			lockdep_is_held(&mddev->reconfig_mutex));
	mddev->raid_disks++;
	WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
		"copied raid_disks doesn't match mddev->raid_disks");
	rcu_assign_pointer(mddev->private, newconf);
	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
	kfree_rcu(oldconf, rcu);
	return 0;
}
  187. static void linear_free(struct mddev *mddev, void *priv)
  188. {
  189. struct linear_conf *conf = priv;
  190. kfree(conf);
  191. }
/*
 * Map @bio onto the member device that holds its starting sector.
 * A bio crossing a member boundary is split: the tail is chained and
 * resubmitted to the md device, while the head is remapped to the
 * member and issued.  Always returns true (the bio was consumed).
 */
static bool linear_make_request(struct mddev *mddev, struct bio *bio)
{
	struct dev_info *tmp_dev;
	sector_t start_sector, end_sector, data_offset;
	sector_t bio_sector = bio->bi_iter.bi_sector;

	/* Flush requests are handled by the md core. */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	tmp_dev = which_dev(mddev, bio_sector);
	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
	end_sector = tmp_dev->end_sector;
	data_offset = tmp_dev->rdev->data_offset;

	if (unlikely(bio_sector >= end_sector ||
		     bio_sector < start_sector))
		goto out_of_bounds;

	/* Fail I/O early if this member was already marked broken. */
	if (unlikely(is_rdev_broken(tmp_dev->rdev))) {
		md_error(mddev, tmp_dev->rdev);
		bio_io_error(bio);
		return true;
	}

	if (unlikely(bio_end_sector(bio) > end_sector)) {
		/* This bio crosses a device boundary, so we have to split it */
		struct bio *split = bio_split(bio, end_sector - bio_sector,
					      GFP_NOIO, &mddev->bio_set);

		if (IS_ERR(split)) {
			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
			bio_endio(bio);
			return true;
		}

		/* Chain head to tail, resubmit the tail, keep the head. */
		bio_chain(split, bio);
		trace_block_split(split, bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
		bio = split;
	}

	md_account_bio(mddev, &bio);
	/* Translate from array sector to member-device sector. */
	bio_set_dev(bio, tmp_dev->rdev->bdev);
	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
		start_sector + data_offset;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
		     !bdev_max_discard_sectors(bio->bi_bdev))) {
		/* Member doesn't support discard: just ignore it */
		bio_endio(bio);
	} else {
		if (mddev->gendisk)
			trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
					      bio_sector);
		mddev_check_write_zeroes(mddev, bio);
		submit_bio_noacct(bio);
	}
	return true;

out_of_bounds:
	pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %pg: %llu sectors, offset %llu\n",
	       mdname(mddev),
	       (unsigned long long)bio->bi_iter.bi_sector,
	       tmp_dev->rdev->bdev,
	       (unsigned long long)tmp_dev->rdev->sectors,
	       (unsigned long long)start_sector);
	bio_io_error(bio);
	return true;
}
  252. static void linear_status(struct seq_file *seq, struct mddev *mddev)
  253. {
  254. seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
  255. }
  256. static void linear_error(struct mddev *mddev, struct md_rdev *rdev)
  257. {
  258. if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
  259. char *md_name = mdname(mddev);
  260. pr_crit("md/linear%s: Disk failure on %pg detected, failing array.\n",
  261. md_name, rdev->bdev);
  262. }
  263. }
/* Personality ->quiesce(): no personality-private activity to pause. */
static void linear_quiesce(struct mddev *mddev, int state)
{
}
/* Operations table registering this driver as the LEVEL_LINEAR personality. */
static struct md_personality linear_personality = {
	.name		= "linear",
	.level		= LEVEL_LINEAR,
	.owner		= THIS_MODULE,
	.make_request	= linear_make_request,
	.run		= linear_run,
	.free		= linear_free,
	.status		= linear_status,
	.hot_add_disk	= linear_add,
	.size		= linear_size,
	.quiesce	= linear_quiesce,
	.error_handler	= linear_error,
};
/* Module init: register the linear personality with the md core. */
static int __init linear_init(void)
{
	return register_md_personality(&linear_personality);
}

/* Module exit: unregister the personality. */
static void linear_exit(void)
{
	unregister_md_personality(&linear_personality);
}

module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Linear device concatenation personality for MD (deprecated)");
MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
MODULE_ALIAS("md-linear");
MODULE_ALIAS("md-level--1");