blk-mq-sysfs.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include "blk.h"
#include "blk-mq.h"
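
/*
 * Release handler for the queue's mq kobject, embedded in struct
 * blk_mq_ctxs: frees the per-CPU software queues and the container itself.
 */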
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}
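
/*
 * Each software queue pins the shared ctxs container, so releasing a ctx
 * just drops one reference on it.
 */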
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}
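
/*
 * Final teardown of a hardware queue: free the flush queue, the
 * software-queue bitmap, the CPU mask and the ctx pointer array before
 * freeing the hctx itself.
 */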
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}
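
/*
 * All hardware-queue attributes are read-only, so an entry only needs a
 * ->show() callback.
 */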
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
};
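
/*
 * Generic ->show() dispatcher: resolve the entry and hctx from the
 * attribute/kobject pair, then run the per-attribute handler under the
 * queue's sysfs_lock.
 */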
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}
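
/*
 * Print the CPUs mapped to this hardware queue as a comma-separated list,
 * truncating the list if it would overflow one page.
 */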
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}
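
/* Read-only attributes exposed under /sys/block/<disk>/mq/<n>/ */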
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
};

static const struct kobj_type blk_mq_ktype = {
	.release	= blk_mq_sysfs_release,
};

static const struct kobj_type blk_mq_ctx_ktype = {
	.release	= blk_mq_ctx_sysfs_release,
};

static const struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release	= blk_mq_hw_sysfs_release,
};
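
/*
 * Remove a hardware queue and its software queues from sysfs. Safe on a
 * partially registered hctx: state_in_sysfs guards each kobject_del().
 */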
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		if (ctx->kobj.state_in_sysfs)
			kobject_del(&ctx->kobj);

	if (hctx->kobj.state_in_sysfs)
		kobject_del(&hctx->kobj);
}
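
/*
 * Add a hardware queue and all of its software queues below the queue's
 * mq kobject, unwinding the ctx kobjects added so far on failure.
 */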
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
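
/*
 * Drop the references taken in blk_mq_sysfs_init(); the release handlers
 * above run once the last reference to each kobject is gone.
 */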
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}
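
/*
 * Initialize the queue's mq kobject and one ctx kobject per possible CPU.
 * Every ctx pins the mq kobject via the extra kobject_get().
 */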
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}
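
/*
 * Create the "mq" directory below the disk's sysfs directory and register
 * every hardware queue beneath it. The caller must hold q->sysfs_dir_lock.
 */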
int blk_mq_sysfs_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;
	int ret;

	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	queue_for_each_hw_ctx(q, hctx, j) {
		if (j < i)
			blk_mq_unregister_hctx(hctx);
	}

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	return ret;
}
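
/*
 * Tear down everything blk_mq_sysfs_register() created. The caller must
 * hold q->sysfs_dir_lock.
 */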
void blk_mq_sysfs_unregister(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	q->mq_sysfs_init_done = false;
}
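
/*
 * Remove only the per-hctx directories, keeping the top-level "mq"
 * kobject in place; a no-op unless full registration has completed.
 */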
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}
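
/*
 * Re-register all hardware queues, typically after the hardware queue
 * count changes; stops at the first failure and is a no-op unless full
 * registration has completed.
 */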
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	int ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}