blk-mq-sysfs.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include "blk.h"
#include "blk-mq.h"
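
/*
 * Release handler for the queue-wide blk_mq_ctxs kobject: runs once the
 * last reference is dropped and frees the per-CPU software queue contexts
 * along with the container itself.
 */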
static void blk_mq_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

        free_percpu(ctxs->queue_ctx);
        kfree(ctxs);
}
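
/*
 * Each software-queue ctx kobject holds a reference on its parent
 * blk_mq_ctxs kobject; releasing a ctx only drops that reference, so the
 * per-CPU ctx memory is not freed until blk_mq_sysfs_release() runs.
 */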
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

        /* ctx->ctxs won't be released until all ctx are freed */
        kobject_put(&ctx->ctxs->kobj);
}
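
/*
 * Final teardown of a hardware queue context: runs when the hctx kobject's
 * refcount hits zero and frees the flush queue, the ctx_map sbitmap, the
 * CPU mask, the ctx pointer array, and the hctx itself.
 */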
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
                                                  kobj);

        blk_free_flush_queue(hctx->fq);
        sbitmap_free(&hctx->ctx_map);
        free_cpumask_var(hctx->cpumask);
        kfree(hctx->ctxs);
        kfree(hctx);
}
struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
};
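
/*
 * Common ->show() dispatcher for the hctx attributes below: recover the
 * attribute entry and the hctx from the embedded kobject, then invoke the
 * attribute's handler under q->sysfs_lock.
 */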
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}
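
/*
 * nr_tags / nr_reserved_tags report the depth of this hardware queue's tag
 * set, i.e. how many requests (and how many reserved ones) it can have in
 * flight at once.
 */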
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
                                                     char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}
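
/*
 * Emit the CPUs mapped to this hardware queue as a comma-separated list.
 * Output is clamped to one page: if the next entry would not fit, the loop
 * stops early rather than truncating mid-number, and the final snprintf()
 * may use the very last byte of the page for the newline.
 */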
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        const size_t size = PAGE_SIZE - 1;
        unsigned int i, first = 1;
        int ret = 0, pos = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret = snprintf(pos + page, size - pos, "%u", i);
                else
                        ret = snprintf(pos + page, size - pos, ", %u", i);

                if (ret >= size - pos)
                        break;

                first = 0;
                pos += ret;
        }

        ret = snprintf(pos + page, size + 1 - pos, "\n");
        return pos + ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
        .attr = {.name = "nr_tags", .mode = 0444 },
        .show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
        .attr = {.name = "nr_reserved_tags", .mode = 0444 },
        .show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = 0444 },
        .show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_nr_tags.attr,
        &blk_mq_hw_sysfs_nr_reserved_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);
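
/*
 * The ktypes below tie the release handlers, sysfs ops, and attribute
 * groups together.  The resulting read-only hierarchy under the disk is:
 *
 *   /sys/block/<disk>/mq/<hctx>/nr_tags
 *   /sys/block/<disk>/mq/<hctx>/nr_reserved_tags
 *   /sys/block/<disk>/mq/<hctx>/cpu_list
 *   /sys/block/<disk>/mq/<hctx>/cpu<n>/
 */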
static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show = blk_mq_hw_sysfs_show,
};

static const struct kobj_type blk_mq_ktype = {
        .release = blk_mq_sysfs_release,
};

static const struct kobj_type blk_mq_ctx_ktype = {
        .release = blk_mq_ctx_sysfs_release,
};

static const struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_groups = default_hw_ctx_groups,
        .release        = blk_mq_hw_sysfs_release,
};
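
/*
 * Remove one hardware queue's directory and all of its per-CPU ctx
 * subdirectories from sysfs.  A zero nr_ctx means no software queues are
 * mapped to this hctx, so it was never registered and there is nothing to
 * delete.
 */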
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}
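
/*
 * Add one hardware queue under the queue's "mq" directory, named after its
 * queue number, then add a "cpu<n>" entry for each mapped software queue.
 * On failure, the ctx entries added so far and the hctx directory are
 * deleted again before the error is returned.
 */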
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, j, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        goto out;
        }

        return 0;
out:
        hctx_for_each_ctx(hctx, ctx, j) {
                if (j < i)
                        kobject_del(&ctx->kobj);
        }
        kobject_del(&hctx->kobj);
        return ret;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
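
/*
 * Counterpart to blk_mq_sysfs_init(): drop the initial reference on every
 * per-CPU ctx kobject (each release in turn puts the parent "mq" kobject)
 * and finally the queue's own reference on the "mq" kobject itself.
 */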
void blk_mq_sysfs_deinit(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_put(&ctx->kobj);
        }
        kobject_put(q->mq_kobj);
}
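
/*
 * Initialize the queue's "mq" kobject and one ctx kobject per possible CPU.
 * Every ctx takes an extra reference on the "mq" kobject, so the containing
 * blk_mq_ctxs structure stays alive until the last ctx has been released.
 */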
void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);

                kobject_get(q->mq_kobj);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}
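
/*
 * Called while the disk is being added: create the "mq" directory under
 * the disk, announce it with a KOBJ_ADD uevent, and register every
 * hardware queue beneath it.  On failure, all partially registered hctxs
 * are torn down and the "mq" directory is removed again.
 */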
int blk_mq_sysfs_register(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        unsigned long i, j;
        int ret;

        lockdep_assert_held(&q->sysfs_dir_lock);

        ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        goto unreg;
        }

        q->mq_sysfs_init_done = true;

out:
        return ret;

unreg:
        queue_for_each_hw_ctx(q, hctx, j) {
                if (j < i)
                        blk_mq_unregister_hctx(hctx);
        }

        kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
        kobject_del(q->mq_kobj);
        return ret;
}
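
/*
 * Full teardown on disk removal: delete every hardware queue directory,
 * emit a KOBJ_REMOVE uevent, and remove the "mq" directory itself.
 */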
void blk_mq_sysfs_unregister(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        lockdep_assert_held(&q->sysfs_dir_lock);

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);

        kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
        kobject_del(q->mq_kobj);

        q->mq_sysfs_init_done = false;
}
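
/*
 * Remove only the per-hctx directories while keeping the "mq" directory in
 * place, e.g. while the set of hardware queues is being remapped.  A no-op
 * if the queue was never fully registered.
 */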
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        mutex_lock(&q->sysfs_dir_lock);
        if (!q->mq_sysfs_init_done)
                goto unlock;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);

unlock:
        mutex_unlock(&q->sysfs_dir_lock);
}
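
/*
 * Re-register every hardware queue under the existing "mq" directory, the
 * counterpart to blk_mq_sysfs_unregister_hctxs().  Stops at the first
 * failure; a no-op if the queue was never fully registered.
 */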
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;
        int ret = 0;

        mutex_lock(&q->sysfs_dir_lock);
        if (!q->mq_sysfs_init_done)
                goto unlock;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

unlock:
        mutex_unlock(&q->sysfs_dir_lock);
        return ret;
}