blk-mq-debugfs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/build_bug.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};
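
/*
 * Print the names of the flag bits that are set in @flags, separated by '|'.
 * Bits without a name in @flag_name (or beyond @flag_name_count) are printed
 * as their bit number.
 */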
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(SQ_SCHED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	BUILD_BUG_ON(ARRAY_SIZE(blk_queue_flag_name) != QUEUE_FLAG_MAX);
	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}
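
/*
 * Handle writes to the "state" attribute: "run" runs the hardware queues,
 * "start" restarts stopped hardware queues, and "kick" kicks the requeue
 * list.
 */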
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed. Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wplugs", 0400, queue_zone_wplugs_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_state_name) != BLK_MQ_S_MAX);
	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT),
};
#undef HCTX_FLAG_NAME
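
/*
 * Print the tag allocation policy encoded in hctx->flags, followed by the
 * names of the remaining BLK_MQ_F_* flags that are set.
 */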
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) !=
			BLK_MQ_F_ALLOC_POLICY_START_BIT);
	BUILD_BUG_ON(ARRAY_SIZE(alloc_policy_name) != BLK_TAG_ALLOC_MAX);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(POLLED),
	CMD_FLAG_NAME(ALLOC_CACHE),
	CMD_FLAG_NAME(SWAP),
	CMD_FLAG_NAME(DRV),
	CMD_FLAG_NAME(FS_PRIVATE),
	CMD_FLAG_NAME(ATOMIC),
	CMD_FLAG_NAME(NOUNMAP),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [__RQF_##name] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(DONTPREP),
	RQF_NAME(SCHED_TAGS),
	RQF_NAME(USE_SCHED),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_PLUGGING),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}
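
/*
 * Dump a single request in a human-readable form, e.g.:
 *
 *   00000000deadbeef {.op=WRITE, .cmd_flags=SYNC, .rq_flags=STARTED|IO_STAT,
 *		       .state=in_flight, .tag=12, .internal_tag=-1}
 *
 * (example output only; the exact fields depend on the request). Drivers can
 * append their own details via the ->show_rq() callback.
 */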
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	BUILD_BUG_ON(ARRAY_SIZE(cmd_flag_name) != __REQ_NR_BITS);
	BUILD_BUG_ON(ARRAY_SIZE(rqf_name) != __RQF_BITS);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}
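
/*
 * Dump the counters of a tag set (tag counts, active queues) followed by the
 * sbitmap state of the normal and, if present, the reserved tag bitmaps.
 */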
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   READ_ONCE(tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}
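
/*
 * Generate the seq_file start/next/stop helpers and seq_operations for one
 * per-CPU software queue request list (default, read or poll), iterating the
 * list under ctx->lock.
 */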
#define CTX_RQ_SEQ_OPS(name, type) \
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock) \
{ \
	struct blk_mq_ctx *ctx = m->private; \
 \
	spin_lock(&ctx->lock); \
	return seq_list_start(&ctx->rq_lists[type], *pos); \
} \
 \
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \
				       loff_t *pos) \
{ \
	struct blk_mq_ctx *ctx = m->private; \
 \
	return seq_list_next(v, &ctx->rq_lists[type], pos); \
} \
 \
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v) \
	__releases(&ctx->lock) \
{ \
	struct blk_mq_ctx *ctx = m->private; \
 \
	spin_unlock(&ctx->lock); \
} \
 \
static const struct seq_operations ctx_##name##_rq_list_seq_ops = { \
	.start	= ctx_##name##_rq_list_start, \
	.next	= ctx_##name##_rq_list_next, \
	.stop	= ctx_##name##_rq_list_stop, \
	.show	= blk_mq_debugfs_rq_show, \
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}
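
/*
 * Open an attribute either through its dedicated seq_operations (list-style
 * attributes such as "dispatch") or through single_open() for attributes that
 * only provide a ->show() callback.
 */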
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};
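
/*
 * Create one debugfs file per attribute under @parent. The object that the
 * attributes operate on (queue, hctx, ctx, ...) is stashed in the parent
 * directory's i_private so that the open/show/write helpers above can
 * retrieve it from the file's parent dentry.
 */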
static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}
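
/*
 * Create the queue-level debugfs files and, where possible, the scheduler,
 * per-hctx and rq_qos entries. The resulting layout looks roughly like:
 *
 *   /sys/kernel/debug/block/<disk>/
 *       state, pm_only, requeue_list, ...
 *       sched/            (elevator attributes, if any)
 *       rqos/<policy>/    (rq_qos policies such as wbt)
 *       hctxN/            (one directory per hardware queue)
 *           cpuN/         (one directory per mapped software queue)
 */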
void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will
	 * be called again later on and the directory/files will be created
	 * then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);

	if (!rqos->disk->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->disk->queue;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent
	 * debugfs directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;

	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}