// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}
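
/*
 * Illustrative example (not taken from a real run): for a 'flags' word with
 * the SAME_COMP, NONROT and IO_STAT queue flags set, blk_flags_show() emits
 * "SAME_COMP|NONROT|IO_STAT". Bits that have no entry in 'flag_name' are
 * printed as their decimal bit number instead.
 */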

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SYNCHRONOUS),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
	QUEUE_FLAG_NAME(SQ_SCHED),
	QUEUE_FLAG_NAME(SKIP_TAGSET_QUIESCE),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed. Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
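
/*
 * Example of driving the "state" attribute from user space (assuming debugfs
 * is mounted at /sys/kernel/debug and the disk is named nvme0n1; adjust the
 * path for your system):
 *
 *	echo kick > /sys/kernel/debug/block/nvme0n1/state
 *
 * "run", "start" and "kick" map to blk_mq_run_hw_queues(),
 * blk_mq_start_stopped_hw_queues() and blk_mq_kick_requeue_list() above.
 */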

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};
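
/*
 * The attributes above back the per-queue debugfs files; they are created by
 * blk_mq_debugfs_register() below, typically under
 * /sys/kernel/debug/block/<disk>/ (the exact location depends on where
 * debugfs is mounted).
 */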

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(DONTPREP),
	RQF_NAME(SCHED_TAGS),
	RQF_NAME(USE_SCHED),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE] = "idle",
	[MQ_RQ_IN_FLIGHT] = "in_flight",
	[MQ_RQ_COMPLETE] = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
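
/*
 * Illustrative output for a single request (all field values are made up):
 *
 *	00000000a1b2c3d4 {.op=READ, .cmd_flags=SYNC, .rq_flags=STARTED|IO_STAT, .state=in_flight, .tag=17, .internal_tag=-1}
 *
 * A driver can append its own details inside the braces via the optional
 * blk_mq_ops->show_rq() hook; a hypothetical sketch (mydrv_cmd and its
 * 'retries' field are invented for illustration):
 *
 *	static void mydrv_show_rq(struct seq_file *m, struct request *rq)
 *	{
 *		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		seq_printf(m, ", .retries=%d", cmd->retries);
 *	}
 */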

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file *m;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT] = "default",
	[HCTX_TYPE_READ] = "read",
	[HCTX_TYPE_POLL] = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   READ_ONCE(tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}
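
/*
 * Illustrative "tags" output (numbers made up, sbitmap details elided):
 *
 *	nr_tags=64
 *	nr_reserved_tags=0
 *	active_queues=0
 *
 *	bitmap_tags:
 *	... output of sbitmap_queue_show() ...
 *
 * A "breserved_tags:" section follows only when reserved tags exist.
 */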

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start = ctx_##name##_rq_list_start,				\
	.next = ctx_##name##_rq_list_next,				\
	.stop = ctx_##name##_rq_list_stop,				\
	.show = blk_mq_debugfs_rq_show,					\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
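
/*
 * The three CTX_RQ_SEQ_OPS() expansions above generate
 * ctx_{default,read,poll}_rq_list_seq_ops, which back the per-CPU
 * default_rq_list, read_rq_list and poll_rq_list files registered through
 * blk_mq_debugfs_ctx_attrs[] below (one cpu<N> directory per software queue).
 */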

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};
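
/*
 * Open dispatch for the attributes: entries that provide .seq_ops are opened
 * with seq_open() (read-only iteration over a request list), everything else
 * goes through single_open() and the attribute's ->show() callback. Writes
 * are only honoured for attributes that define ->write().
 */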

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}
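
/*
 * Note the i_private convention used above: the inode of the parent directory
 * holds the object being inspected (request_queue, hctx, ctx or rq_qos),
 * while each file's inode holds its blk_mq_debugfs_attr. The open/show/write
 * helpers earlier recover the object pointer via d_parent.
 */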

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will
	 * be called again later on and the directory/files will be created
	 * then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);

	if (!rqos->disk->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->disk->queue;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}
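
/*
 * Each rq-qos policy with debugfs attributes gets its own subdirectory named
 * after rq_qos_id_to_name(), e.g. (assuming the usual debugfs mount point)
 * /sys/kernel/debug/block/<disk>/rqos/wbt/ for the writeback throttling
 * policy.
 */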

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent
	 * debugfs directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}
