// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

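/*
 * Everything in this file is exported through debugfs.  With debugfs mounted
 * in the usual place the files show up under /sys/kernel/debug/block/<disk>/,
 * with per-hardware-queue files in hctx<N>/ and per-cpu software-queue files
 * in hctx<N>/cpu<M>/ (directory names as created further down in this file).
 */
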
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

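/*
 * q->poll_stat[] holds BLK_MQ_POLL_STATS_BKTS entries, interleaved as
 * read/write pairs per I/O size bucket (512 bytes, 1K, 2K, ...).
 */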
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

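/*
 * Print the names of the bits set in @flags, separated by '|'.  Bits without
 * an entry in @flag_name[] are printed as their raw bit number instead.
 */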
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
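
/*
 * Usage sketch (assuming debugfs is mounted in the usual place and the disk
 * is called "sda"; adjust both to the system at hand):
 *
 *   echo kick > /sys/kernel/debug/block/sda/state
 *
 * invokes blk_mq_kick_requeue_list() for that queue; "run" and "start" map to
 * blk_mq_run_hw_queues() and blk_mq_start_stopped_hw_queues() respectively.
 */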

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

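/* Any write to "write_hints" clears q->write_hints[]; the data is ignored. */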
static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE] = "idle",
	[MQ_RQ_IN_FLIGHT] = "in_flight",
	[MQ_RQ_COMPLETE] = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

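/*
 * Print a single request in roughly the following form (field values here are
 * purely illustrative):
 *
 *   <address> {.op=READ, .cmd_flags=SYNC, .rq_flags=STARTED|STATS,
 *   .state=in_flight, .tag=7, .internal_tag=-1}
 *
 * plus whatever the driver's ->show_rq() callback adds before the closing
 * brace.
 */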
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file *m;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT] = "default",
	[HCTX_TYPE_READ] = "read",
	[HCTX_TYPE_POLL] = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

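/*
 * Expand to the seq_file start/next/stop plumbing for one per-ctx request
 * list (default, read or poll), holding ctx->lock for the duration of the
 * walk.
 */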
#define CTX_RQ_SEQ_OPS(name, type) \
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock) \
{ \
	struct blk_mq_ctx *ctx = m->private; \
 \
	spin_lock(&ctx->lock); \
	return seq_list_start(&ctx->rq_lists[type], *pos); \
} \
 \
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \
				       loff_t *pos) \
{ \
	struct blk_mq_ctx *ctx = m->private; \
 \
	return seq_list_next(v, &ctx->rq_lists[type], pos); \
} \
 \
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v) \
	__releases(&ctx->lock) \
{ \
	struct blk_mq_ctx *ctx = m->private; \
 \
	spin_unlock(&ctx->lock); \
} \
 \
static const struct seq_operations ctx_##name##_rq_list_seq_ops = { \
	.start = ctx_##name##_rq_list_start, \
	.next = ctx_##name##_rq_list_next, \
	.stop = ctx_##name##_rq_list_stop, \
	.show = blk_mq_debugfs_rq_show, \
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

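/*
 * An attribute provides either ->seq_ops (multi-record files such as request
 * lists) or a plain ->show() callback; open with the matching seq_file flavor
 * here and undo it in blk_mq_debugfs_release().
 */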
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

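/*
 * Create one debugfs file per attribute under @parent.  @data (the queue,
 * hctx, ctx or rq_qos instance) is stashed in the parent inode's i_private so
 * that the shared file operations above can recover it at open/read/write
 * time.
 */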
static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	q->sched_debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	case RQ_QOS_IOPRIO:
		return "ioprio";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent debugfs directory has not been created yet, return; we
	 * will be called again later on with the appropriate parent debugfs
	 * directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}

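/*
 * blk_sub_page_limit_queues_get() lives elsewhere in the block layer; it is
 * expected to report how many request queues operate with limits below the
 * page size.  Expose that counter read-only under the global block debugfs
 * root.
 */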
DEFINE_DEBUGFS_ATTRIBUTE(blk_sub_page_limit_queues_fops,
			 blk_sub_page_limit_queues_get, NULL, "%llu\n");

void blk_mq_debugfs_init(void)
{
	debugfs_create_file("sub_page_limit_queues", 0400, blk_debugfs_root,
			    NULL, &blk_sub_page_limit_queues_fops);
}