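/*
 * blk-mq sysfs interface: exposes per-software-queue (ctx) and
 * per-hardware-queue (hctx) statistics under /sys/block/<disk>/mq/.
 */
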
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

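/*
 * show/store handlers for per-software-queue (ctx) attributes.  The queue's
 * sysfs_lock is held and a dying queue is refused with -ENOENT.
 */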
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

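/* Same pattern as above, but for hardware-queue (hctx) attributes. */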
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
				ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
				ctx->rq_completed[0]);
}

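/*
 * Dump each pending request's pointer on its own line; if the page would
 * overflow, emit a trailing "..." and stop.
 */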
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	struct request *rq;
	int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist) {
		const int rq_len = 2 * sizeof(rq) + 2;

		/* if the output will be truncated */
		if (PAGE_SIZE - 1 < len + rq_len) {
			/* backspacing if it can't hold '\t...\n' */
			if (PAGE_SIZE - 1 < len + 5)
				len -= rq_len;
			len += snprintf(page + len, PAGE_SIZE - 1 - len,
					"\t...\n");
			break;
		}
		len += snprintf(page + len, PAGE_SIZE - 1 - len,
				"\t%p\n", rq);
	}

	return len;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}

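/* Histogram of requests dispatched per queue run, bucketed by powers of two. */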
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
		unsigned long d = 1U << (i - 1);

		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
	}

	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

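/* Print the CPUs mapped to this hardware queue as a comma-separated list. */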
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
	.attr = {.name = "active", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
	.attr = {.name = "io_poll", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_poll_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	&blk_mq_hw_sysfs_active.attr,
	&blk_mq_hw_sysfs_poll.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

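/*
 * Add or remove the sysfs directory for one hardware queue: the hctx kobject
 * itself plus one "cpuN" kobject per software queue mapped to it.
 */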
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

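/* Tear down the whole mq sysfs hierarchy for a disk and drop the references. */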
void blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	blk_mq_disable_hotplug();

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_unregister_hctx(hctx);

		hctx_for_each_ctx(hctx, ctx, j)
			kobject_put(&ctx->kobj);

		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	kobject_put(&disk_to_dev(disk)->kobj);

	q->mq_sysfs_init_done = false;
	blk_mq_enable_hotplug();
}

static void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	queue_for_each_hw_ctx(q, hctx, i)
		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);

	queue_for_each_ctx(q, ctx, i)
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}

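/*
 * Create the /sys/block/<disk>/mq hierarchy and register every hardware
 * queue beneath it; on failure everything registered so far is torn down.
 */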
int blk_mq_register_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	blk_mq_disable_hotplug();

	blk_mq_sysfs_init(q);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	if (ret)
		blk_mq_unregister_disk(disk);
	else
		q->mq_sysfs_init_done = true;
out:
	blk_mq_enable_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);

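/*
 * Drop and re-create the per-hctx directories without touching the parent
 * "mq" kobject, e.g. when the hardware queue mapping is updated.
 */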
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!q->mq_sysfs_init_done)
		return;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	if (!q->mq_sysfs_init_done)
		return ret;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	return ret;
}