// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

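/*
 * Overview (added note): this file exposes the queue's blk-mq topology
 * under the "mq" directory of the owning device (typically
 * /sys/block/<disk>/mq/). Each hardware queue gets a numbered
 * subdirectory with nr_tags, nr_reserved_tags and cpu_list attributes,
 * plus one cpu<N> entry per mapped software context.
 */
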
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

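/*
 * Generic show/store dispatchers for the per-ctx and per-hctx attributes:
 * resolve the typed entry from the raw attribute, serialize against queue
 * sysfs changes via q->sysfs_lock, and return -ENOENT once the queue is
 * dying.
 */
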
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show = blk_mq_sysfs_show,
	.store = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
	.store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release = blk_mq_hw_sysfs_release,
};

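/*
 * Register/unregister one hardware context and its software contexts.
 * The hctx kobject is added under q->mq_kobj using the queue number as
 * its name; each mapped ctx then appears as a cpu<N> child. On failure,
 * everything added so far is rolled back.
 */
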
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

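/*
 * Lifetime note: blk_mq_sysfs_init() grabs one reference on q->mq_kobj
 * (embedded in struct blk_mq_ctxs) for every software context, and
 * blk_mq_ctx_sysfs_release() drops it again, so the shared ctxs structure
 * is only freed once the last ctx kobject is gone.
 */
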
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

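/*
 * Caller must hold q->sysfs_dir_lock. Creates the "mq" directory under
 * the device kobject, then registers every hardware context beneath it,
 * unwinding everything if any registration fails.
 */
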
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}