// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

#include <trace/hooks/blk_mq.h>

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * reserve budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		struct request_queue *q = hctx->queue;
		struct blk_mq_tag_set *set = q->tag_set;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			atomic_inc(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			atomic_inc(&hctx->tags->active_queues);
	}

	return true;
}
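
/*
 * For context: the active user count bumped above feeds the fairness check
 * in hctx_may_queue() (blk-mq-tag.h). As a rough sketch, with U active
 * users sharing a bitmap of depth D, each user is allowed roughly
 *
 *	max(DIV_ROUND_UP(D, U), 4U)
 *
 * tags before further allocations are denied, so one busy submitter cannot
 * starve the others.
 */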

/*
 * Wake up all waiters potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	struct request_queue *q = hctx->queue;
	struct blk_mq_tag_set *set = q->tag_set;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
		atomic_dec(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
		atomic_dec(&tags->active_queues);
	}

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
	    !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}
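
/*
 * Illustrative sketch, not part of this file: data->shallow_depth is
 * typically set from an I/O scheduler's ->limit_depth() hook to throttle
 * a class of requests. The hook name and depth value below are
 * hypothetical:
 *
 *	static void example_limit_depth(unsigned int op,
 *					struct blk_mq_alloc_data *data)
 *	{
 *		if (!op_is_sync(op))
 *			data->shallow_depth = 64;
 *	}
 */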

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
					      data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = tags->breserved_tags;
		else
			bt = tags->bitmap_tags;
		/*
		 * If the destination hw queue changed, issue a fake wakeup on
		 * the previous queue to compensate for the missed wakeup, so
		 * other allocations on the previous queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
	}
}
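
/*
 * Worked example of the mapping above, assuming nr_tags = 64 and
 * nr_reserved_tags = 2: tags 0-1 are reserved and map directly to bits
 * 0-1 of breserved_tags, while tags 2-63 map to bits 0-61 of bitmap_tags
 * (real_tag = tag - 2). blk_mq_get_tag() applies the same offset when
 * handing tags out.
 */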

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
					       unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;
	bool ret = true;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == hctx->queue && rq->mq_hctx == hctx)
		ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data, reserved);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	bool skip = false;

	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	trace_android_vh_blk_mq_all_tag_iter(&skip, tags, fn, priv);
	if (skip)
		return;
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
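
/*
 * Illustrative sketch of a driver-side callback for
 * blk_mq_tagset_busy_iter(); the names below are hypothetical. It counts
 * the started requests in a tag set:
 *
 *	static bool example_count_busy(struct request *rq, void *data,
 *				       bool reserved)
 *	{
 *		unsigned int *busy = data;
 *
 *		(*busy)++;
 *		return true;
 *	}
 *
 *	unsigned int busy = 0;
 *	blk_mq_tagset_busy_iter(&dev->tagset, example_count_busy, &busy);
 */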

static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - wait until all completed requests'
 * completion functions have run
 * @tagset:	Tag set to drain completed requests from
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
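
/*
 * Typical use, sketched with hypothetical names: during controller
 * shutdown a driver first forces outstanding requests to complete via
 * blk_mq_tagset_busy_iter() and then waits for their completion
 * functions to finish:
 *
 *	blk_mq_tagset_busy_iter(&dev->tagset, example_cancel_rq, dev);
 *	blk_mq_tagset_wait_completed_request(&dev->tagset);
 */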

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;
	bool skip = false;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		trace_android_vh_blk_mq_queue_tag_busy_iter(&skip, hctx, fn,
							    priv);
		if (skip)
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}
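
/*
 * Illustrative sketch of a busy_iter_fn for the function above; the
 * names are hypothetical. It counts requests holding a driver tag:
 *
 *	static bool example_count_inflight(struct blk_mq_hw_ctx *hctx,
 *					   struct request *rq, void *priv,
 *					   bool reserved)
 *	{
 *		unsigned int *inflight = priv;
 *
 *		(*inflight)++;
 *		return true;
 *	}
 *
 *	unsigned int inflight = 0;
 *	blk_mq_queue_tag_busy_iter(q, example_count_inflight, &inflight);
 */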

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
				   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->__bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(&tags->__breserved_tags, tags->nr_reserved_tags,
		     round_robin, node))
		goto free_bitmap_tags;

	tags->bitmap_tags = &tags->__bitmap_tags;
	tags->breserved_tags = &tags->__breserved_tags;

	return 0;
free_bitmap_tags:
	sbitmap_queue_free(&tags->__bitmap_tags);
	return -ENOMEM;
}

int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags)
{
	unsigned int depth = set->queue_depth - set->reserved_tags;
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
	int i, node = set->numa_node;

	if (bt_alloc(&set->__bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(&set->__breserved_tags, set->reserved_tags,
		     round_robin, node))
		goto free_bitmap_tags;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		tags->bitmap_tags = &set->__bitmap_tags;
		tags->breserved_tags = &set->__breserved_tags;
	}

	return 0;
free_bitmap_tags:
	sbitmap_queue_free(&set->__bitmap_tags);
	return -ENOMEM;
}
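
/*
 * Worked example of the sizing above, assuming queue_depth = 128 and
 * reserved_tags = 2: one 126-bit sbitmap and one 2-bit reserved sbitmap
 * are allocated for the whole set, and every hctx's tags point at them,
 * so all hardware queues draw from a single shared pool.
 */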

void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set)
{
	sbitmap_queue_free(&set->__bitmap_tags);
	sbitmap_queue_free(&set->__breserved_tags);
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, unsigned int flags)
{
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (flags & BLK_MQ_F_TAG_HCTX_SHARED)
		return tags;

	if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
{
	bool skip = false;

	trace_android_vh_blk_mq_free_tags(&skip, tags);
	if (skip)
		return;

	if (!(flags & BLK_MQ_F_TAG_HCTX_SHARED)) {
		sbitmap_queue_free(tags->bitmap_tags);
		sbitmap_queue_free(tags->breserved_tags);
	}
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		/* Only sched tags can grow, so clear HCTX_SHARED flag */
		unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags, flags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new, flags);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr, flags);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
{
	sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq:		request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per-hardware-queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
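
/*
 * Decoding example: the hardware queue index and per-queue tag can be
 * recovered with the helpers from <linux/blk-mq.h>:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */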