// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * still reserve budget for this queue.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		struct request_queue *q = hctx->queue;
		struct blk_mq_tag_set *set = q->tag_set;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			atomic_inc(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			atomic_inc(&hctx->tags->active_queues);
	}

	return true;
}
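
/*
 * Hedged sketch of why the count matters (the authoritative check is
 * hctx_may_queue() in blk-mq.h, not this comment): with a shared tag map,
 * each active queue is only allowed to take roughly its fair share of the
 * map, on the order of
 *
 *	allowed_depth = max(bt->sb.depth / active_queues, 4);
 *
 * so raising the active count before the first allocation attempt reserves
 * budget for this queue even if that attempt fails.
 */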

/*
 * Wake up all potential waiters sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	struct request_queue *q = hctx->queue;
	struct blk_mq_tag_set *set = q->tag_set;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
		atomic_dec(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
		atomic_dec(&tags->active_queues);
	}

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = tags->breserved_tags;
		else
			bt = tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, issue a fake wake
		 * up on the previous queue to compensate for the missed wake
		 * up, so that other allocations on the previous queue won't
		 * be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}
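
/*
 * Hedged usage sketch (not part of this file): drivers never call
 * blk_mq_get_tag() directly; the tag is acquired as part of request
 * allocation.  A driver that configured reserved tags in its tag set might
 * allocate one for an internal command roughly like this (the function name
 * is illustrative only):
 *
 *	static struct request *example_alloc_internal_rq(struct request_queue *q)
 *	{
 *		return blk_mq_alloc_request(q, REQ_OP_DRV_IN,
 *					    BLK_MQ_REQ_RESERVED |
 *					    BLK_MQ_REQ_NOWAIT);
 *	}
 *
 * BLK_MQ_REQ_RESERVED selects breserved_tags above, and BLK_MQ_REQ_NOWAIT
 * makes the allocation fail instead of sleeping in the wait loop.
 */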

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
	}
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}
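
/*
 * A request returned by blk_mq_find_and_get_req() carries an extra
 * reference; the iterators below pair each successful lookup with a
 * blk_mq_put_rq_ref() once the callback has run.
 */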

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;
	bool ret = true;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == hctx->queue && rq->mq_hctx == hctx)
		ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data, reserved);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
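
/*
 * Hedged usage sketch: a typical caller is a driver's error-recovery path,
 * which walks all started requests and forces them to completion.  Names
 * below are illustrative only, but the callback matches busy_tag_iter_fn:
 *
 *	static bool example_cancel_rq(struct request *rq, void *data,
 *				      bool reserved)
 *	{
 *		blk_mq_complete_request(rq);
 *		return true;		// keep iterating
 *	}
 *
 *	blk_mq_tagset_busy_iter(&dev->tagset, example_cancel_rq, NULL);
 *
 * blk_mq_tagset_count_completed_rqs() below is an in-tree example of such a
 * callback.
 */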

static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues have been shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check.
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}
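
/*
 * Hedged usage sketch: block core uses this to account in-flight requests
 * and to scan for timeouts.  A minimal counting callback (names illustrative
 * only) would look like:
 *
 *	static bool example_count_inflight(struct blk_mq_hw_ctx *hctx,
 *					   struct request *rq, void *priv,
 *					   bool reserved)
 *	{
 *		unsigned int *inflight = priv;
 *
 *		(*inflight)++;
 *		return true;
 *	}
 *
 *	blk_mq_queue_tag_busy_iter(q, example_count_inflight, &inflight);
 */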

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
				   int node, int alloc_policy)
{
	int ret;

	ret = blk_mq_init_bitmaps(&tags->__bitmap_tags,
				  &tags->__breserved_tags,
				  tags->nr_tags, tags->nr_reserved_tags,
				  node, alloc_policy);
	if (ret)
		return ret;

	tags->bitmap_tags = &tags->__bitmap_tags;
	tags->breserved_tags = &tags->__breserved_tags;

	return 0;
}

int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set)
{
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
	int i, ret;

	ret = blk_mq_init_bitmaps(&set->__bitmap_tags, &set->__breserved_tags,
				  set->queue_depth, set->reserved_tags,
				  set->numa_node, alloc_policy);
	if (ret)
		return ret;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		tags->bitmap_tags = &set->__bitmap_tags;
		tags->breserved_tags = &set->__breserved_tags;
	}

	return 0;
}
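
/*
 * Hedged setup sketch: the shared sbitmap is reached when a driver sets
 * BLK_MQ_F_TAG_HCTX_SHARED in its tag set, so that all hardware queues draw
 * from one tag space.  Field values below are illustrative only:
 *
 *	set->ops = &example_mq_ops;
 *	set->nr_hw_queues = nr_queues;
 *	set->queue_depth = 128;
 *	set->reserved_tags = 1;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_TAG_HCTX_SHARED;
 *	ret = blk_mq_alloc_tag_set(set);
 *
 * blk_mq_alloc_tag_set() then reaches blk_mq_init_shared_sbitmap() above
 * when the shared flag is set.
 */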

void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set)
{
	sbitmap_queue_free(&set->__bitmap_tags);
	sbitmap_queue_free(&set->__breserved_tags);
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, unsigned int flags)
{
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_is_sbitmap_shared(flags))
		return tags;

	if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
{
	if (!blk_mq_is_sbitmap_shared(flags)) {
		sbitmap_queue_free(tags->bitmap_tags);
		sbitmap_queue_free(tags->breserved_tags);
	}
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags, set->flags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new, set->flags);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr, set->flags);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update reserved tags here;
		 * they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
{
	sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq:		request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
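
/*
 * Hedged usage sketch: SCSI LLDs typically pass the value returned by
 * blk_mq_unique_tag() to their hardware and decode it on completion with the
 * helpers from <linux/blk-mq.h>:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	// hardware queue index
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);	// per-queue tag
 */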