// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * reserve budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		struct request_queue *q = hctx->queue;
		struct blk_mq_tag_set *set = q->tag_set;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			atomic_inc(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			atomic_inc(&hctx->tags->active_queues);
	}

	return true;
}
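
/*
 * Illustrative sketch (hypothetical names, not blk-mq code): the
 * active_queues count bumped above feeds the hctx_may_queue() fairness
 * check, which divides the tag depth among the active shared-tag users.
 * For example, assuming a depth of 64 and 4 active queues, each user is
 * capped at roughly 64 / 4 = 16 in-flight tags. A minimal model of that
 * check:
 *
 *	static bool fair_share_may_queue(unsigned int depth,
 *					 unsigned int active_queues,
 *					 unsigned int users_in_flight)
 *	{
 *		if (!active_queues)
 *			return true;
 *		return users_in_flight < max(1U, depth / active_queues);
 *	}
 */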

/*
 * Wake up all waiters potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	struct request_queue *q = hctx->queue;
	struct blk_mq_tag_set *set = q->tag_set;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
		atomic_dec(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
		atomic_dec(&tags->active_queues);
	}

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
			!hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue; kick any
		 * pending I/O submissions before going to sleep waiting
		 * for some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = tags->breserved_tags;
		else
			bt = tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wakeup on the previous queue to compensate for the missed
		 * wakeup, so other allocations on the previous queue won't
		 * be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive.  The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}
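
/*
 * Illustrative sketch (hypothetical names, not blk-mq code): the loop in
 * blk_mq_get_tag() above follows the classic prepare/re-check/sleep wait
 * pattern, which closes the race where a tag is freed between a failed
 * allocation attempt and the sleep. The skeleton is:
 *
 *	do {
 *		if (try_get_resource())			// opportunistic try
 *			break;
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (try_get_resource())			// re-check: a wakeup
 *			break;				// cannot be missed now
 *		io_schedule();				// safe to sleep
 *	} while (1);
 *	finish_wait(&wq, &wait);
 */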

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
	}
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;
	bool ret = true;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == hctx->queue && rq->mq_hctx == hctx)
		ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data, reserved);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
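
/*
 * Usage sketch (hypothetical driver code): count in-flight requests with
 * blk_mq_tagset_busy_iter(). The callback matches busy_tag_iter_fn as
 * documented above and returns true to keep iterating:
 *
 *	static bool count_inflight(struct request *rq, void *data,
 *				   bool reserved)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int inflight = 0;
 *
 *	blk_mq_tagset_busy_iter(&dev->tag_set, count_inflight, &inflight);
 */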

static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - wait until all completed requests'
 * completion functions have run
 * @tagset:	Tag set to drain completed requests from
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
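
/*
 * Usage sketch (hypothetical teardown path): per the note above, a driver
 * shuts down its I/O queues first, then drains the completion side so no
 * ->complete() callback is still running:
 *
 *	my_driver_shutdown_io_queues(dev);	// hypothetical helper
 *	blk_mq_tagset_wait_completed_request(&dev->tag_set);
 */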

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}
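
/*
 * Usage sketch (hypothetical, e.g. for statistics): the per-queue variant
 * takes a busy_iter_fn, which also receives the hardware queue:
 *
 *	static bool count_busy(struct blk_mq_hw_ctx *hctx, struct request *rq,
 *			       void *priv, bool reserved)
 *	{
 *		unsigned int *busy = priv;
 *
 *		(*busy)++;
 *		return true;
 *	}
 *
 *	unsigned int busy = 0;
 *
 *	blk_mq_queue_tag_busy_iter(q, count_busy, &busy);
 */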

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
				   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->__bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(&tags->__breserved_tags, tags->nr_reserved_tags,
		     round_robin, node))
		goto free_bitmap_tags;

	tags->bitmap_tags = &tags->__bitmap_tags;
	tags->breserved_tags = &tags->__breserved_tags;

	return 0;
free_bitmap_tags:
	sbitmap_queue_free(&tags->__bitmap_tags);
	return -ENOMEM;
}

int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags)
{
	unsigned int depth = set->queue_depth - set->reserved_tags;
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
	int i, node = set->numa_node;

	if (bt_alloc(&set->__bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(&set->__breserved_tags, set->reserved_tags,
		     round_robin, node))
		goto free_bitmap_tags;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		tags->bitmap_tags = &set->__bitmap_tags;
		tags->breserved_tags = &set->__breserved_tags;
	}

	return 0;
free_bitmap_tags:
	sbitmap_queue_free(&set->__bitmap_tags);
	return -ENOMEM;
}

void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set)
{
	sbitmap_queue_free(&set->__bitmap_tags);
	sbitmap_queue_free(&set->__breserved_tags);
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, unsigned int flags)
{
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (flags & BLK_MQ_F_TAG_HCTX_SHARED)
		return tags;

	if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}
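
/*
 * Usage sketch (illustrative, arbitrary values): create a tag map with 128
 * tags, one of them reserved, without a NUMA preference, and free it again:
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_tags(128, 1, NUMA_NO_NODE, 0);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_tags(tags, 0);
 */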

void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
{
	if (!(flags & BLK_MQ_F_TAG_HCTX_SHARED)) {
		sbitmap_queue_free(tags->bitmap_tags);
		sbitmap_queue_free(tags->breserved_tags);
	}
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		/* Only sched tags can grow, so clear HCTX_SHARED flag */
		unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit; set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags, flags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new, flags);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr, flags);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update reserved tags here;
		 * they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
{
	sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
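
/*
 * Decoding sketch: the counterpart helpers blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() recover the two halves. With
 * BLK_MQ_UNIQUE_TAG_BITS == 16, hwq 2 and per-queue tag 5 encode to
 * 0x00020005:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	// unique >> 16
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);	// unique & 0xffff
 */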