// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/*
 * Recalculate the wakeup batch when tags are shared by hctxs.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
		unsigned int users)
{
	if (!users)
		return;

	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
			users);
	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
			users);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * still reserve budget for it.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	unsigned int users;
	unsigned long flags;
	struct blk_mq_tags *tags = hctx->tags;

	/*
	 * Calling test_bit() prior to test_and_set_bit() is intentional:
	 * it avoids dirtying the cacheline if the queue is already active.
	 */
	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return;
	} else {
		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irqsave(&tags->lock, flags);
	users = tags->active_queues + 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irqrestore(&tags->lock, flags);
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irq(&tags->lock);
	users = tags->active_queues - 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irq(&tags->lock);

	blk_mq_tag_wakeup_all(tags, false);
}

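/*
 * Grab a free tag from @bt. If the caller set a shallow depth limit in
 * @data (used by I/O schedulers to restrict the usable depth), honor it.
 */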
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

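/*
 * Try to allocate a batch of regular (non-reserved) driver tags in a single
 * sbitmap operation. Batching is skipped when a shallow depth limit is set,
 * when reserved tags are requested, or when the tags are shared between
 * queues; in those cases 0 is returned and the caller allocates tags one at
 * a time instead.
 */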
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;
	return ret;
}

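/*
 * Allocate a single tag for @data. If no tag is available and the caller did
 * not pass BLK_MQ_REQ_NOWAIT, kick the hardware queue and sleep until a tag
 * is freed, re-checking the (possibly remapped) hardware queue after each
 * wakeup. Returns the tag adjusted by the reserved-tag offset, or
 * BLK_MQ_NO_TAG on failure.
 */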
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue; kick any
		 * pending IO submissions before going to sleep waiting
		 * for some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wakeup on the previous queue to compensate for the missed
		 * wakeup, so other allocations on the previous queue won't
		 * be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev, 1);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive.  The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}

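/*
 * Release a tag previously returned by blk_mq_get_tag(). Reserved tags go
 * back to the reserved bitmap; all others are adjusted by the reserved-tag
 * offset and cleared in the regular bitmap.
 */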
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

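/*
 * Release a batch of regular (non-reserved) tags in one sbitmap operation,
 * the counterpart to blk_mq_get_tags().
 */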
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
					tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

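/*
 * Look up the request currently assigned to @bitnr under tags->lock and grab
 * a reference to it. Returns NULL if the slot is empty, the tag doesn't
 * match, or the request's reference count has already dropped to zero.
 */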
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct request_queue *q = iter_data->q;
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_tags *tags;
	struct request *rq;
	bool ret = true;

	if (blk_mq_is_shared_tags(set->flags))
		tags = set->shared_tags;
	else
		tags = hctx->tags;

	if (!iter_data->reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
		ret = iter_data->fn(rq, iter_data->data);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @q:		Request queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(rq, @data) where rq is a
 *		pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
			void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
		.q = q,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues have been shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(rq, @priv) where rq
 *		is a pointer to a request.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv)
{
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		struct blk_mq_tags *tags = q->tag_set->shared_tags;
		struct sbitmap_queue *bresv = &tags->breserved_tags;
		struct sbitmap_queue *btags = &tags->bitmap_tags;

		if (tags->nr_reserved_tags)
			bt_for_each(NULL, q, bresv, fn, priv, true);
		bt_for_each(NULL, q, btags, fn, priv, false);
	} else {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct blk_mq_tags *tags = hctx->tags;
			struct sbitmap_queue *bresv = &tags->breserved_tags;
			struct sbitmap_queue *btags = &tags->bitmap_tags;

			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check.
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			if (tags->nr_reserved_tags)
				bt_for_each(hctx, q, bresv, fn, priv, true);
			bt_for_each(hctx, q, btags, fn, priv, false);
		}
	}
	blk_queue_exit(q);
}

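/*
 * Initialize a single sbitmap_queue. Passing a shift of -1 lets sbitmap pick
 * a sensible default based on the depth.
 */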
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

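/*
 * Allocate the regular and reserved sbitmaps for a tag map. The regular
 * bitmap covers queue_depth minus the reserved tags, the reserved bitmap the
 * rest. Returns 0 on success or -ENOMEM if either allocation fails.
 */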
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

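/*
 * Allocate and initialize a blk_mq_tags structure for @total_tags tags,
 * @reserved_tags of which are set aside in the reserved bitmap. Returns NULL
 * if the depth exceeds BLK_MQ_TAG_MAX or if memory allocation fails.
 */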
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

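/*
 * Free the bitmaps and the blk_mq_tags structure itself, the counterpart to
 * blk_mq_init_tags().
 */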
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

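/*
 * Update the depth of a tag map. Shrinking, or growing within the originally
 * allocated depth, only resizes the sbitmap. Growing beyond the original
 * depth requires @can_grow and allocates a replacement tag map and request
 * set before freeing the old one; for shared tag maps the full depth was
 * allocated up front, so growing them is a no-op here.
 */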
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit; set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update reserved tags here;
		 * they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

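/*
 * Resize the shared tag map of @set to @size tags in total; the regular
 * bitmap gets @size minus the reserved tags.
 */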
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

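/*
 * Resize the shared scheduler tags of @q to match q->nr_requests, keeping
 * the reserved tags out of the regular bitmap.
 */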
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);