// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

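/*
 * Look up (or allocate) the io_cq for the current task's io_context on this
 * queue, take a reference on the io_context, and attach the icq to the
 * request for the elevator's use.
 */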
void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

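/*
 * If a restart was requested for this hardware queue, clear the flag and
 * re-run the queue asynchronously.
 */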
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the list_empty_careful()
	 * check of hctx->dispatch in blk_mq_run_hw_queue(). The pairing
	 * barrier is in blk_mq_dispatch_rq_list(). Without it, the dispatch
	 * code might not see SCHED_RESTART while a request newly added to
	 * hctx->dispatch is missed by the check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

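/*
 * list_sort() comparator: order requests by their hardware queue so that
 * requests belonging to the same hctx end up adjacent in the list.
 */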
static int sched_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

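/*
 * Dispatch the leading run of requests that share the first entry's hctx,
 * cutting them out of @rq_list. The return value is that of
 * blk_mq_dispatch_rq_list() for the batch.
 */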
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue.  Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;
	} while (++count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Some schedulers, such as bfq and deadline, may dequeue
		 * requests that belong to different hctxs. Sort the list by
		 * hctx and dispatch the requests in batches, one hctx at a
		 * time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

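/*
 * Repeatedly call __blk_mq_do_dispatch_sched() until it stops making
 * progress. If we need to reschedule, or have been dispatching for about a
 * second, punt the remaining work to an async queue run instead of hogging
 * the CPU.
 */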
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}

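/*
 * Return the software queue that follows @ctx on @hctx, wrapping around to
 * the first one. Used for round-robin dispatch from the sw queues.
 */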
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue.  Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

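/*
 * Core dispatch path for a hardware queue: drain any leftover requests on
 * hctx->dispatch first, then pull from the I/O scheduler or from the
 * software queues, depending on configuration and how busy the device is.
 */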
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched_dispatch)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue requests one by one from the sw queues if the device is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

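/*
 * Entry point for running a hardware queue. Skips stopped or quiesced
 * queues, and retries the dispatch once (then punts to an async run) when
 * -EAGAIN signals that hctx->dispatch was left non-empty.
 */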
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

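/*
 * Try to merge @bio into an existing request, either via the elevator's
 * ->bio_merge() hook or, for the 'none' case, against recent entries in the
 * per-CPU software queue. Returns true if the bio was merged.
 */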
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(q, bio, nr_segs);

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		return false;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
		ctx->rq_merged++;
		ret = true;
	}

	spin_unlock(&ctx->lock);

	return ret;
}

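/*
 * At insert time, see if @rq can instead be merged into a request that is
 * already queued in the elevator. Returns true if it was merged.
 */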
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

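/* Emit the block tracepoint for a request being inserted into a scheduler. */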
void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough requests directly.
	 *
	 * A passthrough request has to go on hctx->dispatch: the device may
	 * be in a state where it cannot handle FS requests, so they keep
	 * failing with STS_RESOURCE and piling up on hctx->dispatch, while a
	 * passthrough request may be exactly what is needed to recover. If
	 * that passthrough request were queued to the scheduler instead, it
	 * would never get a chance to be dispatched, since requests on
	 * hctx->dispatch are always served first.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

#include <trace/hooks/block.h>
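/*
 * Insert a single request, either directly onto hctx->dispatch (flush and
 * passthrough requests), into the I/O scheduler, or into the software queue,
 * and optionally kick the hardware queue afterwards.
 */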
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	bool skip = false;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	trace_android_vh_blk_mq_sched_insert_request(&skip, rq);

	if (!skip && blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
		/*
		 * Firstly, normal IO requests are inserted into the scheduler
		 * queue or sw queue, while flush requests are added directly
		 * to the dispatch queue (hctx->dispatch). Since there is at
		 * most one in-flight flush request per hw queue, it does not
		 * matter whether the flush is added to the tail or the front
		 * of the dispatch queue.
		 *
		 * Secondly, with NCQ a flush is a non-NCQ command and queueing
		 * it fails while any normal IO request (NCQ command) is in
		 * flight. Adding the flush rq to the front of hctx->dispatch
		 * tends to add some latency to that flush (because of
		 * S_SCHED_RESTART) compared with adding it to the tail, which
		 * in turn increases the chance of flush merging, so fewer
		 * flush requests are issued to the controller. It is observed
		 * that ~10% of the run time of blktests block/004 is saved on
		 * a disk behind an AHCI/NCQ drive when the flush rq is added
		 * to the front of hctx->dispatch.
		 *
		 * So simply queue flush rqs to the front of hctx->dispatch so
		 * that flush-intensive workloads benefit on NCQ hardware.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

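/*
 * Insert a batch of requests coming from plug flush, either via the
 * elevator's ->insert_requests() hook or into the software queue, issuing
 * directly when the 'none' scheduler is used and the hardware queue is idle.
 */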
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is only called from flush plug
	 * context, so take a usage counter to prevent the queue from being
	 * released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e && e->type->ops.insert_requests)
		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * Try to issue requests directly if the hw queue isn't busy
		 * and the 'none' scheduler is used; this may save us an extra
		 * enqueue & dequeue to/from the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
	percpu_ref_put(&q->q_usage_counter);
}

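/* Free the scheduler tag map (and its requests) for one hardware queue. */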
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags, flags);
		hctx->sched_tags = NULL;
	}
}

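/*
 * Allocate the per-hctx scheduler tag map and its requests, sized to
 * q->nr_requests. Cleans up after itself on failure.
 */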
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	/* Clear HCTX_SHARED so tags are init'ed */
	unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags, flags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		/* Clear HCTX_SHARED so tags are freed */
		unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags, flags);
			hctx->sched_tags = NULL;
		}
	}
}

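/*
 * Attach an I/O scheduler to the queue: size q->nr_requests, allocate
 * per-hctx scheduler tags, and run the elevator's init hooks. @e may be
 * NULL, meaning 'none'.
 */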
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hardware queue depth and 128,
	 * since we no longer split into sync/async like the old code did.
	 * Note that this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_requests(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_free_requests(q);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
	}
}

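/*
 * Tear down the elevator: per-hctx exit hooks, debugfs entries, scheduler
 * tag maps, and finally the elevator itself.
 */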
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}