// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

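/*
 * Associate @rq with the submitting task's io_context: look up (or create)
 * the per-queue io_cq and stash it in rq->elv.icq so that elevators doing
 * per-process accounting (e.g. BFQ) can find it.
 */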
void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the list_empty_careful()
	 * check of hctx->dispatch in blk_mq_run_hw_queue(). Its pair is the
	 * barrier in blk_mq_dispatch_rq_list(). Otherwise the dispatch code
	 * might not see SCHED_RESTART while a request newly added to
	 * hctx->dispatch is missed by the check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

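/*
 * list_sort() comparator: order requests by their hw queue pointer so that
 * requests belonging to the same hctx end up adjacent in the list.
 */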
static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

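/*
 * Dispatch the leading run of requests on @rq_list that belong to the same
 * hw queue as the first request; requests for a different hctx are left on
 * @rq_list for the caller's next pass. Returns the result of
 * blk_mq_dispatch_rq_list().
 */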
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

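/*
 * Delay before re-running the hw queues after a dispatch budget was
 * released without dispatching a request.
 */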
#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;
	} while (++count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctxs may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * then dispatch them in batches of requests from the same
		 * hctx.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

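/*
 * Repeatedly pull batches from the elevator via __blk_mq_do_dispatch_sched()
 * until it reports no further progress. If we need to reschedule, or have
 * been dispatching for about a second, punt the rest to an async queue run.
 */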
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}

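/*
 * Return the software queue that follows @ctx in this hw queue's ctx array,
 * wrapping around at the end; used for round-robin dispatch from the sw
 * queues in blk_mq_do_dispatch_ctx().
 */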
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

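/*
 * Drain requests that were already sitting on hctx->dispatch first, then
 * pull new ones from the elevator or the software queues. Returns -EAGAIN
 * if hctx->dispatch was found non-empty part-way through, so the caller can
 * run the queue again.
 */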
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched_dispatch)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

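/*
 * Try to merge @bio into a request that is already queued. If the elevator
 * provides its own ->bio_merge() hook, use that; otherwise fall back to
 * scanning the per-sw-queue list when BLK_MQ_F_SHOULD_MERGE is set.
 */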
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(q, bio, nr_segs);

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		return false;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
		ctx->rq_merged++;
		ret = true;
	}

	spin_unlock(&ctx->lock);

	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough requests directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly:
	 * the device may be in a state where it cannot handle FS requests,
	 * so STS_RESOURCE is returned over and over and those FS requests
	 * pile up on hctx->dispatch. A passthrough request may be exactly
	 * what is needed to get out of that state, and if it were added to
	 * the scheduler queue it would never get a chance to dispatch, since
	 * we prioritize requests in hctx->dispatch.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
		/*
		 * Firstly, normal IO requests are inserted to the scheduler
		 * queue or sw queue, while flush requests are added to the
		 * dispatch queue (hctx->dispatch) directly. There is at most
		 * one in-flight flush request per hw queue, so it doesn't
		 * matter whether the flush request goes to the tail or the
		 * front of the dispatch queue.
		 *
		 * Secondly, in the case of NCQ, a flush request is a non-NCQ
		 * command and queueing it will fail while any normal IO
		 * request (NCQ command) is in flight. Adding the flush rq to
		 * the front of hctx->dispatch makes it more likely to pick
		 * up a little extra latency because of S_SCHED_RESTART,
		 * compared with adding it to the tail of the dispatch queue;
		 * this increases the chance of flush merging, so fewer flush
		 * requests are issued to the controller. It is observed that
		 * ~10% of time is saved in blktests block/004 on a disk
		 * attached to an AHCI/NCQ drive when adding the flush rq to
		 * the front of hctx->dispatch.
		 *
		 * So simply queue flush rqs to the front of hctx->dispatch,
		 * so that flush-intensive workloads benefit in the NCQ case.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, so take one usage counter to prevent the queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e && e->type->ops.insert_requests)
		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * Try to issue requests directly if the hw queue isn't busy
		 * in the 'none' scheduler case; this may save us an extra
		 * enqueue & dequeue to/from the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
out:
	percpu_ref_put(&q->q_usage_counter);
}

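/*
 * Allocate the scheduler tag map for one hw queue, sized by q->nr_requests,
 * together with the static requests backing it.
 */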
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	/* Clear HCTX_SHARED so tags are init'ed */
	unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags, flags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret) {
		blk_mq_free_rq_map(hctx->sched_tags, flags);
		hctx->sched_tags = NULL;
	}

	return ret;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		/* Clear HCTX_SHARED so tags are freed */
		unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags, flags);
			hctx->sched_tags = NULL;
		}
	}
}

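/*
 * Attach elevator @e to @q: pick a scheduler queue depth, allocate sched
 * tags for every hw queue, then run the elevator's init_sched() and
 * per-hctx init_hctx() hooks. A NULL @e switches the queue to "none".
 */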
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hw queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_requests(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_free_requests(q);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_cleanup_queue or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
	}
}

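/*
 * Tear the elevator down again: run the per-hctx exit hooks, then the
 * elevator-wide exit_sched(), and finally free the scheduler tag maps.
 */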
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}