Lines Matching refs:hctx

All matches below appear to come from the block layer's I/O scheduler glue, block/blk-mq-sched.c. The leading number on each line is the line number within that source file; the trailing annotation names the enclosing function and whether hctx is an argument or a local there.

50 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_mark_restart_hctx()  argument
52 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_mark_restart_hctx()
55 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_mark_restart_hctx()
59 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_restart() argument
61 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_restart()
63 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_restart()
74 blk_mq_run_hw_queue(hctx, true); in blk_mq_sched_restart()
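
The first two fragments (lines 50-74) pair up around the BLK_MQ_S_SCHED_RESTART bit: one side marks a restart request, the other consumes it and re-runs the hardware queue. A minimal sketch of that pairing, reconstructed from the matched lines only; the early returns and the elided lines 64-73 are assumptions, not part of the listing:

    void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
    {
            /* Nothing to do if a restart is already pending (line 52). */
            if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                    return;

            set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
    }

    void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
    {
            /* Only act when a restart was actually requested (line 61). */
            if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                    return;
            clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

            /* Lines 64-73 are elided in the listing and not reconstructed. */
            blk_mq_run_hw_queue(hctx, true);        /* line 74: async re-run */
    }
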
88 struct blk_mq_hw_ctx *hctx = in blk_mq_dispatch_hctx_list() local
95 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
104 return blk_mq_dispatch_rq_list(hctx, &hctx_list, count); in blk_mq_dispatch_hctx_list()
117 static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in __blk_mq_do_dispatch_sched() argument
119 struct request_queue *q = hctx->queue; in __blk_mq_do_dispatch_sched()
127 if (hctx->dispatch_busy) in __blk_mq_do_dispatch_sched()
130 max_dispatch = hctx->queue->nr_requests; in __blk_mq_do_dispatch_sched()
135 if (e->type->ops.has_work && !e->type->ops.has_work(hctx)) in __blk_mq_do_dispatch_sched()
138 if (!list_empty_careful(&hctx->dispatch)) { in __blk_mq_do_dispatch_sched()
146 rq = e->type->ops.dispatch_request(hctx); in __blk_mq_do_dispatch_sched()
166 if (rq->mq_hctx != hctx) in __blk_mq_do_dispatch_sched()
186 dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count); in __blk_mq_do_dispatch_sched()
194 static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in blk_mq_do_dispatch_sched() argument
200 ret = __blk_mq_do_dispatch_sched(hctx); in blk_mq_do_dispatch_sched()
204 blk_mq_delay_run_hw_queue(hctx, 0); in blk_mq_do_dispatch_sched()
212 static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, in blk_mq_next_ctx() argument
215 unsigned short idx = ctx->index_hw[hctx->type]; in blk_mq_next_ctx()
217 if (++idx == hctx->nr_ctx) in blk_mq_next_ctx()
220 return hctx->ctxs[idx]; in blk_mq_next_ctx()
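
blk_mq_next_ctx() (lines 212-220) reads as a small round-robin helper over the hardware context's software contexts. The sketch below assumes the wrap-to-zero assignment on the unmatched line between 217 and 220; everything else comes from the listing:

    static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
                                              struct blk_mq_ctx *ctx)
    {
            unsigned short idx = ctx->index_hw[hctx->type];

            /* Advance past the current ctx; the wrap-around value is assumed. */
            if (++idx == hctx->nr_ctx)
                    idx = 0;

            return hctx->ctxs[idx];
    }
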
231 static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) in blk_mq_do_dispatch_ctx() argument
233 struct request_queue *q = hctx->queue; in blk_mq_do_dispatch_ctx()
235 struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from); in blk_mq_do_dispatch_ctx()
240 if (!list_empty_careful(&hctx->dispatch)) { in blk_mq_do_dispatch_ctx()
245 if (!sbitmap_any_bit_set(&hctx->ctx_map)) in blk_mq_do_dispatch_ctx()
251 rq = blk_mq_dequeue_from_ctx(hctx, ctx); in blk_mq_do_dispatch_ctx()
273 ctx = blk_mq_next_ctx(hctx, rq->mq_ctx); in blk_mq_do_dispatch_ctx()
277 WRITE_ONCE(hctx->dispatch_from, ctx); in blk_mq_do_dispatch_ctx()
281 static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) in __blk_mq_sched_dispatch_requests() argument
283 struct request_queue *q = hctx->queue; in __blk_mq_sched_dispatch_requests()
293 if (!list_empty_careful(&hctx->dispatch)) { in __blk_mq_sched_dispatch_requests()
294 spin_lock(&hctx->lock); in __blk_mq_sched_dispatch_requests()
295 if (!list_empty(&hctx->dispatch)) in __blk_mq_sched_dispatch_requests()
296 list_splice_init(&hctx->dispatch, &rq_list); in __blk_mq_sched_dispatch_requests()
297 spin_unlock(&hctx->lock); in __blk_mq_sched_dispatch_requests()
314 blk_mq_sched_mark_restart_hctx(hctx); in __blk_mq_sched_dispatch_requests()
315 if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) { in __blk_mq_sched_dispatch_requests()
317 ret = blk_mq_do_dispatch_sched(hctx); in __blk_mq_sched_dispatch_requests()
319 ret = blk_mq_do_dispatch_ctx(hctx); in __blk_mq_sched_dispatch_requests()
322 ret = blk_mq_do_dispatch_sched(hctx); in __blk_mq_sched_dispatch_requests()
323 } else if (hctx->dispatch_busy) { in __blk_mq_sched_dispatch_requests()
325 ret = blk_mq_do_dispatch_ctx(hctx); in __blk_mq_sched_dispatch_requests()
327 blk_mq_flush_busy_ctxs(hctx, &rq_list); in __blk_mq_sched_dispatch_requests()
328 blk_mq_dispatch_rq_list(hctx, &rq_list, 0); in __blk_mq_sched_dispatch_requests()
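
Lines 293-297 of __blk_mq_sched_dispatch_requests() show a check-then-lock splice of the hctx dispatch list onto a local list. A sketch of just that pattern; the rq_list declaration is assumed from context and is not among the matched lines:

    LIST_HEAD(rq_list);     /* local staging list, assumed from line 296 */

    /*
     * Lockless peek first; only if the dispatch list looks non-empty is
     * hctx->lock taken, the emptiness re-checked, and the list spliced.
     */
    if (!list_empty_careful(&hctx->dispatch)) {
            spin_lock(&hctx->lock);
            if (!list_empty(&hctx->dispatch))
                    list_splice_init(&hctx->dispatch, &rq_list);
            spin_unlock(&hctx->lock);
    }
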
334 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_dispatch_requests() argument
336 struct request_queue *q = hctx->queue; in blk_mq_sched_dispatch_requests()
339 if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) in blk_mq_sched_dispatch_requests()
342 hctx->run++; in blk_mq_sched_dispatch_requests()
348 if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) { in blk_mq_sched_dispatch_requests()
349 if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) in blk_mq_sched_dispatch_requests()
350 blk_mq_run_hw_queue(hctx, true); in blk_mq_sched_dispatch_requests()
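
blk_mq_sched_dispatch_requests() (lines 334-350) wraps the internal dispatcher with a stopped/quiesced check, a run counter bump, and one retry on -EAGAIN before punting to an asynchronous queue run. A sketch assembled from the matched lines; the early return and any statements between the matched lines are assumptions:

    void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
    {
            struct request_queue *q = hctx->queue;

            /* Skip stopped or quiesced queues entirely (line 339). */
            if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                    return;

            hctx->run++;

            /* Retry once on -EAGAIN, then fall back to an async queue run. */
            if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
                    if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
                            blk_mq_run_hw_queue(hctx, true);
            }
    }
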
359 struct blk_mq_hw_ctx *hctx; in __blk_mq_sched_bio_merge() local
367 hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); in __blk_mq_sched_bio_merge()
368 type = hctx->type; in __blk_mq_sched_bio_merge()
369 if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) || in __blk_mq_sched_bio_merge()
403 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, in blk_mq_sched_bypass_insert() argument
433 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_sched_insert_request() local
437 if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) { in blk_mq_sched_insert_request()
468 e->type->ops.insert_requests(hctx, &list, at_head); in blk_mq_sched_insert_request()
471 __blk_mq_insert_request(hctx, rq, at_head); in blk_mq_sched_insert_request()
477 blk_mq_run_hw_queue(hctx, async); in blk_mq_sched_insert_request()
480 void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, in blk_mq_sched_insert_requests() argument
485 struct request_queue *q = hctx->queue; in blk_mq_sched_insert_requests()
494 e = hctx->queue->elevator; in blk_mq_sched_insert_requests()
496 e->type->ops.insert_requests(hctx, list, false); in blk_mq_sched_insert_requests()
503 if (!hctx->dispatch_busy && !e && !run_queue_async) { in blk_mq_sched_insert_requests()
504 blk_mq_try_issue_list_directly(hctx, list); in blk_mq_sched_insert_requests()
508 blk_mq_insert_requests(hctx, ctx, list); in blk_mq_sched_insert_requests()
511 blk_mq_run_hw_queue(hctx, run_queue_async); in blk_mq_sched_insert_requests()
517 struct blk_mq_hw_ctx *hctx, in blk_mq_sched_alloc_tags() argument
525 hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, in blk_mq_sched_alloc_tags()
527 if (!hctx->sched_tags) in blk_mq_sched_alloc_tags()
530 ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); in blk_mq_sched_alloc_tags()
532 blk_mq_free_rq_map(hctx->sched_tags, flags); in blk_mq_sched_alloc_tags()
533 hctx->sched_tags = NULL; in blk_mq_sched_alloc_tags()
542 struct blk_mq_hw_ctx *hctx; in blk_mq_sched_tags_teardown() local
545 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_sched_tags_teardown()
547 unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; in blk_mq_sched_tags_teardown()
549 if (hctx->sched_tags) { in blk_mq_sched_tags_teardown()
550 blk_mq_free_rq_map(hctx->sched_tags, flags); in blk_mq_sched_tags_teardown()
551 hctx->sched_tags = NULL; in blk_mq_sched_tags_teardown()
558 struct blk_mq_hw_ctx *hctx; in blk_mq_init_sched() local
577 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_sched()
578 ret = blk_mq_sched_alloc_tags(q, hctx, i); in blk_mq_init_sched()
589 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_sched()
591 ret = e->ops.init_hctx(hctx, i); in blk_mq_init_sched()
600 blk_mq_debugfs_register_sched_hctx(q, hctx); in blk_mq_init_sched()
618 struct blk_mq_hw_ctx *hctx; in blk_mq_sched_free_requests() local
621 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_sched_free_requests()
622 if (hctx->sched_tags) in blk_mq_sched_free_requests()
623 blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i); in blk_mq_sched_free_requests()
629 struct blk_mq_hw_ctx *hctx; in blk_mq_exit_sched() local
632 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_sched()
633 blk_mq_debugfs_unregister_sched_hctx(hctx); in blk_mq_exit_sched()
634 if (e->type->ops.exit_hctx && hctx->sched_data) { in blk_mq_exit_sched()
635 e->type->ops.exit_hctx(hctx, i); in blk_mq_exit_sched()
636 hctx->sched_data = NULL; in blk_mq_exit_sched()
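
The listing ends inside blk_mq_exit_sched() (lines 629-636), which walks every hardware context, unregisters its scheduler debugfs entries, and invokes the elevator's exit_hctx hook before clearing sched_data. A sketch of that loop only; the variable declarations are assumed, and whatever follows line 636 in the file is not reconstructed:

    /* Per-hctx teardown loop inside blk_mq_exit_sched(q, e). */
    struct blk_mq_hw_ctx *hctx;
    int i;

    queue_for_each_hw_ctx(q, hctx, i) {
            blk_mq_debugfs_unregister_sched_hctx(hctx);
            if (e->type->ops.exit_hctx && hctx->sched_data) {
                    e->type->ops.exit_hctx(hctx, i);
                    hctx->sched_data = NULL;
            }
    }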