/block/ |
D | blk-mq-sched.c |
    22  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
    24      if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
    27      set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
    31  void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
    33      clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
    44      blk_mq_run_hw_queue(hctx, true);
    58  struct blk_mq_hw_ctx *hctx =                      (local in blk_mq_dispatch_hctx_list())
    65      if (rq->mq_hctx != hctx) {
    74      return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
    87  static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
    [all …]
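The mark/restart pair above is a test-before-set idiom: check the bit with a plain read first, so re-marking an already-marked queue never performs an atomic read-modify-write on the shared state word. A minimal user-space analogue in C11 atomics (all names below are illustrative stand-ins, not kernel API):

#include <stdatomic.h>

/* Illustrative stand-in for hctx->state. */
static _Atomic unsigned long state;
#define SCHED_RESTART (1UL << 0)

/* Mark: cheap relaxed load first; the atomic OR only runs on the
 * 0 -> 1 transition, so repeated marks don't bounce the cacheline. */
static void mark_restart(void)
{
        if (atomic_load_explicit(&state, memory_order_relaxed) & SCHED_RESTART)
                return;
        atomic_fetch_or(&state, SCHED_RESTART);
}

/* Restart: clear the mark, then kick the queue, mirroring the
 * clear_bit() + blk_mq_run_hw_queue(hctx, true) sequence above. */
static void restart(void (*run_queue)(void))
{
        unsigned long old = atomic_fetch_and(&state, ~SCHED_RESTART);

        if (old & SCHED_RESTART)
                run_queue();
}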
|
D | blk-mq-sysfs.c |
    34  struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,    (local in blk_mq_hw_sysfs_release())
    37      blk_free_flush_queue(hctx->fq);
    38      sbitmap_free(&hctx->ctx_map);
    39      free_cpumask_var(hctx->cpumask);
    40      kfree(hctx->ctxs);
    41      kfree(hctx);
    53  struct blk_mq_hw_ctx *hctx;                       (local in blk_mq_hw_sysfs_show())
    58      hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
    59      q = hctx->queue;
    65      res = entry->show(hctx, page);
    [all …]
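Both handlers above recover the hctx from an embedded kobject with container_of(). A self-contained sketch of that recovery, using stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Recover the enclosing structure from a pointer to one of its
 * members; the same trick the release/show handlers use to get from
 * the embedded kobject back to the hctx. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kobj { int refcount; };          /* stand-in for struct kobject */

struct hw_ctx {                         /* stand-in for blk_mq_hw_ctx */
        int index;
        struct kobj kobj;               /* embedded object, not a pointer */
};

/* The callback receives only the embedded member... */
static void hw_ctx_release(struct kobj *kobj)
{
        /* ...and computes its way back to the container. */
        struct hw_ctx *h = container_of(kobj, struct hw_ctx, kobj);

        printf("releasing hctx %d\n", h->index);
        free(h);                        /* members first, container last */
}

int main(void)
{
        struct hw_ctx *h = malloc(sizeof(*h));

        if (!h)
                return 1;
        h->index = 3;
        hw_ctx_release(&h->kobj);
        return 0;
}

As on lines 37-41, a real release callback frees everything hanging off the object before freeing the containing object itself.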
|
D | blk-mq.c |
    50  static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
    52  static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
    59  static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
    61      return !list_empty_careful(&hctx->dispatch) ||
    62             sbitmap_any_bit_set(&hctx->ctx_map) ||
    63             blk_mq_sched_has_work(hctx);
    69  static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
    72      const int bit = ctx->index_hw[hctx->type];
    74      if (!sbitmap_test_bit(&hctx->ctx_map, bit))
    75          sbitmap_set_bit(&hctx->ctx_map, bit);
    [all …]
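blk_mq_hctx_has_pending() treats a hardware queue as worth running if any of three sources holds work: the hctx dispatch list, the software-queue bitmap, or the I/O scheduler. A compilable analogue of that check, with illustrative types in place of the kernel's:

#include <stdbool.h>

/* Stand-ins for the three places a hardware queue can hold work. */
struct hw_ctx {
        bool dispatch_nonempty;     /* ~ !list_empty_careful(&hctx->dispatch) */
        unsigned long ctx_map;      /* ~ bitmap of busy software queues */
        bool (*sched_has_work)(struct hw_ctx *);  /* ~ elevator ->has_work() */
};

/* Mirror of the blk_mq_hctx_has_pending() logic: one hit from any
 * source is enough. */
static bool hctx_has_pending(struct hw_ctx *h)
{
        return h->dispatch_nonempty ||
               h->ctx_map != 0 ||
               (h->sched_has_work && h->sched_has_work(h));
}

The mark_pending side (lines 74-75) is the same test-before-set idiom shown for blk-mq-sched.c above, applied to the per-hctx software-queue bitmap.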
|
D | blk-mq-debugfs.c |
    178  struct blk_mq_hw_ctx *hctx = data;               (local in hctx_state_show())
    180      blk_flags_show(m, hctx->state, hctx_state_name,
    206  struct blk_mq_hw_ctx *hctx = data;               (local in hctx_flags_show())
    207      const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);
    217      hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
    314      __acquires(&hctx->lock)                      (in hctx_dispatch_start())
    316  struct blk_mq_hw_ctx *hctx = m->private;
    318      spin_lock(&hctx->lock);
    319      return seq_list_start(&hctx->dispatch, *pos);
    324  struct blk_mq_hw_ctx *hctx = m->private;         (local in hctx_dispatch_next())
    [all …]
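The hctx_dispatch_start()/next() hits follow the standard seq_file iterator contract: take the lock in ->start, hold it across ->next, and drop it in ->stop, so the list cannot change mid-walk. A kernel-style sketch of the full trio (a sketch assuming a spinlock-protected list, not code copied from the file; not a standalone program):

static void *dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);         /* stays held across the walk */
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);       /* only now can the list change */
}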
|
D | blk-mq.h |
    48   bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
    50   void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
    51   struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
    121  extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
    160  struct blk_mq_hw_ctx *hctx;                      (member)
    176  int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
    189      struct blk_mq_hw_ctx *hctx)                  (argument of bt_wait_ptr())
    191      if (!hctx)
    193      return sbq_wait_ptr(bt, &hctx->wait_index);
    199  static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
    [all …]
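bt_wait_ptr() spreads sleeping tag waiters across several wait queues by advancing the per-hctx wait_index. A user-space analogue of that round-robin selection (queue count and names are illustrative; sbitmap sizes its own wait-queue array):

#include <stdatomic.h>

#define NR_WAIT_QUEUES 8                /* illustrative */

struct wait_queue { int waiters; };     /* stand-in type */

struct sb_queue {
        struct wait_queue ws[NR_WAIT_QUEUES];
};

/* Analogue of sbq_wait_ptr(): advance a per-hctx index so successive
 * sleepers land on different wait queues instead of piling onto one. */
static struct wait_queue *wait_ptr(struct sb_queue *sbq,
                                   atomic_uint *wait_index)
{
        unsigned int i = atomic_fetch_add_explicit(wait_index, 1,
                                                   memory_order_relaxed);

        return &sbq->ws[i % NR_WAIT_QUEUES];
}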
|
D | blk-mq-tag.c |
    38  void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
    42      struct blk_mq_tags *tags = hctx->tags;
    48      if (blk_mq_is_shared_tags(hctx->flags)) {
    49          struct request_queue *q = hctx->queue;
    55      if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
    56          test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
    81  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
    83      struct blk_mq_tags *tags = hctx->tags;
    86      if (blk_mq_is_shared_tags(hctx->flags)) {
    87          struct request_queue *q = hctx->queue;
    [all …]
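__blk_mq_tag_busy() and __blk_mq_tag_idle() account how many queues are actively competing for a shared tag set; that count is what lets tag allocation hand each active queue a fair share. A hedged user-space sketch of the same bookkeeping (the kernel's fairness formula is more involved; all names are stand-ins):

#include <stdatomic.h>
#include <stdbool.h>

struct tag_set {
        atomic_uint active_queues;      /* queues currently competing */
        unsigned int nr_tags;
};

struct queue {
        atomic_bool tag_active;         /* ~ BLK_MQ_S_TAG_ACTIVE */
        struct tag_set *tags;
};

/* Busy: count the queue once, on the false -> true transition only.
 * The cheap load up front mirrors the test_bit() before
 * test_and_set_bit() pair on lines 55-56. */
static void tag_busy(struct queue *q)
{
        bool expected = false;

        if (atomic_load(&q->tag_active) ||
            !atomic_compare_exchange_strong(&q->tag_active, &expected, true))
                return;
        atomic_fetch_add(&q->tags->active_queues, 1);
}

/* Idle: uncount on the true -> false transition only. */
static void tag_idle(struct queue *q)
{
        bool expected = true;

        if (atomic_compare_exchange_strong(&q->tag_active, &expected, false))
                atomic_fetch_sub(&q->tags->active_queues, 1);
}

/* What the count buys: each active queue gets roughly an equal slice
 * of the tag space. */
static unsigned int fair_depth(struct tag_set *t)
{
        unsigned int users = atomic_load(&t->active_queues);

        return users ? t->nr_tags / users : t->nr_tags;
}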
|
D | kyber-iosched.c |
    453  static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
    455      struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
    456      struct blk_mq_tags *tags = hctx->sched_tags;
    464  static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    469      khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
    473      khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
    475          GFP_KERNEL, hctx->numa_node);
    479      for (i = 0; i < hctx->nr_ctx; i++)
    483      if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
    484          ilog2(8), GFP_KERNEL, hctx->numa_node,
    [all …]
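kyber_init_hctx() passes hctx->numa_node to every allocation so the scheduler's per-queue state lands on the node that services the hardware queue. A kernel-style sketch of that pattern (struct and field names illustrative, error handling trimmed to the essentials; not a standalone program):

struct sched_hctx_data {
        void **per_ctx;                 /* one slot per software queue */
};

static int sched_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        struct sched_hctx_data *d;

        /* both allocations target the hctx's home NUMA node */
        d = kmalloc_node(sizeof(*d), GFP_KERNEL, hctx->numa_node);
        if (!d)
                return -ENOMEM;

        d->per_ctx = kmalloc_array_node(hctx->nr_ctx, sizeof(*d->per_ctx),
                                        GFP_KERNEL, hctx->numa_node);
        if (!d->per_ctx) {
                kfree(d);
                return -ENOMEM;
        }

        hctx->sched_data = d;           /* blk-mq's per-hctx scheduler cookie */
        return 0;
}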
|
D | blk-mq-sched.h |
    16  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
    17  void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
    19  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
    25  static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
    27      if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
    28          __blk_mq_sched_restart(hctx);
    70  static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
    72      struct elevator_queue *e = hctx->queue->elevator;
    75      return e->type->ops.has_work(hctx);
    80  static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
    [all …]
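blk_mq_sched_restart() shows the usual header split: the inline does only a cheap flag test, and the out-of-line __blk_mq_sched_restart() does the real work. A minimal illustration (a non-atomic read for brevity; all names are stand-ins):

struct hw_queue { unsigned long state; };
#define RESTART_BIT (1UL << 4)          /* illustrative flag */

void __sched_restart(struct hw_queue *q);   /* out-of-line slow path */

/* Header inline: callers that never see RESTART_BIT pay one load and
 * a predicted branch, and never a function call. */
static inline void sched_restart(struct hw_queue *q)
{
        if (q->state & RESTART_BIT)
                __sched_restart(q);
}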
|
D | blk-mq-debugfs.h |
    27      struct blk_mq_hw_ctx *hctx);
    28  void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
    35      struct blk_mq_hw_ctx *hctx);
    36  void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
    50      struct blk_mq_hw_ctx *hctx)                  (in blk_mq_debugfs_register_hctx())
    54  static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
    75      struct blk_mq_hw_ctx *hctx)                  (in blk_mq_debugfs_register_sched_hctx())
    79  static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
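This header pairs real declarations with empty static-inline stubs so call sites build unchanged whether or not debugfs support is configured in. A sketch of the pattern using one of the functions above (CONFIG_BLK_DEBUG_FS is the real config symbol; consult the header itself for the exact prototypes):

#ifdef CONFIG_BLK_DEBUG_FS
void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                  struct blk_mq_hw_ctx *hctx);
#else
/* Stub: compiles away entirely, so callers need no #ifdef of their own. */
static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                                struct blk_mq_hw_ctx *hctx)
{
}
#endif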
|
D | mq-deadline.c |
    596  static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
    598      struct deadline_data *dd = hctx->queue->elevator->elevator_data;
    630  static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
    632      struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
    633      const unsigned int nrr = hctx->queue->nr_requests;
    654      data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);    (in dd_limit_depth())
    658  static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
    660      struct request_queue *q = hctx->queue;
    662      struct blk_mq_tags *tags = hctx->sched_tags;
    670  static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    [all …]
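dd_to_word_depth() rescales the scheduler's queue-wide async depth to sbitmap word granularity, because sbitmap applies its shallow-depth limit per word of the bitmap rather than across the whole tag space. A hedged sketch of the proportional conversion (the exact kernel expression may differ; parameter names are illustrative):

/* Scale a queue-wide depth budget down to one bitmap word: take the
 * same fraction of each word, round up, and never return zero. */
static unsigned int to_word_depth(unsigned int qdepth,
                                  unsigned int bits_per_word,
                                  unsigned int nr_requests)
{
        unsigned int d = (qdepth * bits_per_word + nr_requests - 1) /
                         nr_requests;

        return d ? d : 1;
}

In the listing, bits_per_word corresponds to 1 << bt->sb.shift and nr_requests to the nrr local on line 633.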
|
D | blk-flush.c |
    363  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;        (local in mq_flush_data_end_io())
    387      blk_mq_sched_restart(hctx);
    542  void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
    545      lockdep_set_class(&hctx->fq->mq_flush_lock, key);
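blk_mq_hctx_set_fq_lock_class() exists so a stacked driver can give its flush-queue lock a distinct lockdep class; without it, nesting against a lower device's flush lock of the same class looks like a self-deadlock to the validator. A kernel-style usage sketch (driver names hypothetical; not a standalone program):

static struct lock_class_key mydrv_fq_key;      /* hypothetical driver */

static void mydrv_setup_hctx(struct blk_mq_hw_ctx *hctx)
{
        /* put this driver's mq_flush_lock in its own lockdep class */
        blk_mq_hctx_set_fq_lock_class(hctx, &mydrv_fq_key);
}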
|
D | bfq-iosched.c |
    5147  static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
    5149      struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
    5159  static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
    5161      struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
    5300  static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
    5302      struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
    5312      rq = __bfq_dispatch_request(hctx);
    5319      bfq_update_dispatch_stats(hctx->queue, rq,
    6239  static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
    6242      struct request_queue *q = hctx->queue;
    [all …]
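Every bfq hook above starts with the same recovery step: scheduler-wide state comes from hctx->queue->elevator->elevator_data, the cookie the scheduler stored at init time. A kernel-style sketch of that boilerplate for a hypothetical scheduler "foo" (not a standalone program):

struct foo_data {                       /* scheduler-wide private state */
        spinlock_t lock;
        struct list_head pending;
};

static struct request *foo_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        /* elevator_data holds whatever the scheduler's init hook put
         * there; every hook recovers it the same way */
        struct foo_data *fd = hctx->queue->elevator->elevator_data;
        struct request *rq = NULL;

        spin_lock(&fd->lock);
        if (!list_empty(&fd->pending)) {
                rq = list_first_entry(&fd->pending, struct request,
                                      queuelist);
                list_del_init(&rq->queuelist);
        }
        spin_unlock(&fd->lock);
        return rq;
}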
|
D | bsg-lib.c |
    272  static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
    275      struct request_queue *q = hctx->queue;
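bsg_queue_rq() is a ->queue_rq() implementation, the single entry point through which blk-mq hands a driver its requests. A minimal kernel-style sketch of the shape of such a handler (mydrv_submit() is hypothetical; not a standalone program):

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);

        /* hypothetical hardware-submit helper; a real driver talks to
         * its device here */
        if (mydrv_submit(hctx->queue, rq))
                return BLK_STS_RESOURCE;        /* blk-mq retries later */

        return BLK_STS_OK;
}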
|
D | elevator.h |
    42  void (*insert_requests)(struct blk_mq_hw_ctx *hctx, struct list_head *list,
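insert_requests is the elevator hook blk-mq calls to hand a batch of requests to the scheduler. A kernel-style sketch of an implementation, reusing the hypothetical foo_data from the bfq sketch above (names illustrative; the third parameter is blk_insert_t in current trees, so check your own):

static void foo_insert_requests(struct blk_mq_hw_ctx *hctx,
                                struct list_head *list,
                                blk_insert_t flags)
{
        struct foo_data *fd = hctx->queue->elevator->elevator_data;

        spin_lock(&fd->lock);
        /* take ownership of the whole batch in one splice */
        list_splice_tail_init(list, &fd->pending);
        spin_unlock(&fd->lock);
}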
|