Searched refs:hctx (Results 1 – 8 of 8) sorted by relevance
/block/
blk-mq.c
    36  static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
    41  static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)  [argument, in blk_mq_hctx_has_pending()]
    45  for (i = 0; i < hctx->ctx_map.size; i++)  [in blk_mq_hctx_has_pending()]
    46  if (hctx->ctx_map.map[i].word)  [in blk_mq_hctx_has_pending()]
    52  static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,  [argument, in get_bm()]
    55  return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];  [in get_bm()]
    58  #define CTX_TO_BIT(hctx, ctx) \  [argument]
    59  ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
    64  static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,  [argument, in blk_mq_hctx_mark_pending()]
    67  struct blk_align_bitmap *bm = get_bm(hctx, ctx);  [in blk_mq_hctx_mark_pending()]
    [all …]

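Note: get_bm() and CTX_TO_BIT() above split a software context's index_hw into a word index (index_hw / bits_per_word) and a bit offset (index_hw & (bits_per_word - 1)); the mask form works because bits_per_word is a power of two. A minimal userspace sketch of that mapping, with illustrative names and sizes rather than the kernel's structures:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_WORD 32u                  /* assumed; the kernel sizes this per hctx */

    struct ctx_map {
        uint32_t map[4];                       /* 4 words -> up to 128 software contexts */
        unsigned int size;                     /* number of words in use */
    };

    static void mark_pending(struct ctx_map *m, unsigned int index_hw)
    {
        unsigned int word = index_hw / BITS_PER_WORD;        /* cf. get_bm() */
        unsigned int bit  = index_hw & (BITS_PER_WORD - 1);  /* cf. CTX_TO_BIT() */
        m->map[word] |= UINT32_C(1) << bit;    /* the kernel uses atomic set_bit() here */
    }

    static bool has_pending(const struct ctx_map *m)
    {
        /* cf. blk_mq_hctx_has_pending(): any non-zero word means queued work */
        for (unsigned int i = 0; i < m->size; i++)
            if (m->map[i])
                return true;
        return false;
    }

    int main(void)
    {
        struct ctx_map m = { .size = 4 };
        mark_pending(&m, 37);                  /* lands in word 1, bit 5 */
        printf("pending: %d\n", has_pending(&m));
        return 0;
    }
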
blk-mq-sysfs.c
    82  struct blk_mq_hw_ctx *hctx;  [local, in blk_mq_hw_sysfs_show()]
    87  hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);  [in blk_mq_hw_sysfs_show()]
    88  q = hctx->queue;  [in blk_mq_hw_sysfs_show()]
    96  res = entry->show(hctx, page);  [in blk_mq_hw_sysfs_show()]
   106  struct blk_mq_hw_ctx *hctx;  [local, in blk_mq_hw_sysfs_store()]
   111  hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);  [in blk_mq_hw_sysfs_store()]
   112  q = hctx->queue;  [in blk_mq_hw_sysfs_store()]
   120  res = entry->store(hctx, page, length);  [in blk_mq_hw_sysfs_store()]
   177  static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)  [argument, in blk_mq_hw_sysfs_poll_show()]
   179  return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);  [in blk_mq_hw_sysfs_poll_show()]
    [all …]

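Note: both sysfs handlers recover the hctx from the kobject embedded inside it via container_of(), i.e. by subtracting the member's offset from the member's address. A self-contained sketch of that pattern with simplified stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kobject { int refcount; };

    struct hw_ctx {
        int queue_num;
        struct kobject kobj;                   /* embedded, as in struct blk_mq_hw_ctx */
    };

    int main(void)
    {
        struct hw_ctx hctx = { .queue_num = 3 };
        struct kobject *kobj = &hctx.kobj;     /* the pointer sysfs hands back */

        /* cf. hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); */
        struct hw_ctx *back = container_of(kobj, struct hw_ctx, kobj);
        printf("queue_num = %d\n", back->queue_num);
        return 0;
    }
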
blk-mq-tag.c
    61  bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)  [argument, in __blk_mq_tag_busy()]
    63  if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&  [in __blk_mq_tag_busy()]
    64  !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))  [in __blk_mq_tag_busy()]
    65  atomic_inc(&hctx->tags->active_queues);  [in __blk_mq_tag_busy()]
   104  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)  [argument, in __blk_mq_tag_idle()]
   106  struct blk_mq_tags *tags = hctx->tags;  [in __blk_mq_tag_idle()]
   108  if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))  [in __blk_mq_tag_idle()]
   120  static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,  [argument, in hctx_may_queue()]
   125  if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))  [in hctx_may_queue()]
   127  if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))  [in hctx_may_queue()]
    [all …]

blk-mq-tag.h
    55  extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
    78  static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)  [argument, in blk_mq_tag_busy()]
    80  if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))  [in blk_mq_tag_busy()]
    83  return __blk_mq_tag_busy(hctx);  [in blk_mq_tag_busy()]
    86  static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)  [argument, in blk_mq_tag_idle()]
    88  if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))  [in blk_mq_tag_idle()]
    91  __blk_mq_tag_idle(hctx);  [in blk_mq_tag_idle()]
   100  static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,  [argument, in blk_mq_tag_set_rq()]
   103  hctx->tags->rqs[tag] = rq;  [in blk_mq_tag_set_rq()]

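Note: blk_mq_tag_busy()/blk_mq_tag_idle() skip the accounting entirely unless BLK_MQ_F_TAG_SHARED is set, and the __blk_mq_tag_busy()/__blk_mq_tag_idle() bodies in blk-mq-tag.c above do a plain bit test before the atomic test-and-set, so tags->active_queues moves exactly once per busy/idle transition. A userspace C11 approximation of that edge counting; the bit value here is assumed for illustration:

    #include <stdatomic.h>
    #include <stdio.h>

    #define S_TAG_ACTIVE (1u << 1)             /* assumed bit position, illustrative only */

    struct hw_ctx {
        _Atomic unsigned int state;
    };

    static atomic_int active_queues;           /* cf. hctx->tags->active_queues */

    static void tag_busy(struct hw_ctx *hctx)
    {
        /* cf. __blk_mq_tag_busy(): cheap load first, atomic RMW only if needed */
        if (!(atomic_load(&hctx->state) & S_TAG_ACTIVE) &&
            !(atomic_fetch_or(&hctx->state, S_TAG_ACTIVE) & S_TAG_ACTIVE))
            atomic_fetch_add(&active_queues, 1);
    }

    static void tag_idle(struct hw_ctx *hctx)
    {
        /* cf. __blk_mq_tag_idle(): decrement only on the set -> clear edge */
        if (atomic_fetch_and(&hctx->state, ~S_TAG_ACTIVE) & S_TAG_ACTIVE)
            atomic_fetch_sub(&active_queues, 1);
    }

    int main(void)
    {
        struct hw_ctx h = { .state = 0 };
        tag_busy(&h);
        tag_busy(&h);                          /* second call sees the bit set: no double count */
        printf("active_queues = %d\n", atomic_load(&active_queues));
        tag_idle(&h);
        printf("active_queues = %d\n", atomic_load(&active_queues));
        return 0;
    }
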
blk-mq.h
    28  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
   104  struct blk_mq_hw_ctx *hctx;  [member]
   110  struct blk_mq_hw_ctx *hctx)  [argument, in blk_mq_set_alloc_data()]
   116  data->hctx = hctx;  [in blk_mq_set_alloc_data()]
   119  static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)  [argument, in blk_mq_hw_queue_mapped()]
   121  return hctx->nr_ctx && hctx->tags;  [in blk_mq_hw_queue_mapped()]

blk-flush.c
   230  struct blk_mq_hw_ctx *hctx;  [local, in flush_end_io()]
   234  hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);  [in flush_end_io()]
   235  blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);  [in flush_end_io()]
   321  struct blk_mq_hw_ctx *hctx;  [local, in blk_kick_flush()]
   327  hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);  [in blk_kick_flush()]
   328  blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);  [in blk_kick_flush()]
   355  struct blk_mq_hw_ctx *hctx;  [local, in mq_flush_data_end_io()]
   360  hctx = q->mq_ops->map_queue(q, ctx->cpu);  [in mq_flush_data_end_io()]
   368  blk_mq_run_hw_queue(hctx, true);  [in mq_flush_data_end_io()]

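Note: these snippets show the flush machinery reusing the originating data request's tag for the flush request, with blk_mq_tag_set_rq() repointing the hctx's tag-to-request table to the flush request in blk_kick_flush() and back to the original request in flush_end_io(). A simplified stand-in for that repointing, with invented types for illustration:

    #include <stdio.h>

    struct request { const char *name; unsigned int tag; };

    struct hw_ctx {
        struct request *rqs[64];               /* cf. hctx->tags->rqs[] */
    };

    /* cf. blk_mq_tag_set_rq(): tag lookups on this hctx now resolve to rq */
    static void tag_set_rq(struct hw_ctx *hctx, unsigned int tag, struct request *rq)
    {
        hctx->rqs[tag] = rq;
    }

    int main(void)
    {
        struct hw_ctx hctx = { 0 };
        struct request orig  = { "data rq",  5 };
        struct request flush = { "flush rq", 5 };   /* borrows the data rq's tag */

        tag_set_rq(&hctx, orig.tag, &flush);        /* cf. blk_kick_flush() */
        printf("tag 5 -> %s\n", hctx.rqs[5]->name);

        tag_set_rq(&hctx, flush.tag, &orig);        /* cf. flush_end_io() */
        printf("tag 5 -> %s\n", hctx.rqs[5]->name);
        return 0;
    }
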
blk.h
    42  struct blk_mq_hw_ctx *hctx;  [local, in blk_get_flush_queue()]
    47  hctx = q->mq_ops->map_queue(q, ctx->cpu);  [in blk_get_flush_queue()]
    49  return hctx->fq;  [in blk_get_flush_queue()]

blk-core.c
   289  struct blk_mq_hw_ctx *hctx;  [local, in blk_sync_queue()]
   292  queue_for_each_hw_ctx(q, hctx, i) {  [in blk_sync_queue()]
   293  cancel_delayed_work_sync(&hctx->run_work);  [in blk_sync_queue()]
   294  cancel_delayed_work_sync(&hctx->delay_work);  [in blk_sync_queue()]
  3369  struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num];  [local, in blk_poll()]
  3372  hctx->poll_invoked++;  [in blk_poll()]
  3374  ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie));  [in blk_poll()]
  3376  hctx->poll_success++;  [in blk_poll()]

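Note: blk_poll() counts every attempt in poll_invoked and only completed polls in poll_success, which is exactly the pair that blk_mq_hw_sysfs_poll_show() formats. A userspace sketch of that bookkeeping; driver_poll() is a hypothetical stand-in for q->mq_ops->poll():

    #include <stdbool.h>
    #include <stdio.h>

    struct hw_ctx {
        unsigned long poll_invoked;
        unsigned long poll_success;
    };

    /* hypothetical driver poll: returns true when the tagged request completed */
    static bool driver_poll(struct hw_ctx *hctx, unsigned int tag)
    {
        (void)hctx;
        return tag % 2 == 0;                   /* fake result for the demo */
    }

    static bool poll_once(struct hw_ctx *hctx, unsigned int tag)
    {
        hctx->poll_invoked++;                  /* cf. hctx->poll_invoked++ in blk_poll() */
        bool done = driver_poll(hctx, tag);
        if (done)
            hctx->poll_success++;              /* counted only on completion */
        return done;
    }

    int main(void)
    {
        struct hw_ctx h = { 0 };
        poll_once(&h, 4);
        poll_once(&h, 7);
        printf("invoked=%lu, success=%lu\n", h.poll_invoked, h.poll_success);
        return 0;
    }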