
Lines matching refs: hctx

45 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
49 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
50 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
71 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
75 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
80 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
132 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
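The declarations above (lines 45-132) are the dispatch-path entry points of the private blk-mq header: blk_mq_flush_busy_ctxs() drains pending requests from a hardware context's software queues into a caller-supplied list, and blk_mq_dispatch_rq_list() hands such a list to the driver. A minimal sketch of how they combine, as a hypothetical caller; the third parameter of blk_mq_dispatch_rq_list() is truncated in the match above and is assumed here to be the nr_budgets count used in kernels of this era:

	/* Sketch only: drain every busy software queue of this hctx into a
	 * local list, then dispatch the whole batch to the driver.
	 * Assumes the third argument is the number of pre-acquired
	 * dispatch budgets (0 = none pre-acquired). */
	LIST_HEAD(rq_list);

	blk_mq_flush_busy_ctxs(hctx, &rq_list);
	if (!list_empty(&rq_list))
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);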
162 struct blk_mq_hw_ctx *hctx; member
173 return data->hctx->sched_tags; in blk_mq_tags_from_data()
175 return data->hctx->tags; in blk_mq_tags_from_data()
178 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_stopped() argument
180 return test_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_hctx_stopped()
183 static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) in blk_mq_hw_queue_mapped() argument
185 return hctx->nr_ctx && hctx->tags; in blk_mq_hw_queue_mapped()
205 static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx) in __blk_mq_inc_active_requests() argument
207 if (blk_mq_is_sbitmap_shared(hctx->flags)) in __blk_mq_inc_active_requests()
208 atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap); in __blk_mq_inc_active_requests()
210 atomic_inc(&hctx->nr_active); in __blk_mq_inc_active_requests()
213 static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx) in __blk_mq_dec_active_requests() argument
215 if (blk_mq_is_sbitmap_shared(hctx->flags)) in __blk_mq_dec_active_requests()
216 atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap); in __blk_mq_dec_active_requests()
218 atomic_dec(&hctx->nr_active); in __blk_mq_dec_active_requests()
221 static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx) in __blk_mq_active_requests() argument
223 if (blk_mq_is_sbitmap_shared(hctx->flags)) in __blk_mq_active_requests()
224 return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap); in __blk_mq_active_requests()
225 return atomic_read(&hctx->nr_active); in __blk_mq_active_requests()
227 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, in __blk_mq_put_driver_tag() argument
230 blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag); in __blk_mq_put_driver_tag()
235 __blk_mq_dec_active_requests(hctx); in __blk_mq_put_driver_tag()
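__blk_mq_put_driver_tag() is matched only at lines 230 and 235; the lines in between do not mention hctx. A sketch with the gap filled in as an assumption from kernels of the same era, where the tag is cleared and the active count dropped only for requests flagged in-flight:

	static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
						   struct request *rq)
	{
		blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
		rq->tag = BLK_MQ_NO_TAG;		/* assumed gap fill */

		if (rq->rq_flags & RQF_MQ_INFLIGHT) {	/* assumed gap fill */
			rq->rq_flags &= ~RQF_MQ_INFLIGHT;
			__blk_mq_dec_active_requests(hctx);
		}
	}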
291 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, in hctx_may_queue() argument
296 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) in hctx_may_queue()
305 if (blk_mq_is_sbitmap_shared(hctx->flags)) { in hctx_may_queue()
306 struct request_queue *q = hctx->queue; in hctx_may_queue()
313 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in hctx_may_queue()
315 users = atomic_read(&hctx->tags->active_queues); in hctx_may_queue()
325 return __blk_mq_active_requests(hctx) < depth; in hctx_may_queue()
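hctx_may_queue() is the fairness check for shared tag sets: when several queues compete for one tag space, each active user gets roughly a proportional share of the total depth. Only the hctx-mentioning lines are matched above; the sketch below fills the gaps under the assumption of a ~v5.10-era implementation, where the per-queue allowance works out to max(ceil(depth / users), 4):

	static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
					  struct sbitmap_queue *bt)
	{
		unsigned int depth, users;

		/* Tag space not shared: nothing to arbitrate. */
		if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
			return true;

		if (blk_mq_is_sbitmap_shared(hctx->flags)) {
			struct request_queue *q = hctx->queue;
			struct blk_mq_tag_set *set = q->tag_set;	/* assumed gap fill */

			if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
				return true;				/* assumed gap fill */
			users = atomic_read(&set->active_queues_shared_sbitmap);
		} else {
			if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
				return true;
			users = atomic_read(&hctx->tags->active_queues);
		}

		if (!users)
			return true;

		/* Give each active queue a proportional share of the tag
		 * space, but never fewer than 4 tags. */
		depth = max((bt->sb.depth + users - 1) / users, 4U);

		return __blk_mq_active_requests(hctx) < depth;
	}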