
Searched refs:tags (Results 1 – 8 of 8) sorted by relevance

/block/
blk-mq-tag.c
38 atomic_inc(&hctx->tags->active_queues); in __blk_mq_tag_busy()
47 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) in blk_mq_tag_wakeup_all() argument
49 sbitmap_queue_wake_all(tags->bitmap_tags); in blk_mq_tag_wakeup_all()
51 sbitmap_queue_wake_all(tags->breserved_tags); in blk_mq_tag_wakeup_all()
60 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_idle() local
72 atomic_dec(&tags->active_queues); in __blk_mq_tag_idle()
75 blk_mq_tag_wakeup_all(tags, false); in __blk_mq_tag_idle()
93 struct blk_mq_tags *tags = blk_mq_tags_from_data(data); in blk_mq_get_tag() local
101 if (unlikely(!tags->nr_reserved_tags)) { in blk_mq_get_tag()
105 bt = tags->breserved_tags; in blk_mq_get_tag()
[all …]
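
The blk-mq-tag.c hits above trace the shared-tag accounting: __blk_mq_tag_busy() raises tags->active_queues, and __blk_mq_tag_idle() drops it and then wakes tag waiters via blk_mq_tag_wakeup_all(). Below is a minimal user-space sketch of that busy/idle pattern; the tag_set type, the pthread condition variable standing in for the sbitmap wait queues, and the helper names are illustrative assumptions, not the kernel implementation.

    /* Minimal sketch of the busy/idle accounting seen in blk-mq-tag.c.
     * An atomic counter plus a condition variable stand in for
     * struct blk_mq_tags and its sbitmap wait queues (assumed types). */
    #include <stdatomic.h>
    #include <pthread.h>

    struct tag_set {                          /* hypothetical stand-in for blk_mq_tags */
        atomic_uint     active_queues;        /* queues currently competing for tags   */
        pthread_mutex_t lock;
        pthread_cond_t  free_tags;            /* stands in for the sbitmap wait queues */
    };

    static void tag_busy(struct tag_set *ts)
    {
        /* mirrors __blk_mq_tag_busy(): one more queue is active */
        atomic_fetch_add(&ts->active_queues, 1);
    }

    static void tag_idle(struct tag_set *ts)
    {
        /* mirrors __blk_mq_tag_idle(): drop the count, then wake waiters
         * so they can recompute their fair share of the tag space */
        atomic_fetch_sub(&ts->active_queues, 1);
        pthread_mutex_lock(&ts->lock);
        pthread_cond_broadcast(&ts->free_tags);   /* ~ blk_mq_tag_wakeup_all() */
        pthread_mutex_unlock(&ts->lock);
    }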

blk-mq-tag.h
36 extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags);
43 extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
46 struct blk_mq_tags **tags,
51 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
54 void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
90 static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags, in blk_mq_tag_is_reserved() argument
93 return tag < tags->nr_reserved_tags; in blk_mq_tag_is_reserved()

blk-mq.c
266 blk_mq_tag_wakeup_all(hctx->tags, true); in blk_mq_wake_waiters()
281 struct blk_mq_tags *tags = blk_mq_tags_from_data(data); in blk_mq_rq_ctx_init() local
282 struct request *rq = tags->static_rqs[tag]; in blk_mq_rq_ctx_init()
346 trace_android_vh_blk_rq_ctx_init(rq, tags, data, alloc_time_ns); in blk_mq_rq_ctx_init()
511 blk_mq_put_tag(hctx->tags, ctx, rq->tag); in __blk_mq_free_request()
882 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) in blk_mq_tag_to_rq() argument
884 if (tag < tags->nr_tags) { in blk_mq_tag_to_rq()
885 prefetch(tags->rqs[tag]); in blk_mq_tag_to_rq()
886 return tags->rqs[tag]; in blk_mq_tag_to_rq()
1107 struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags; in __blk_mq_get_driver_tag()
[all …]
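
The blk-mq.c hits include the tag-to-request lookup, blk_mq_tag_to_rq(), which bounds-checks the tag against tags->nr_tags and indexes the rqs[] array. A minimal sketch of that lookup, with simplified stand-in types, is shown below; it illustrates the pattern rather than reproducing the kernel code.

    /* Sketch of the tag -> request lookup from the blk_mq_tag_to_rq() hit.
     * The types are simplified stand-ins, not the kernel structures. */
    #include <stddef.h>

    struct request;                        /* opaque for this sketch */

    struct tag_map {                       /* hypothetical stand-in for blk_mq_tags  */
        unsigned int     nr_tags;          /* total number of tags                   */
        struct request **rqs;              /* the tag value indexes directly into rqs[] */
    };

    static struct request *tag_to_rq(const struct tag_map *tags, unsigned int tag)
    {
        if (tag < tags->nr_tags)           /* same bounds check as in the hit above  */
            return tags->rqs[tag];
        return NULL;                       /* an out-of-range tag maps to no request */
    }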

blk-mq.h
57 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
59 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
65 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
175 return data->hctx->tags; in blk_mq_tags_from_data()
185 return hctx->nr_ctx && hctx->tags; in blk_mq_hw_queue_mapped()
230 blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag); in __blk_mq_put_driver_tag()
315 users = atomic_read(&hctx->tags->active_queues); in hctx_may_queue()
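
The last blk-mq.h hit, hctx_may_queue(), reads tags->active_queues so that queues sharing a tag set each get roughly an equal share of the tags. The sketch below illustrates such a fair-share admission check; the rounded-up division and the minimum depth of 4 are assumptions for the sketch, not taken from the lines above.

    /* Sketch of a fair-share admission check in the spirit of hctx_may_queue():
     * divide the total tag depth by the number of active queues and admit a
     * request only while this queue is under its share. The rounding and the
     * floor of 4 are assumptions, not read from the listing. */
    static int may_queue(unsigned int total_depth, unsigned int active_queues,
                         unsigned int my_busy_requests)
    {
        unsigned int share;

        if (active_queues == 0)
            return 1;                                   /* no contention: admit */

        share = (total_depth + active_queues - 1) / active_queues;  /* round up */
        if (share < 4)
            share = 4;                 /* assumed floor so a queue never starves */

        return my_busy_requests < share;
    }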

blk-mq-debugfs.c
450 struct blk_mq_tags *tags) in blk_mq_debugfs_tags_show() argument
452 seq_printf(m, "nr_tags=%u\n", tags->nr_tags); in blk_mq_debugfs_tags_show()
453 seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags); in blk_mq_debugfs_tags_show()
455 atomic_read(&tags->active_queues)); in blk_mq_debugfs_tags_show()
458 sbitmap_queue_show(tags->bitmap_tags, m); in blk_mq_debugfs_tags_show()
460 if (tags->nr_reserved_tags) { in blk_mq_debugfs_tags_show()
462 sbitmap_queue_show(tags->breserved_tags, m); in blk_mq_debugfs_tags_show()
475 if (hctx->tags) in hctx_tags_show()
476 blk_mq_debugfs_tags_show(m, hctx->tags); in hctx_tags_show()
492 if (hctx->tags) in hctx_tags_bitmap_show()
[all …]

blk-mq-sysfs.c
148 return sprintf(page, "%u\n", hctx->tags->nr_tags); in blk_mq_hw_sysfs_nr_tags_show()
154 return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags); in blk_mq_hw_sysfs_nr_reserved_tags_show()

mq-deadline-main.c
551 struct blk_mq_tags *tags = hctx->sched_tags; in dd_depth_updated() local
552 unsigned int shift = tags->bitmap_tags->sb.shift; in dd_depth_updated()
556 sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth); in dd_depth_updated()

bfq-iosched.c
6387 struct blk_mq_tags *tags = hctx->sched_tags; in bfq_depth_updated() local
6390 min_shallow = bfq_update_depths(bfqd, tags->bitmap_tags); in bfq_depth_updated()
6391 sbitmap_queue_min_shallow_depth(tags->bitmap_tags, min_shallow); in bfq_depth_updated()
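
The mq-deadline and bfq hits both run when the scheduler tag depth changes: they recompute a minimum "shallow" depth on sched_tags->bitmap_tags so that async requests cannot consume every tag. The sketch below shows that depth-update idea; the three-quarters fraction and the floor of one are assumptions for illustration.

    /* Sketch of the depth-update idea from the dd_depth_updated()/bfq_depth_updated()
     * hits: when the scheduler tag depth changes, recompute how deep async requests
     * may allocate so they cannot exhaust the tags. The 3/4 fraction is an assumption. */
    static unsigned int recompute_async_depth(unsigned int nr_sched_tags)
    {
        unsigned int async_depth = 3 * nr_sched_tags / 4;

        return async_depth ? async_depth : 1;           /* always allow at least one */
    }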