/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;		/* total number of tags, reserved included */
	unsigned int nr_reserved_tags;	/* tags set aside for reserved allocations */

	atomic_t active_queues;		/* queues currently competing for a shared set */

	struct sbitmap_queue bitmap_tags;	/* normal tag space */
	struct sbitmap_queue breserved_tags;	/* reserved tag space */

	struct request **rqs;		/* tag -> in-flight request */
	struct request **static_rqs;	/* tag -> preallocated request */
	struct list_head page_list;	/* pages backing static_rqs */
};

/*
 * Extended tag address space map. This was needed to add a spinlock to
 * blk_mq_tags in a KMI-compliant way (no changes could be made to
 * struct blk_mq_tags itself).
 */
struct ext_blk_mq_tags {
	struct blk_mq_tags tags;

	/*
	 * Used to clear the request references in rqs[] before freeing a
	 * request pool.
	 */
	spinlock_t lock;
};
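
/*
 * Illustrative sketch only (not part of the original interface): since
 * the lock lives in the wrapper, code holding a plain blk_mq_tags
 * pointer can recover the extended structure with container_of():
 *
 *	struct ext_blk_mq_tags *etags =
 *		container_of(tags, struct ext_blk_mq_tags, tags);
 *
 *	spin_lock_irqsave(&etags->lock, flags);
 *	...
 *	spin_unlock_irqrestore(&etags->lock, flags);
 */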

extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					    unsigned int reserved_tags,
					    int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);

extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
			   struct blk_mq_ctx *ctx, unsigned int tag);
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_tags **tags,
				   unsigned int depth, bool can_grow);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
				void *priv);

/*
 * Pick a wait queue for a tag waiter: internal allocations without a
 * hardware context always use the first wait queue; everything else
 * rotates through the wait queues via hctx->wait_index to spread the
 * waiters out.
 */
static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}
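
/*
 * Illustrative sketch only: a would-be tag waiter typically picks its
 * wait queue with bt_wait_ptr() and parks there until a tag is freed:
 *
 *	struct sbq_wait_state *ws = bt_wait_ptr(bt, data->hctx);
 *
 *	prepare_to_wait_exclusive(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 *	...
 *	finish_wait(&ws->wait, &wait);
 */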

enum {
	BLK_MQ_TAG_FAIL	= -1U,			/* returned when no tag is available */
	BLK_MQ_TAG_MIN	= 1,			/* smallest allowed tag set depth */
	BLK_MQ_TAG_MAX	= BLK_MQ_TAG_FAIL - 1,	/* largest allowed tag set depth */
};
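
/*
 * Illustrative sketch only: a failed allocation is reported as
 * BLK_MQ_TAG_FAIL, and each successful blk_mq_get_tag() must be paired
 * with a blk_mq_put_tag() once the tag is no longer needed:
 *
 *	unsigned int tag = blk_mq_get_tag(data);
 *
 *	if (tag == BLK_MQ_TAG_FAIL)
 *		return NULL;
 *	...
 *	blk_mq_put_tag(data->hctx, tags, data->ctx, tag);
 */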

extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

/*
 * Account a hardware queue as actively using a shared tag set, and drop
 * that accounting again once it goes idle. Both are no-ops unless the
 * tag set is shared (BLK_MQ_F_TAG_SHARED); active_queues is what lets
 * the allocator divide the tag space fairly among its users.
 */
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}
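
/*
 * Illustrative sketch only: on a shared tag set the core marks a queue
 * busy around tag allocation and idles it again once it has been quiet
 * for a while, so that active_queues reflects how many users currently
 * compete for the tag space:
 *
 *	blk_mq_tag_busy(hctx);
 *	tag = blk_mq_get_tag(&data);
 *	...
 *	blk_mq_tag_idle(hctx);
 */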

/*
 * This helper should only be used by the flush request to share the tag
 * with the request it was cloned from; the two requests cannot be in
 * flight at the same time. The caller has to make sure the tag can't be
 * freed.
 */
static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
				     unsigned int tag, struct request *rq)
{
	hctx->tags->rqs[tag] = rq;
}
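
/*
 * Illustrative sketch only, modeled on the flush machinery in
 * blk-flush.c: the flush request borrows the tag of the request it was
 * cloned from and takes over that rqs[] slot while it is in flight:
 *
 *	flush_rq->tag = first_rq->tag;
 *	blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
 */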

/* Reserved tags occupy the low end [0, nr_reserved_tags) of the tag space. */
static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

#endif