/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags,
					unsigned int flags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
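
/*
 * Illustrative sketch, not from the original file: blk_mq_map_queue_type() is
 * the low-level lookup used when building per-CPU mappings, e.g. walking every
 * possible CPU for one hctx type. The snippet below is a hypothetical usage
 * example only:
 *
 *	unsigned int cpu;
 *	struct blk_mq_hw_ctx *hctx;
 *
 *	for_each_possible_cpu(cpu)
 *		hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, cpu);
 */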

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that a REQ_HIPRI request is only submitted when
	 * polling is enabled.
	 */
	if (flags & REQ_HIPRI)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}
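
/*
 * Illustrative sketch, not from the original file: in the submission path the
 * software queue of the local CPU plus the bio's command flags are enough to
 * pick a hardware queue. The two lines below are a hypothetical usage example:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * A REQ_HIPRI bio selects HCTX_TYPE_POLL, a plain read selects HCTX_TYPE_READ,
 * and everything else falls back to HCTX_TYPE_DEFAULT; if the driver did not
 * register a dedicated map for a type, the ctx->hctxs[] entry typically points
 * back at the default hardware queue.
 */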

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_cancel_work_sync(struct request_queue *q);
void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
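
/*
 * Illustrative note, not from the original file: __blk_mq_get_ctx() looks up
 * the software queue of an explicit CPU, while blk_mq_get_ctx() uses whatever
 * CPU the caller happens to be running on; no preempt_disable() is required
 * because every possible CPU has a persistent ctx. Hypothetical example:
 *
 *	struct blk_mq_ctx *local_ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_ctx *cpu0_ctx = __blk_mq_get_ctx(q, 0);
 */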

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q);
}

static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return true;
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_dec(&hctx->nr_active);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices, this
 * ordering change can cause write BIO failures with zoned block devices as
 * these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the target
 * request queue is for a zoned block device and the BIO to plug is a write
 * operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}
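
/*
 * Illustrative sketch, not from the original file: a submission-path caller
 * checks the returned plug before deferring work to it, so writes to zoned
 * devices simply fall through to immediate insertion. Hypothetical example:
 *
 *	struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *	if (plug)
 *		list_add_tail(&rq->queuelist, &plug->mq_list);
 *	else
 *		blk_mq_sched_insert_request(rq, false, true, false);
 */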

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		struct request_queue *q = hctx->queue;
		struct blk_mq_tag_set *set = q->tag_set;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
		users = atomic_read(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
		users = atomic_read(&hctx->tags->active_queues);
	}

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
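
/*
 * Worked example for the fair-share computation above (illustrative numbers,
 * not from the original file): with bt->sb.depth == 128 and users == 3, the
 * rounded-up division gives (128 + 3 - 1) / 3 == 43, so each active queue may
 * keep up to 43 requests in flight; with bt->sb.depth == 16 and users == 8 the
 * raw share would be 2, but the max(..., 4U) clamp still allows 4 tags each.
 */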

#endif