/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,		/* writeback throttling, block/blk-wbt.c */
	RQ_QOS_LATENCY,		/* io.latency controller, block/blk-iolatency.c */
	RQ_QOS_COST,		/* iocost controller, block/blk-iocost.c */
};

struct rq_wait {
	wait_queue_head_t wait;		/* tasks throttled waiting for inflight room */
	atomic_t inflight;		/* IOs currently charged by the policy */
};

struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};
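
/*
 * A policy normally embeds struct rq_qos in its own state and recovers
 * the container with container_of() inside its hooks. A hypothetical
 * sketch (struct my_qos and MY_QOS() are illustrative, not part of this
 * header):
 *
 *	struct my_qos {
 *		struct rq_qos rqos;
 *		struct rq_wait rqw;
 *	};
 *
 *	static inline struct my_qos *MY_QOS(struct rq_qos *rqos)
 *	{
 *		return container_of(rqos, struct my_qos, rqos);
 *	}
 */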

struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
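
/*
 * Every hook is optional: the __rq_qos_*() helpers in blk-rq-qos.c walk
 * the queue's rq_qos list and skip hooks left NULL. A minimal policy
 * sketch (names hypothetical):
 *
 *	static struct rq_qos_ops my_qos_ops = {
 *		.throttle	= my_qos_throttle,
 *		.done		= my_qos_done,
 *		.exit		= my_qos_exit,
 *	};
 */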

struct rq_depth {
	unsigned int max_depth;		/* currently allowed queueing depth */

	int scale_step;			/* positive steps shrink max_depth */
	bool scaled_max;

	unsigned int queue_depth;	/* depth reported for the device */
	unsigned int default_depth;
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}
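
/*
 * The inflight counter is owned by the policy: the throttle path
 * typically bumps it with rq_wait_inc_below() and the completion path
 * drops it and wakes throttled submitters. Hypothetical completion
 * side (MY_QOS() as sketched above, my_limit() hypothetical):
 *
 *	static void my_qos_done(struct rq_qos *rqos, struct request *rq)
 *	{
 *		struct my_qos *mq = MY_QOS(rqos);
 *
 *		if (atomic_dec_return(&mq->rqw.inflight) < my_limit(mq))
 *			wake_up_all(&mq->rqw.wait);
 *	}
 */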

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * No IO can be in-flight when adding rqos, so freeze queue, which
	 * is fine since we only support rq_qos for blk-mq queue.
	 *
	 * Reuse ->queue_lock for protecting against other concurrent
	 * rq_qos adding/deleting
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}
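
/*
 * A policy registers itself by filling in ->q, ->id and ->ops on its
 * embedded rq_qos and linking it in. Hypothetical init path (my_qos
 * and my_qos_ops as sketched above):
 *
 *	struct my_qos *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
 *
 *	if (!mq)
 *		return -ENOMEM;
 *	rq_wait_init(&mq->rqw);
 *	mq->rqos.q = q;
 *	mq->rqos.id = RQ_QOS_WBT;
 *	mq->rqos.ops = &my_qos_ops;
 *	rq_qos_add(q, &mq->rqos);
 */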

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See comment in rq_qos_add() about freezing queue & using
	 * ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	blk_mq_debugfs_unregister_rqos(rqos);
}
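
/*
 * Teardown mirrors the init sketch above: rq_qos_exit() (run when the
 * queue is released) pops each policy and invokes its ->exit() hook,
 * while an explicit disable path unlinks with rq_qos_del() before
 * freeing the containing structure.
 */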

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
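
/*
 * rq_qos_wait() sleeps until acquire_inflight_cb() succeeds; the
 * callback must try to take an inflight slot without blocking, and
 * cleanup_cb() releases a slot handed over by a racing waker that the
 * sleeper no longer needs. Hypothetical callback built on
 * rq_wait_inc_below() (my_limit() and my_cleanup_cb hypothetical):
 *
 *	static bool my_inflight_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		struct my_qos *mq = private_data;
 *
 *		return rq_wait_inc_below(rqw, my_limit(mq));
 *	}
 *
 *	...
 *	rq_qos_wait(&mq->rqw, mq, my_inflight_cb, my_cleanup_cb);
 */
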
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}
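
/*
 * Hook ordering as driven by the blk-mq submission path: throttle runs
 * at bio submission time and may sleep, track/merge run when the bio is
 * attached to a new or an existing request, and issue/requeue/done
 * bracket dispatch and completion. Each wrapper is a no-op while
 * q->rq_qos is NULL, so queues without an active policy pay only a
 * pointer check.
 */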

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_merge(q->rq_qos, rq, bio);
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif