/* SPDX-License-Identifier: GPL-2.0 */
#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"
#include "blk-rq-qos.h"

enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */
	WBT_DISCARD	= 8,	/* discard */

	WBT_NR_BITS	= 4,	/* number of bits */
};

enum {
	WBT_RWQ_BG	= 0,
	WBT_RWQ_KSWAPD,
	WBT_RWQ_DISCARD,
	WBT_NUM_RWQ,
};
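
/*
 * Illustrative sketch only, not part of the kernel API: the wbt_flags
 * above are or'd together per request (a throttled write issued by
 * kswapd carries WBT_TRACKED | WBT_KSWAPD, for instance), and the
 * accounting bits are expected to select one of the rq_wait classes
 * above, along the lines of the lookup done in blk-wbt.c. The helper
 * name below is made up purely for documentation.
 */
static inline unsigned int wbt_flags_to_rwq_sketch(enum wbt_flags flags)
{
	if (flags & WBT_KSWAPD)
		return WBT_RWQ_KSWAPD;
	if (flags & WBT_DISCARD)
		return WBT_RWQ_DISCARD;
	return WBT_RWQ_BG;	/* plain throttled writeback */
}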

/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,
	WBT_STATE_ON_MANUAL	= 2,
	WBT_STATE_OFF_DEFAULT,
};

struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;

	u64 sync_issue;
	void *sync_cookie;

	unsigned int wc;			/* device write cache enabled */

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;
	struct rq_qos rqos;
	struct rq_wait rq_wait[WBT_NUM_RWQ];
	struct rq_depth rq_depth;
};
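
/*
 * Illustrative sketch only: how the depth knobs above are expected to
 * combine into a per-class inflight limit. Writes issued by kswapd must
 * make progress and get the full scaled depth, discards are held to the
 * background depth, and other throttled writes fall back to the normal
 * depth. The helper name is hypothetical; the real limit calculation in
 * blk-wbt.c also looks at request flags and recent completion activity.
 */
static inline unsigned int wbt_depth_limit_sketch(struct rq_wb *rwb,
						  enum wbt_flags flags)
{
	if (flags & WBT_KSWAPD)
		return rwb->rq_depth.max_depth;
	if (flags & WBT_DISCARD)
		return rwb->wb_background;
	return rwb->wb_normal;
}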

static inline struct rq_wb *RQWB(struct rq_qos *rqos)
{
	return container_of(rqos, struct rq_wb, rqos);
}

static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}


#ifdef CONFIG_BLK_WBT

int wbt_init(struct request_queue *);
void wbt_disable_default(struct request_queue *);
void wbt_enable_default(struct request_queue *);

u64 wbt_get_min_lat(struct request_queue *q);
void wbt_set_min_lat(struct request_queue *q, u64 val);

void wbt_set_write_cache(struct request_queue *, bool);

u64 wbt_default_latency_nsec(struct request_queue *);
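
/*
 * Illustrative usage sketch, assuming writeback throttling has already
 * been initialized on @q; the wrapper name is hypothetical. It reports
 * the configured minimum latency target, falling back to the device
 * default (as wbt_init() does) when no value has been set.
 */
static inline u64 wbt_effective_min_lat_sketch(struct request_queue *q)
{
	u64 lat = wbt_get_min_lat(q);

	return lat ? lat : wbt_default_latency_nsec(q);
}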

#else

static inline void wbt_track(struct request *rq, enum wbt_flags flags)
{
}
static inline int wbt_init(struct request_queue *q)
{
	return -EINVAL;
}
static inline void wbt_disable_default(struct request_queue *q)
{
}
static inline void wbt_enable_default(struct request_queue *q)
{
}
static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
{
}
static inline u64 wbt_get_min_lat(struct request_queue *q)
{
	return 0;
}
static inline void wbt_set_min_lat(struct request_queue *q, u64 val)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}

#endif /* CONFIG_BLK_WBT */

#endif