/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
};
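
/*
 * These bits live in bdi_writeback.state below and must only be
 * manipulated with atomic bitops. An illustrative, simplified sketch
 * (patterned after wb_wakeup() in fs/fs-writeback.c; bdi_wq is declared
 * in linux/backing-dev.h):
 *
 *	set_bit(WB_has_dirty_io, &wb->state);
 *	if (test_bit(WB_registered, &wb->state))
 *		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 */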

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
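
/*
 * Stacking drivers (md/dm) install a congested_fn so congestion queries
 * against the top-level bdi can be forwarded to the underlying devices.
 * A minimal sketch, assuming a hypothetical driver with a single lower
 * bdi (my_device and lower_bdi are illustrative names, not a real
 * driver):
 *
 *	static int my_congested(void *data, int bdi_bits)
 *	{
 *		struct my_device *dev = data;
 *
 *		return bdi_congested(dev->lower_bdi, bdi_bits);
 *	}
 *
 * wired up via bdi->congested_fn = my_congested and
 * bdi->congested_data = dev.
 */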

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
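
/*
 * The stats above are kept in per-CPU counters (see wb->stat[] below);
 * WB_STAT_BATCH bounds how far a CPU-local delta may drift before it is
 * folded into the global count. The accessor in linux/backing-dev.h is
 * essentially:
 *
 *	static inline void __add_wb_stat(struct bdi_writeback *wb,
 *					 enum wb_stat_item item, s64 amount)
 *	{
 *		percpu_counter_add_batch(&wb->stat[item], amount,
 *					 WB_STAT_BATCH);
 *	}
 */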

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked
 * using the following struct, which is created on demand, indexed by
 * blkcg ID on its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	atomic_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
					 * on bdi unregistration. For memcg-wb
					 * internal use only! */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};

/*
 * Each wb (bdi_writeback) can perform writeback operations, is measured
 * and throttled, independently.  Without cgroup writeback, each bdi
 * (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg
 * may change as blkcg is disabled and enabled higher up in the hierarchy,
 * a wb is tested for blkcg after lookup and removed from the index on
 * mismatch so that a new wb for the combination can be created.
 */
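
/*
 * Illustrative lookup flow, sketched around wb_get_create() (declared in
 * linux/backing-dev.h, defined in mm/backing-dev.c); not verbatim from
 * any call site:
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *	if (wb) {
 *		... account/issue writeback against wb ...
 *		wb_put(wb);
 *	}
 */
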
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All the bdi tasks' dirty rate will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
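
/*
 * How dirty_ratelimit is consumed: balance_dirty_pages() scales it by a
 * position ratio to get the task's allowed dirtying rate, then sleeps
 * long enough to stay under that rate. Roughly (simplified from
 * mm/page-writeback.c, ignoring clamping and think-time compensation):
 *
 *	task_ratelimit = dirty_ratelimit * pos_ratio;
 *	pause = HZ * pages_dirtied / task_ratelimit;
 *
 * e.g. dirtying 16 pages against a task_ratelimit of 1600 pages/s gives
 * a pause of HZ/100 jiffies, i.e. about 10ms.
 */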

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;	/* max allowed IO size */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	const char *name;

	struct kref refcnt;	/* Reference counter for the structure */
	unsigned int capabilities; /* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
	 * any dirty wbs, which bdi_has_dirty_io() depends on.
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;  /* the root writeback info for this bdi */
	struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
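
/*
 * The sync argument takes the BLK_RW_* values above; e.g. a driver whose
 * async write queue is filling up might do (illustrative sketch):
 *
 *	set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
 *	... queue drains ...
 *	clear_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
 *
 * which sets and clears WB_async_congested on the root wb's shared
 * congested state.
 */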

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
};
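
/*
 * A wb_lock_cookie is opaque to callers: it is filled in by
 * unlocked_inode_to_wb_begin() and handed back to
 * unlocked_inode_to_wb_end() (both in linux/backing-dev.h).
 * Illustrative usage:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... the inode's wb association is stable here ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */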

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}
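
/*
 * Only non-root (cgroup) wb's are refcounted; the root wb is embedded in
 * its bdi and shares its lifetime, so the helpers here short-circuit for
 * it. Typical pairing (sketch):
 *
 *	if (wb_tryget(wb)) {
 *		... use wb ...
 *		wb_put(wb);
 *	}
 */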

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (WARN_ON_ONCE(!wb->bdi)) {
		/*
		 * A driver bug might cause a file to be removed before bdi was
		 * initialized.
		 */
		return;
	}

	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* __LINUX_BACKING_DEV_DEFS_H */