/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

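/*
 * Illustrative sketch (not part of this header): bdi_get() and
 * bdi_get_by_id() pair with bdi_put(); the NULL check below assumes
 * bdi_get_by_id() returns NULL when no bdi with that id exists:
 *
 *	struct backing_dev_info *bdi = bdi_get_by_id(id);
 *
 *	if (bdi) {
 *		... use bdi ...
 *		bdi_put(bdi);
 *	}
 */
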
__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

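/*
 * Illustrative lifecycle sketch; the "foo%d" name and the instance
 * variable are assumed for the example.  A driver that owns its bdi
 * typically allocates, registers, and later tears it down like this:
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *
 *	err = bdi_register(bdi, "foo%d", instance);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */
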
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

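/*
 * Illustrative usage sketch (WB_RECLAIMABLE is a real wb_stat_item; the
 * surrounding context is assumed).  A page turning dirty and later being
 * cleaned would bracket its lifetime with:
 *
 *	inc_wb_stat(wb, WB_RECLAIMABLE);
 *	...
 *	dec_wb_stat(wb, WB_RECLAIMABLE);
 *
 * wb_stat() is a cheap batched read that may be off by up to
 * wb_stat_error(); wb_stat_sum() folds all per-cpu deltas for an exact
 * but more expensive sum.
 */
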
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

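/*
 * Illustrative sketch of how the error bound is consumed: when a dirty
 * threshold is comparable to the error of the cheap wb_stat() read,
 * callers are expected to fall back to the exact wb_stat_sum().  The
 * thresh and reclaimable variables below are assumed:
 *
 *	if (thresh < 2 * wb_stat_error())
 *		reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
 *	else
 *		reclaimable = wb_stat(wb, WB_RECLAIMABLE);
 */
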
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK		(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT		(1 << 1)
#define BDI_CAP_STRICTLIMIT		(1 << 2)

extern struct backing_dev_info noop_backing_dev_info;

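/*
 * Illustrative sketch: a filesystem that does its own writeback
 * accounting and wants strict dirty limiting would override the
 * defaults after allocation (assumed example, modeled on FUSE-like
 * setups):
 *
 *	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_STRICTLIMIT;
 */
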
int bdi_init(struct backing_dev_info *bdi);

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_disk->bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	return wb->congested & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

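/*
 * Illustrative sketch: dirty accounting paths typically gate on the test
 * above before touching wb state (the surrounding locking is assumed):
 *
 *	if (mapping_can_writeback(mapping))
 *		inc_wb_stat(inode_to_wb(mapping->host), WB_RECLAIMABLE);
 */
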
#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg and
 * iocg have to be on the default hierarchy.  Test whether all conditions are
 * met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

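/*
 * Illustrative sketch: the wb returned above holds a reference which the
 * caller drops with wb_put() (GFP_NOFS is chosen arbitrarily for the
 * example):
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create_current(bdi, GFP_NOFS);
 *	if (wb) {
 *		... account or throttle against wb ...
 *		wb_put(wb);
 *	}
 */
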
/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	/*
	 * If wbc does not have inode attached, it means cgroup writeback was
	 * disabled when wbc started. Just use the default wb in that case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}

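/*
 * Illustrative sketch of the transaction pair above (the stat update in
 * the middle is an assumed example; any non-sleeping access works):
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	inc_wb_stat(wb, WB_RECLAIMABLE);
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */
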
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

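/*
 * Illustrative sketch: writers commonly back off while the device is
 * congested (HZ/50 is an assumed timeout; some callers use HZ/10):
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */
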
const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */