/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}

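/*
 * A minimal sketch of the usual bdi lifecycle in a driver, assuming a
 * hypothetical "mydev%d" name format and index @idx:
 *
 *	bdi = bdi_alloc(GFP_KERNEL);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "mydev%d", idx);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);		// drops the reference from bdi_alloc()
 */
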
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

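/*
 * Sketch of how the stat helpers pair up, loosely modeled on the
 * dirty-page accounting in mm/page-writeback.c (illustrative only):
 *
 *	inc_wb_stat(wb, WB_RECLAIMABLE);	// page dirtied
 *	...
 *	dec_wb_stat(wb, WB_RECLAIMABLE);	// page cleaned
 *
 * wb_stat() is a fast, approximate per-cpu read; wb_stat_sum() is the
 * exact but more expensive sum across all CPUs.
 */
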
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK macro combines
 * these three flags for convenience.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
 *			   inefficient.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
#define BDI_CAP_SYNCHRONOUS_IO	0x00000040

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

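/*
 * Illustrative sketch: a bdi whose pages can never be cleaned by writing
 * them back (ramfs-like, or the noop bdi below) opts out of dirty
 * accounting and writeback in one go:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 */
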
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);

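/*
 * Typical backoff pattern (sketch): a reclaim or filesystem path that
 * sees a congested device sleeps briefly instead of spinning, e.g.:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ / 50);
 *
 * The sync argument (BLK_RW_SYNC or BLK_RW_ASYNC) selects which
 * congestion bit to wait on.
 */
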
static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
}

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

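/*
 * The returned wb carries a reference which the caller is expected to
 * drop with wb_put() when done, e.g. (sketch):
 *
 *	wb = wb_get_create_current(bdi, GFP_KERNEL);
 *	if (wb) {
 *		...
 *		wb_put(wb);
 *	}
 */
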
/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

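/*
 * Sketch of a locked lookup satisfying the rule above, using i_lock as
 * the protecting lock:
 *
 *	spin_lock(&inode->i_lock);
 *	wb = inode_to_wb(inode);
 *	...
 *	spin_unlock(&inode->i_lock);
 */
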
/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with the store_release in inode_switch_wbs_work_fn(); this
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() would complain about the missing locks, so
	 * dereference i_wb directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}

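/*
 * Typical transaction (sketch), mirroring the callers in
 * mm/page-writeback.c; the wb association is stable, and the caller must
 * not sleep, between begin and end:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */
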
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	refcount_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (refcount_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

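/*
 * Sketch of the common use: opportunistic work such as readahead backs
 * off instead of queueing more IO against an already congested device:
 *
 *	if (inode_read_congested(inode))
 *		return;		// skip the readahead, not worth waiting
 */
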
const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */