/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_exit(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_destroy(struct backing_dev_info *bdi);

void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

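/*
 * Illustrative sketch: wb_stat() is the cheap, approximate reader, while
 * wb_stat_sum() folds in every CPU's pending delta and is correspondingly
 * more expensive.  Using the real WB_RECLAIMABLE item from
 * backing-dev-defs.h:
 *
 *	s64 approx = wb_stat(wb, WB_RECLAIMABLE);
 *	s64 precise = wb_stat_sum(wb, WB_RECLAIMABLE);
 */
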
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

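/*
 * Illustrative sketch (an assumed usage, not taken from this file): since
 * per-CPU updates are batched in steps of up to WB_STAT_BATCH, a threshold
 * comparison can allow for the drift:
 *
 *	if (wb_stat(wb, WB_WRITEBACK) < limit - wb_stat_error(wb))
 *		...safely below the limit...
 */
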
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  The device requires stable page writes; pages
 *			   must not be modified while under writeback.
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

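/*
 * Illustrative sketch: a RAM-backed filesystem whose pages should neither
 * be written back nor dirty-accounted could set up its bdi as:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *
 * which the bdi_cap_*() predicates further down then report.
 */
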
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb()'s lockdep check would bark here, so dereference
	 * i_wb directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		spin_unlock_irqrestore(&inode->i_mapping->tree_lock, cookie->flags);

	rcu_read_unlock();
}
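
/*
 * Illustrative sketch: a lockless reader brackets its wb access with the
 * begin/end pair and must not sleep in between:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	...use wb without sleeping...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */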

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
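
/*
 * Illustrative sketch: writers commonly back off a congested device by
 * polling one of the helpers above and sleeping in congestion_wait();
 * the HZ / 50 timeout is only an example value:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */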

#endif	/* _LINUX_BACKING_DEV_H */