/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_exit(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_destroy(struct backing_dev_info *bdi);

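/*
 * Example (illustrative sketch, not part of the kernel API): the usual
 * lifecycle for a driver that embeds its own bdi.  The "foo" name prefix
 * and the surrounding error handling are hypothetical.
 *
 *	struct backing_dev_info bdi;
 *	int err;
 *
 *	err = bdi_setup_and_register(&bdi, "foo");
 *	if (err)
 *		return err;
 *	...
 *	bdi_destroy(&bdi);
 */
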
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

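/*
 * Example (illustrative sketch): kicking background writeback once dirty
 * pages cross the background threshold, as the dirty throttling code
 * does.  writeback_in_progress() is defined further down in this header.
 *
 *	if (!writeback_in_progress(wb))
 *		wb_start_background_writeback(wb);
 */
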
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

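/*
 * Example (illustrative sketch): accounting a page entering and leaving
 * writeback against a wb, using the WB_WRITEBACK item from enum
 * wb_stat_item.  Because of the per-cpu batching above, wb_stat() is
 * approximate; wb_stat_error() below gives the bound.
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);
 *	...
 *	dec_wb_stat(wb, WB_WRITEBACK);
 *	nr = wb_stat(wb, WB_WRITEBACK);
 */
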
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Device requires stable page contents while
 *			   pages are under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

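/*
 * Example (illustrative sketch): a ramfs-like bdi that neither writes
 * pages back nor lets them count toward dirty accounting, mirroring how
 * noop_backing_dev_info is set up.  The structure name is hypothetical.
 *
 *	static struct backing_dev_info example_bdi = {
 *		.name		= "example",
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
 *	};
 */
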
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

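/*
 * Example (illustrative sketch): a caller that only takes the per-cgroup
 * wb route when the conditions above hold, falling back to the bdi's
 * embedded wb otherwise.  memcg_css and gfp are assumed to be supplied
 * by the caller.
 *
 *	if (inode_cgwb_enabled(inode))
 *		wb = wb_get_create(inode_to_bdi(inode), memcg_css, gfp);
 *	else
 *		wb = &inode_to_bdi(inode)->wb;
 */
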
/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

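/*
 * Example (illustrative sketch): a wb obtained above holds a reference
 * and must be released with wb_put() from backing-dev-defs.h once the
 * caller is done with it.
 *
 *	wb = wb_get_create_current(bdi, GFP_KERNEL);
 *	if (wb) {
 *		...
 *		wb_put(wb);
 *	}
 */
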
/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		spin_unlock_irqrestore(&inode->i_mapping->tree_lock,
				       cookie->flags);

	rcu_read_unlock();
}

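/*
 * Example (illustrative sketch): bracketing an unlocked wb stat update in
 * a begin/end transaction, similar to how the dirty page accounting code
 * uses this pair.  No sleeping is allowed between begin and end.
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	inc_wb_stat(wb, WB_DIRTIED);
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */
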
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

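/*
 * Example (illustrative sketch): backing off when the underlying device's
 * async (write) queue is congested, a pattern used by various writers.
 * The HZ/50 delay is a common choice in callers, not a requirement.
 *
 *	if (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */
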
extern const char *bdi_unknown_name;

static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return dev_name(bdi->dev);
}

#endif	/* _LINUX_BACKING_DEV_H */