/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}
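
/*
 * Illustrative sketch (not part of the API proper): the usual bdi
 * lifecycle for a driver managing its own backing device.  The "mydev"
 * name and @minor variable are hypothetical:
 *
 *	bdi = bdi_alloc(GFP_KERNEL);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "mydev%d", minor);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */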

void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);
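
/*
 * Illustrative sketch: writeback is kicked asynchronously rather than
 * performed in the caller's context.  @nr and the reason are hypothetical:
 *
 *	if (wb_has_dirty_io(wb))
 *		wb_start_writeback(wb, nr, true, WB_REASON_SYNC);
 *
 * or, to merely get below the background dirty threshold:
 *
 *	wb_start_background_writeback(wb);
 */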

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}
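
/*
 * Illustrative sketch: accounting a page entering and leaving writeback.
 * The mm core does roughly the equivalent of:
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);		page handed to the device
 *	...
 *	dec_wb_stat(wb, WB_WRITEBACK);		writeback completed
 *	wb_writeout_inc(wb);			count the completed writeout
 */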

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
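
/*
 * Illustrative sketch: wb_stat() reads a batched percpu counter, so
 * comparisons against a limit should allow for wb_stat_error().
 * @thresh is hypothetical:
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) + wb_stat_error(wb) < thresh)
 *		...		definitely below the threshold
 */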

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
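
/*
 * Illustrative sketch: the ratios bound this bdi's share of the global
 * dirty limit, in percent (also settable via the bdi's sysfs min_ratio
 * and max_ratio files).  Capping a device at a fifth of dirty memory:
 *
 *	bdi_set_max_ratio(bdi, 20);
 */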

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines the three
 * into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:	Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:	Don't write pages back
 * BDI_CAP_NO_ACCT_WB:		Don't automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 *
 * BDI_CAP_CGROUP_WRITEBACK:	Supports cgroup-aware writeback
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
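
/*
 * Illustrative sketch: a RAM-backed filesystem whose pages can never be
 * cleaned (this is what noop_backing_dev_info uses) sets all three flags
 * at once:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 */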

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);
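
/*
 * Illustrative sketch: reclaim-style retry loops back off on a congested
 * device instead of spinning.  try_to_make_progress() is hypothetical;
 * the BLK_RW_ASYNC / HZ/10 pairing matches common callers in mm/:
 *
 *	while (!try_to_make_progress())
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 */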

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}
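
/*
 * Illustrative sketch: the dirty accounting paths use the mapping helpers
 * as guards, roughly as mm/page-writeback.c does (among other counters):
 *
 *	if (mapping_cap_account_dirty(mapping))
 *		inc_wb_stat(wb, WB_RECLAIMABLE);
 */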

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
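
/*
 * Illustrative sketch: a caller pairs the lookup with wb_put() (from
 * backing-dev-defs.h) once the reference is no longer needed:
 *
 *	wb = wb_get_create_current(bdi, GFP_ATOMIC);
 *	if (wb) {
 *		...		use @wb
 *		wb_put(wb);
 *	}
 */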

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards
 * and can't sleep during the transaction.  IRQs may or may not be disabled
 * on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with the store_release in inode_switch_wbs_work_fn();
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		spin_unlock_irqrestore(&inode->i_mapping->tree_lock,
				       cookie->flags);

	rcu_read_unlock();
}
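
/*
 * Illustrative sketch: the stat update paths use the transaction to pin
 * the inode<->wb association without taking a lock in the common case:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	...		update per-wb stats, no sleeping
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */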

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
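
/*
 * Illustrative sketch: readahead-style callers skip optional I/O when the
 * device is already backed up, roughly:
 *
 *	if (inode_read_congested(inode))
 *		return;		skip speculative readahead
 */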

#endif	/* _LINUX_BACKING_DEV_H */