/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_writeback_running,	/* Writeback is in progress */
	BDI_unused,		/* Available bits start here */
};

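/*
 * The state word is shared between contexts, so the bits above are only
 * ever manipulated with atomic bitops (see the comment on
 * backing_dev_info.state below).  A minimal sketch, purely illustrative
 * and not part of this header:
 *
 *	if (!test_and_set_bit(BDI_writeback_running, &bdi->state)) {
 *		do_writeback(bdi);	// hypothetical worker
 *		clear_bit(BDI_writeback_running, &bdi->state);
 *	}
 */
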
typedef int (congested_fn)(void *, int);

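/*
 * Stacked drivers (md/dm) supply a congested_fn so that congestion queries
 * can be forwarded to the underlying devices.  A hedged sketch of the shape
 * of such a callback ("my_dev" and "lower_bdi" are hypothetical names):
 *
 *	static int my_congested(void *data, int bdi_bits)
 *	{
 *		struct my_dev *dev = data;	// passed via congested_data
 *
 *		// congested if the underlying device's queue is congested
 *		return bdi_congested(&dev->lower_bdi, bdi_bits);
 *	}
 */
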
enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	BDI_DIRTIED,
	BDI_WRITTEN,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

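/*
 * The batch grows logarithmically with the CPU count: on an 8-CPU machine,
 * ilog2(8) == 3, so BDI_STAT_BATCH == 8 * (1 + 3) == 32, i.e. each CPU may
 * accumulate up to 32 counts locally before folding them into the global
 * percpu_counter total.
 */
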
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;	/* last old data flush */
	unsigned long last_active;	/* last time bdi thread was active */

	struct task_struct *task;	/* writeback thread */
	struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	spinlock_t list_lock;		/* protects the b_* lists */
};

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All the bdi tasks' dirty rates are capped below it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects work_list */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);

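/*
 * Typical lifecycle for a driver-owned bdi (a hedged sketch; "my_bdi",
 * "id" and the error labels are hypothetical):
 *
 *	err = bdi_init(&my_bdi);
 *	if (err)
 *		goto out;
 *	err = bdi_register(&my_bdi, NULL, "mydev%d", id);
 *	if (err)
 *		goto out_destroy;
 *	...
 *	bdi_unregister(&my_bdi);
 *	bdi_destroy(&my_bdi);
 *
 * bdi_setup_and_register() roughly wraps the first two steps for callers
 * that do not need a device parent.
 */
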
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct list_head bdi_pending_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

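/*
 * Usage sketch (illustrative): accounting code bumps these counters as
 * pages change state, and reads them back when sizing writeback work.
 *
 *	inc_bdi_stat(bdi, BDI_WRITEBACK);	// page went under writeback
 *	...
 *	dec_bdi_stat(bdi, BDI_WRITEBACK);	// writeback completed
 *
 *	if (bdi_stat(bdi, BDI_RECLAIMABLE) > limit)	// "limit" hypothetical
 *		...throttle the dirtier...
 *
 * bdi_stat() is a cheap read that may be off by up to bdi_stat_error();
 * bdi_stat_sum() folds in all per-CPU deltas for an exact but costlier
 * value.
 */
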
extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * The BDI_CAP_*_MAP flags let mmap() on !MMU systems choose between mapping
 * the device directly and copying immediately for MAP_PRIVATE, which matters
 * especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

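/*
 * Example (a hedged sketch): an in-memory filesystem whose pages must
 * neither be written back nor dirty-accounted could declare its bdi
 * roughly as follows ("my_bdi" and "myfs" are hypothetical):
 *
 *	static struct backing_dev_info my_bdi = {
 *		.name		= "myfs",
 *		.ra_pages	= 0,	// no readahead
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK |
 *				  BDI_CAP_MAP_COPY,
 *	};
 */
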
#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);

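/*
 * The classic backoff pattern (a sketch, not a prescription): a caller
 * that finds the bdi congested sleeps briefly instead of piling on more
 * I/O, and congestion_wait() returns early if congestion clears first.
 *
 *	if (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */
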
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

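/*
 * bdi_sched_wait() matches the action-callback shape expected by
 * wait_on_bit() in this kernel era; a hedged usage sketch:
 *
 *	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
 *		    TASK_UNINTERRUPTIBLE);
 */
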
#endif		/* _LINUX_BACKING_DEV_H */