/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pdflush,		/* A pdflush thread is working this device */
	BDI_write_congested,	/* The write queue is getting full */
	BDI_read_congested,	/* The read queue is getting full */
	BDI_unused,		/* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	NR_BDI_STAT_ITEMS
};

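/*
 * Per-CPU batch size for the bdi_stat counters.  A worked example,
 * assuming a 4-CPU machine: nr_cpu_ids == 4 and ilog2(4) == 2, so the
 * batch is 8 * (1 + 2) == 24; each CPU accumulates up to that many
 * events locally before folding them into the global percpu_counter.
 */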
#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct backing_dev_info {
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */
	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
	void *unplug_io_data;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct device *dev;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
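
/*
 * Illustrative registration sketch (mydev and minor are hypothetical,
 * not part of this API): a driver typically pairs bdi_init() with
 * bdi_destroy(), and bdi_register() with bdi_unregister():
 *
 *	err = bdi_init(&mydev->bdi);
 *	if (!err)
 *		err = bdi_register(&mydev->bdi, NULL, "mydev%d", minor);
 *	...
 *	bdi_unregister(&mydev->bdi);
 *	bdi_destroy(&mydev->bdi);
 */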

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}
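
/*
 * Usage sketch (illustrative, not taken from this file): the
 * double-underscore variants assume the caller has already disabled
 * interrupts, while the plain variants disable them internally.
 * Writeback code would typically do:
 *
 *	inc_bdi_stat(bdi, BDI_WRITEBACK);	page queued for I/O
 *	...
 *	dec_bdi_stat(bdi, BDI_WRITEBACK);	I/O completed
 *
 * bdi_stat() reads the cheap, approximate counter value; bdi_stat_sum()
 * folds in every CPU's local delta for an exact (but slower) sum.
 */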

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}
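
/*
 * Worked example (assuming a 4-CPU SMP kernel): with BDI_STAT_BATCH ==
 * 24, each CPU may hold up to 24 not-yet-folded events, so bdi_stat()
 * can deviate from the true value by as much as 4 * 24 == 96.
 */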

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
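
/*
 * Example (an illustrative sketch): both ratios are expressed as
 * percentages of the global dirty threshold, so a device guaranteed at
 * least 5% of it and capped at 50% would be configured as:
 *
 *	bdi_set_min_ratio(bdi, 5);
 *	bdi_set_max_ratio(bdi, 50);
 */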

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines all three
 * into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * The next flags make it easier for !MMU mmap() to choose between mapping
 * a device directly and copying immediately for MAP_PRIVATE mappings,
 * especially on ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
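
/*
 * Illustrative example (ramfs_like_bdi is hypothetical): a RAM-backed
 * filesystem whose pages must never be written back or dirty-accounted
 * could declare its bdi as:
 *
 *	static struct backing_dev_info ramfs_like_bdi = {
 *		.ra_pages	= 0,
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
 *	};
 */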

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

extern struct backing_dev_info default_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_read_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_write_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_read_congested) |
				  (1 << BDI_write_congested));
}

void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
void set_bdi_congested(struct backing_dev_info *bdi, int rw);
long congestion_wait(int rw, long timeout);
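
/*
 * Throttling sketch (illustrative; the HZ/10 backoff is an assumption,
 * not mandated by this API): writers commonly poll the congestion state
 * and sleep while the device's write queue is full:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(WRITE, HZ / 10);
 */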


static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

#endif		/* _LINUX_BACKING_DEV_H */