#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
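
/*
 * For example, with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM
 * all enabled, FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * i.e. one item per configured zone type, in the same order as the
 * zone indices in enum zone_type.
 */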

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
#ifdef CONFIG_UNEVICTABLE_LRU
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
#endif
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}
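
/*
 * count_vm_event() disables preemption around the per-cpu access
 * (get_cpu_var()/put_cpu()), while the __count_vm_event() variant may
 * only be used when the caller already runs with preemption or
 * interrupts disabled. A typical call site is simply, e.g.:
 *
 *	count_vm_event(PGFAULT);
 */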

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
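
/*
 * The macro above relies on the FOR_ALL_ZONES() layout: the per-zone
 * event items appear in zone order, so item##_NORMAL - ZONE_NORMAL is
 * the first item of the group and adding zone_idx(zone) selects the
 * item for that zone. E.g. __count_zone_vm_events(PGALLOC, zone, 1)
 * counts PGALLOC_HIGH when zone_idx(zone) == ZONE_HIGHMEM.
 */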

/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
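
/*
 * The clamping above is needed because on SMP these counters are
 * updated from per-cpu differentials (see refresh_cpu_vm_stats()), so
 * a reader can observe a transiently negative sum while deltas are
 * still pending on other cpus.
 */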

extern unsigned long global_lru_pages(void);

static inline unsigned long zone_lru_pages(struct zone *zone)
{
	return (zone_page_state(zone, NR_ACTIVE_ANON)
		+ zone_page_state(zone, NR_ACTIVE_FILE)
		+ zone_page_state(zone, NR_INACTIVE_ANON)
		+ zone_page_state(zone, NR_INACTIVE_FILE));
}
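
/*
 * Note that only the four evictable LRU lists are summed here; with
 * CONFIG_UNEVICTABLE_LRU, pages on the unevictable list
 * (NR_UNEVICTABLE) are deliberately excluded.
 */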

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *);
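
/*
 * zone_statistics() compares the preferred zone with the zone the page
 * was actually allocated from and bumps the NUMA_HIT/NUMA_MISS family
 * of zone counters accordingly (NUMA configurations only).
 */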

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
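
/*
 * The sub_* wrappers simply negate the delta, so e.g.
 * sub_zone_page_state(zone, NR_FILE_PAGES, nr) is equivalent to
 * mod_zone_page_state(zone, NR_FILE_PAGES, -nr).
 */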

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */