/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/vm_event.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

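/*
 * Period, in seconds, at which the per-cpu vmstat differentials are
 * folded back into the global counters; tunable via
 * /proc/sys/vm/stat_interval.
 */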
extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

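/*
 * Per-reclaim-pass bookkeeping: these counters are filled in by page
 * reclaim and consumed by the vmscan tracepoints and the writeback and
 * congestion heuristics in mm/vmscan.c.
 */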
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};
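
/*
 * The two dirty thresholds above are not backed by counters; they are
 * computed on demand and show up in /proc/vmstat as nr_dirty_threshold
 * and nr_dirty_background_threshold.
 */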

/*
 * Zone and node-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
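
/*
 * Each global array above is paired with per-cpu differentials (struct
 * per_cpu_zonestat/per_cpu_nodestat in mmzone.h) that mm/vmstat.c folds
 * back periodically, so a plain read may lag the true value by up to
 * the per-cpu thresholds, e.g.:
 *
 *	long nr_free = atomic_long_read(&vm_zone_stat[NR_FREE_PAGES]);
 *
 * Prefer the clamping accessors below over open-coded reads like this.
 */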

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
				enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

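/*
 * The accessors below clamp negative sums to zero: with unfolded per-cpu
 * deltas in flight, the global atomics can transiently dip below the
 * true value on SMP, so a small negative reading means "roughly zero".
 */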
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
				enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. To get those we must loop over all cpus, and because there is
 * no synchronization the result is still not exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
				enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
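
/*
 * Illustrative use of the snapshot variant (a sketch, not a new API):
 * its O(nr_cpus) cost is worth paying when a stale reading could flip a
 * decision near a watermark, e.g.:
 *
 *	unsigned long free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 *	if (free <= min_wmark_pages(zone))
 *		take_pressure_action();	// hypothetical callback
 */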

#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}
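
/*
 * Sketch of allocator-side usage (illustrative; the real call sites are
 * in mm/page_alloc.c and compare against the preferred zone):
 *
 *	if (zone_to_nid(zone) == numa_node_id())
 *		__count_numa_event(zone, NUMA_HIT);
 *	else
 *		__count_numa_event(zone, NUMA_MISS);
 */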

extern unsigned long sum_zone_node_page_state(int node,
				enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
				enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
				enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);
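
/*
 * Convention for the declarations above: the __-prefixed variants assume
 * the caller already prevents preemption or interrupt re-entry (see the
 * function comments in mm/vmstat.c), while the unprefixed variants are
 * safe from any context because they protect themselves.
 */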

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}
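
/*
 * Worked example for the byte-counted items: NR_SLAB_RECLAIMABLE_B is
 * accounted in bytes, so adding one page of reclaimable slab is
 *
 *	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);
 *
 * which the conversion above stores internally as a single page.
 */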

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
			int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
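
/*
 * Illustrative call (a sketch; actual callers live in mm/page_alloc.c):
 * freeing a pageblock of CMA pages bumps both NR_FREE_PAGES and
 * NR_FREE_CMA_PAGES:
 *
 *	__mod_zone_freepage_state(zone, 1 << pageblock_order, MIGRATE_CMA);
 */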

extern const char * const vmstat_text[];
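
/*
 * vmstat_text is one flat array of names: zone stat items, then NUMA
 * event items, then node stat items, then writeback items, then (when
 * configured) VM event items. The *_name() helpers below hard-code
 * those offsets and must stay in sync with the array in mm/vmstat.c.
 */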

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
			enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __mod_lruvec_page_state(struct page *page,
			enum node_stat_item idx, int val);

static inline void mod_lruvec_page_state(struct page *page,
			enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}
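
/*
 * The wrappers above only add local_irq_save()/local_irq_restore()
 * around the __-prefixed versions; a caller that already runs with
 * interrupts disabled can use the __ variants directly, e.g.:
 *
 *	__mod_lruvec_page_state(page, NR_FILE_PAGES, -1);
 */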

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
			enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
			enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
			enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
			enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */

static inline void inc_lruvec_state(struct lruvec *lruvec,
			enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void __inc_lruvec_page_state(struct page *page,
			enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
			enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
			enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
			enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#endif /* _LINUX_VMSTAT_H */