// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic show_mem() implementation
 *
 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
 */

#include <linux/blkdev.h>
#include <linux/cma.h>
#include <linux/cpuset.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/vmstat.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

#include "internal.h"
#include "swap.h"

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

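/* Prefix zone output with its node ID on NUMA kernels; a no-op otherwise. */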
static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}

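/*
 * Estimate how much memory can be handed to userspace without pushing
 * the system into swap or OOM: free pages above the watermark reserves,
 * plus the share of page cache and reclaimable kernel memory that can
 * be dropped cheaply. This backs MemAvailable in /proc/meminfo.
 */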
long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long reclaimable;
	struct zone *zone;

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping or OOM.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping or thrashing. Assume at least half of the page
	 * cache, or the low watermark worth of cache, needs to stay.
	 */
	pagecache = global_node_page_state(NR_ACTIVE_FILE) +
		global_node_page_state(NR_INACTIVE_FILE);
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/*
	 * Part of the reclaimable slab and other kernel memory consists of
	 * items that are in use, and cannot be freed. Cap this estimate at the
	 * low watermark.
	 */
	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
	reclaimable -= min(reclaimable / 2, wmark_low);
	available += reclaimable;
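	/* Android vendor hook: lets a vendor module adjust the estimate. */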
	trace_android_vh_si_mem_available_adjust(&available);

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);

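/* Fill the system-wide fields of a sysinfo struct, as used by sysinfo(2). */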
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages();
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
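	/* Android vendor hooks: vendor modules may adjust the reported values. */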
	trace_android_vh_si_meminfo_adjust(&val->totalram, &val->freeram);
	trace_android_vh_si_meminfo_adjust_shmem(&val->sharedram);
}
EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
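/* Per-node counterpart of si_meminfo(): totals for a single NUMA node. */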
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (is_highmem(zone)) {
			managed_highpages += zone_managed_pages(zone);
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}
#endif
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
	val->mem_unit = PAGE_SIZE;
}
#endif

/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * no node mask - aka implicit memory numa policy. Do not bother with
	 * the synchronization - read_mems_allowed_begin - because we do not
	 * have to be precise here.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}

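/*
 * Decode a bitmask of migrate types into one-letter codes, e.g. "(UM)"
 * when both unmovable and movable blocks sit on a free list.
 */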
static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}

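/* True if any zone up to and including max_zone_idx has managed pages. */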
static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
{
	int zone_idx;
	for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
		if (zone_managed_pages(pgdat->node_zones + zone_idx))
			return true;
	return false;
}

/*
 * Show the free area list (used by the Shift+Scroll Lock console dump,
 * among others). For each zone the buddy free lists are summarized as a
 * per-order count of free blocks, annotated with their migrate types.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long free_pcp = 0;
	int cpu, nid;
	struct zone *zone;
	pg_data_t *pgdat;

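	/*
	 * First pass: sum pages held in the per-CPU page lists of every
	 * eligible zone so the summary below can report them as free_pcp.
	 */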
	for_each_populated_zone(zone) {
		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
	}

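	/* System-wide summary of the major node and zone counters. */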
	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu dirty:%lu writeback:%lu\n"
		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu\n"
		" sec_pagetables:%lu bounce:%lu\n"
		" kernel_misc_reclaimable:%lu\n"
		" free:%lu free_pcp:%lu free_cma:%lu\n",
		global_node_page_state(NR_ACTIVE_ANON),
		global_node_page_state(NR_INACTIVE_ANON),
		global_node_page_state(NR_ISOLATED_ANON),
		global_node_page_state(NR_ACTIVE_FILE),
		global_node_page_state(NR_INACTIVE_FILE),
		global_node_page_state(NR_ISOLATED_FILE),
		global_node_page_state(NR_UNEVICTABLE),
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_node_page_state(NR_PAGETABLE),
		global_node_page_state(NR_SECONDARY_PAGETABLE),
		global_zone_page_state(NR_BOUNCE),
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
		global_zone_page_state(NR_FREE_PAGES),
		free_pcp,
		global_zone_page_state(NR_FREE_CMA_PAGES));

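	/* Per-node breakdown: LRU, dirty/writeback and kernel allocations. */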
	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;
		if (!node_has_managed_zones(pgdat, max_zone_idx))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp:%lukB"
			" shmem_pmdmapped:%lukB"
			" anon_thp:%lukB"
#endif
			" writeback_tmp:%lukB"
			" kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
			" shadow_call_stack:%lukB"
#endif
			" pagetables:%lukB"
			" sec_pagetables:%lukB"
			" all_unreclaimable? %s"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS)),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			K(node_page_state(pgdat, NR_PAGETABLE)),
			K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
				"yes" : "no");
	}

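	/*
	 * Per-zone breakdown: watermarks, zone LRU counters and each zone's
	 * lowmem_reserve[] protection array.
	 */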
	for_each_populated_zone(zone) {
		int i;

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;
		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;

		show_node(zone);
		printk(KERN_CONT
			"%s"
			" free:%lukB"
			" boost:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" reserved_highatomic:%lukB"
			" free_highatomic:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" writepending:%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" bounce:%lukB"
			" free_pcp:%lukB"
			" local_pcp:%ukB"
			" free_cma:%lukB"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(zone->watermark_boost),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone->nr_reserved_highatomic),
			K(zone->nr_free_highatomic),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
			K(zone->present_pages),
			K(zone_managed_pages(zone)),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(free_pcp),
			K(this_cpu_read(zone->per_cpu_pageset->count)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
		printk(KERN_CONT "\n");
	}

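	/*
	 * Dump the buddy allocator state: for each zone, the number of free
	 * blocks at every order together with the migrate types present on
	 * each order's free lists, sampled under the zone lock for a
	 * consistent snapshot.
	 */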
	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[NR_PAGE_ORDERS], flags, total = 0;
		unsigned char types[NR_PAGE_ORDERS];

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!free_area_empty(area, type))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}

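	/* hugetlb pool pages are reported separately, per node. */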
	for_each_online_node(nid) {
		if (show_mem_node_skip(filter, nid, nodemask))
			continue;
		hugetlb_show_meminfo_node(nid);
	}

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

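/*
 * Top-level memory report, reached via show_mem() from the OOM killer,
 * SysRq-m and allocation failure messages.
 */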
void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long total = 0, reserved = 0, highmem = 0;
	struct zone *zone;

	printk("Mem-Info:\n");
	show_free_areas(filter, nodemask, max_zone_idx);

	for_each_populated_zone(zone) {

		total += zone->present_pages;
		reserved += zone->present_pages - zone_managed_pages(zone);

		if (is_highmem(zone))
			highmem += zone->present_pages;
	}

	printk("%lu pages RAM\n", total);
	printk("%lu pages HighMem/MovableOnly\n", highmem);
	printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
	printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_MEMORY_FAILURE
	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	{
		struct codetag_bytes tags[10];
		size_t i, nr;

		nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), false);
		if (nr) {
			pr_notice("Memory allocations:\n");
			for (i = 0; i < nr; i++) {
				struct codetag *ct = tags[i].ct;
				struct alloc_tag *tag = ct_to_alloc_tag(ct);
				struct alloc_tag_counters counter = alloc_tag_read(tag);
				char bytes[10];

				string_get_size(counter.bytes, 1, STRING_UNITS_2, bytes, sizeof(bytes));

				/* Same as alloc_tag_to_text() but w/o intermediate buffer */
				if (ct->modname)
					pr_notice("%12s %8llu %s:%u [%s] func:%s\n",
						  bytes, counter.calls, ct->filename,
						  ct->lineno, ct->modname, ct->function);
				else
					pr_notice("%12s %8llu %s:%u func:%s\n",
						  bytes, counter.calls, ct->filename,
						  ct->lineno, ct->function);
			}
		}
	}
#endif
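	/* Android vendor hook: vendor modules may log additional state. */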
	trace_android_vh_show_mem(filter, nodemask);
}
EXPORT_SYMBOL_GPL(__show_mem);