// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT	0
#endif
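/*
 * Note: SECTIONS_SHIFT is not provided by every memory model/config
 * (assumption: it normally comes from sparsemem headers); the fallback
 * above keeps the shift report in mminit_verify_pageflags_layout()
 * building everywhere.
 */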

/* The zonelists are simply reported, validation is manual. */
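/*
 * Illustrative example of the report format (node and zone names are
 * assumed here, not taken from a real boot log):
 *   mminit::zonelist general 0:Normal = 0:Normal 0:DMA32 0:DMA
 */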
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

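/*
 * Report how page->flags is carved up (section/node/zone/last_cpupid/
 * KASAN tag widths, shifts and offsets) and BUG() if the computed shifts
 * disagree with the *_PGSHIFT values or the bit masks overlap. Purely a
 * debugging aid behind CONFIG_DEBUG_MEMORY_INIT.
 */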
void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

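/* Parse the mminit_loglevel= early parameter that selects how verbose the checks above are. */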
static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

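/*
 * Size the percpu_counter batch for vm_committed_as from the amount of
 * RAM and the number of present CPUs. Illustrative arithmetic (assumed
 * figures, 4 KiB pages): 16 GiB of RAM is ~4194304 pages; with 8 CPUs,
 * OVERCOMMIT_NEVER gives 4194304/8/256 = 2048, any other policy gives
 * 4194304/8/4 = 131072, and the result never drops below max(2*nr, 32).
 */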
void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

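/* Recompute the batch whenever memory is onlined or offlined, since the amount of RAM changes on hotplug. */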
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif

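/* Create the "mm" kobject under /sys/kernel; mm_kobj is exported above so other MM code can attach its sysfs entries to it. */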
static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);