// SPDX-License-Identifier: GPL-2.0
/*
 *
 * A test for the patch "Allow compaction of unevictable pages".
 * With this patch we should be able to allocate at least 1/4
 * of RAM in huge pages. Without the patch much less is
 * allocated.
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>

#include "../kselftest.h"

#define MAP_SIZE_MB	100
#define MAP_SIZE	(MAP_SIZE_MB * 1024 * 1024)

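/*
 * Node of a singly linked list that keeps track of every locked
 * anonymous mapping created while fragmenting memory.
 */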
struct map_list {
	void *map;
	struct map_list *next;
};

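/*
 * Parse MemFree and Hugepagesize (both reported in kB) out of
 * /proc/meminfo by piping it through grep. Returns 0 on success,
 * -1 if either value could not be read.
 */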
int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
{
	char  buffer[256] = {0};
	char *cmd = "cat /proc/meminfo | grep -i memfree | grep -o '[0-9]*'";
	FILE *cmdfile = popen(cmd, "r");

	if (!cmdfile) {
		perror("Failed to run the meminfo command");
		return -1;
	}

	if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
		perror("Failed to read meminfo");
		pclose(cmdfile);
		return -1;
	}

	pclose(cmdfile);

	*memfree = atoll(buffer);
	cmd = "cat /proc/meminfo | grep -i hugepagesize | grep -o '[0-9]*'";
	cmdfile = popen(cmd, "r");

	if (!cmdfile) {
		perror("Failed to run the meminfo command");
		return -1;
	}

	if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
		perror("Failed to read meminfo");
		pclose(cmdfile);
		return -1;
	}

	pclose(cmdfile);
	*hugepagesize = atoll(buffer);

	return 0;
}

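/*
 * The test is only meaningful when the kernel is allowed to compact
 * unevictable pages, i.e. /proc/sys/vm/compact_unevictable_allowed
 * reads '1'. Returns 0 in that case and -1 otherwise.
 */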
int prereq(void)
{
	char allowed;
	int fd;

	fd = open("/proc/sys/vm/compact_unevictable_allowed",
		  O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		perror("Failed to open "
		       "/proc/sys/vm/compact_unevictable_allowed");
		return -1;
	}

	if (read(fd, &allowed, sizeof(char)) != sizeof(char)) {
		perror("Failed to read from "
		       "/proc/sys/vm/compact_unevictable_allowed");
		close(fd);
		return -1;
	}

	close(fd);
	if (allowed == '1')
		return 0;

	return -1;
}

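/*
 * Save the current nr_hugepages, reset it to 0, then request a huge
 * number of huge pages (100000) and read back how many the kernel was
 * actually able to allocate. The test passes if at least a third of
 * 80% of the previously free memory ends up backed by huge pages; the
 * saved nr_hugepages value is restored before returning.
 */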
int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
{
	int fd;
	int compaction_index = 0;
	char initial_nr_hugepages[10] = {0};
	char nr_hugepages[10] = {0};

	/*
	 * We want to test with 80% of available memory. Otherwise the
	 * OOM killer comes into play.
	 */
	mem_free = mem_free * 0.8;

	fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		perror("Failed to open /proc/sys/vm/nr_hugepages");
		return -1;
	}

	if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
		perror("Failed to read from /proc/sys/vm/nr_hugepages");
		goto close_fd;
	}

	/* Start with the initial condition of 0 huge pages */
	if (write(fd, "0", sizeof(char)) != sizeof(char)) {
		perror("Failed to write 0 to /proc/sys/vm/nr_hugepages");
		goto close_fd;
	}

	lseek(fd, 0, SEEK_SET);

	/*
	 * Request a large number of huge pages. The kernel will allocate
	 * as many as it can.
	 */
	if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
		perror("Failed to write 100000 to /proc/sys/vm/nr_hugepages");
		goto close_fd;
	}

	lseek(fd, 0, SEEK_SET);

	if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
		perror("Failed to re-read from /proc/sys/vm/nr_hugepages");
		goto close_fd;
	}

	if (atoi(nr_hugepages) == 0) {
		fprintf(stderr, "ERROR: No huge pages were allocated\n");
		goto close_fd;
	}

	/*
	 * We should have been able to request at least a third of the
	 * memory in huge pages.
	 */
	compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);

	if (compaction_index > 3) {
		printf("No of huge pages allocated = %d\n",
		       (atoi(nr_hugepages)));
		fprintf(stderr, "ERROR: Less than 1/%d of memory is available\n"
			"as huge pages\n", compaction_index);
		goto close_fd;
	}

	printf("No of huge pages allocated = %d\n",
	       (atoi(nr_hugepages)));

	lseek(fd, 0, SEEK_SET);

	if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
	    != strlen(initial_nr_hugepages)) {
		perror("Failed to write value to /proc/sys/vm/nr_hugepages");
		goto close_fd;
	}

	close(fd);
	return 0;

 close_fd:
	close(fd);
	printf("Not OK. Compaction test failed.\n");
	return -1;
}

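/*
 * Fragment most of the free memory: raise RLIMIT_MEMLOCK, then mmap
 * roughly 80% of MemFree as locked 100 MB anonymous chunks, writing to
 * every page so that KSM cannot merge them. Every other chunk is then
 * unmapped and check_compaction() verifies how much of the memory can
 * still be obtained as huge pages.
 */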
int main(int argc, char **argv)
{
	struct rlimit lim;
	struct map_list *list, *entry;
	size_t page_size, i;
	void *map = NULL;
	unsigned long mem_free = 0;
	unsigned long hugepage_size = 0;
	long mem_fragmentable_MB = 0;

	if (prereq() != 0) {
		printf("Either the sysctl compact_unevictable_allowed is not\n"
		       "set to 1 or couldn't read the proc file.\n"
		       "Skipping the test\n");
		return KSFT_SKIP;
	}

	lim.rlim_cur = RLIM_INFINITY;
	lim.rlim_max = RLIM_INFINITY;
	if (setrlimit(RLIMIT_MEMLOCK, &lim)) {
		perror("Failed to set rlimit");
		return -1;
	}

	page_size = getpagesize();

	list = NULL;

	if (read_memory_info(&mem_free, &hugepage_size) != 0) {
		printf("ERROR: Cannot read meminfo\n");
		return -1;
	}

	mem_fragmentable_MB = mem_free * 0.8 / 1024;

	while (mem_fragmentable_MB > 0) {
		map = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
			   MAP_ANONYMOUS | MAP_PRIVATE | MAP_LOCKED, -1, 0);
		if (map == MAP_FAILED)
			break;

		entry = malloc(sizeof(struct map_list));
		if (!entry) {
			munmap(map, MAP_SIZE);
			break;
		}
		entry->map = map;
		entry->next = list;
		list = entry;

		/*
		 * Write something (in this case the address of the map) to
		 * ensure that KSM can't merge the mapped pages.
		 */
		for (i = 0; i < MAP_SIZE; i += page_size)
			*(unsigned long *)(map + i) = (unsigned long)map + i;

		mem_fragmentable_MB -= MAP_SIZE_MB;
	}

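	/*
	 * Unmap every other entry in the list; the mappings that are
	 * skipped stay in place, locked in memory.
	 */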
	for (entry = list; entry != NULL; entry = entry->next) {
		munmap(entry->map, MAP_SIZE);
		if (!entry->next)
			break;
		entry = entry->next;
	}

	if (check_compaction(mem_free, hugepage_size) == 0)
		return 0;

	return -1;
}