• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (c) 2017 Pavel Boldin <pboldin@cloudlinux.com>
4  * Copyright (c) 2023 Rick Edgecombe <rick.p.edgecombe@intel.com>
5  * Copyright (c) Linux Test Project, 2017-2023
6  */
7 
8 /*\
9  * [Description]
10  *
11  * This is a regression test of the Stack Clash [1] vulnerability. This tests
12  * that there is at least 256 PAGE_SIZE of stack guard gap which is considered
13  * hard to hop above. Code adapted from the Novell's bugzilla [2].
14  *
15  * The code `mmap(2)`s region close to the stack end. The code then allocates
16  * memory on stack until it hits guard page and SIGSEGV or SIGBUS is generated
17  * by the kernel. The signal handler checks that fault address is further than
18  * THRESHOLD from the mmapped area.
19  *
20  * We read /proc/self/maps to examine exact top of the stack and `mmap(2)`
21  * our region exactly GAP_PAGES * PAGE_SIZE away. We read /proc/cmdline to
22  * see if a different stack_guard_gap size is configured. We set stack limit
23  * to infinity and preallocate REQ_STACK_SIZE bytes of stack so that no calls
24  * after `mmap` are moving stack further.
25  *
26  * If the architecture meets certain requirements (only x86_64 is verified)
27  * then the test also tests that new mmap()s can't be placed in the stack's
28  * guard gap. This part of the test works by forcing a bottom up search. The
29  * assumptions are that the stack grows down (start gap) and either:
30  *
31  * 1. The default search is top down, and will switch to bottom up if
32  *      space is exhausted.
33  * 2. The default search is bottom up and the stack is above mmap base.
34  *
35  * [1] https://blog.qualys.com/securitylabs/2017/06/19/the-stack-clash
36  * [2] https://bugzilla.novell.com/show_bug.cgi?id=CVE-2017-1000364
37  */
38 
39 #include <sys/wait.h>
40 #include <stdio.h>
41 #include <unistd.h>
42 #include <alloca.h>
43 #include <signal.h>
44 #include <stdlib.h>
45 
46 #include "tst_test.h"
47 #include "tst_safe_stdio.h"
48 #include "lapi/mmap.h"
49 
static unsigned long page_size;		/* sysconf(_SC_PAGESIZE), set in setup() */
static unsigned long page_mask;		/* ~(page_size - 1): rounds addresses down to a page */
static unsigned long GAP_PAGES = 256;	/* expected guard gap in pages; overridden by stack_guard_gap= on cmdline */
static unsigned long THRESHOLD;		/* minimum acceptable fault distance: (GAP_PAGES - 1) * page_size */
static int STACK_GROWSDOWN;		/* 1 if the stack grows towards lower addresses (probed in setup()) */

#define SIGNAL_STACK_SIZE	(1UL<<20)	/* 1 MiB sigaltstack, the main stack is exhausted when the handler runs */
#define FRAME_SIZE		1024		/* stack consumed per recursion step */
#define REQ_STACK_SIZE		(1024 * 1024)	/* stack preallocated so later calls don't move the stack bottom */

#define EXIT_TESTBROKE		TBROK	/* child exit code meaning "test setup broke" */
61 
/*
 * Recurse forever, consuming FRAME_SIZE bytes of stack per call, until the
 * stack hits the guard gap and the kernel delivers SIGSEGV (or SIGBUS),
 * which is then examined by segv_handler().  The alloca() keeps each frame
 * alive (and should prevent tail-call elimination of the recursion -
 * NOTE(review): relies on compiler behavior, confirm with the used toolchain),
 * and the volatile store forces the freshly allocated page to be touched.
 */
void exhaust_stack_into_sigsegv(void)
{
	volatile char * ptr = alloca(FRAME_SIZE - sizeof(long));
	*ptr = '\0';
	exhaust_stack_into_sigsegv();
}
68 
/* Length of the probe mapping placed near the stack: one page. */
#define MAPPED_LEN page_size
/* Start address of that mapping; read by segv_handler() to compute the fault distance. */
static unsigned long mapped_addr;
71 
/*
 * SIGSEGV/SIGBUS handler for the child: measure how far the faulting address
 * is from the near edge of the mmap()ed region and turn that into the child's
 * exit status - EXIT_SUCCESS when the distance is at least THRESHOLD,
 * EXIT_FAILURE when the stack got too close (or overshot) the mapping.
 */
void segv_handler(int sig, siginfo_t *info, void *data LTP_ATTRIBUTE_UNUSED)
{
	unsigned long addr = (unsigned long)info->si_addr;
	unsigned long map_end = mapped_addr + MAPPED_LEN;
	ssize_t distance;

	/* Ignore anything other than the faults this handler was armed for. */
	if (sig != SIGSEGV && sig != SIGBUS)
		return;

	/* Distance from the fault to the mapping edge facing the stack. */
	distance = STACK_GROWSDOWN ? (ssize_t)(addr - map_end)
				   : (ssize_t)(mapped_addr - addr);

	tst_res(TINFO,
		"mmap = [%lx, %lx), addr = %lx, diff = %zx, THRESHOLD = %lx",
		mapped_addr, map_end, addr, distance, THRESHOLD);

	if (distance < 0 || (unsigned long)distance < THRESHOLD)
		_exit(EXIT_FAILURE);
	_exit(EXIT_SUCCESS);
}
94 
#ifdef __x86_64__
/*
 * Fill every unmapped hole below the stack with PROT_NONE mappings so that a
 * subsequent top-down mmap() search finds no space and the kernel falls back
 * to a bottom-up search - which then gets a chance to (incorrectly) consider
 * the stack's guard gap.  Walks /proc/self/maps in address order, mapping the
 * gap before each existing VMA, and stops at the stack entry.
 */
static void force_bottom_up(void)
{
	FILE *fh;
	char buf[1024];
	unsigned long start, end, size, lastend = 0;

	/* start filling from mmap_min_addr */
	SAFE_FILE_SCANF("/proc/sys/vm/mmap_min_addr", "%lu", &lastend);

	fh = SAFE_FOPEN("/proc/self/maps", "r");

	while (!feof(fh)) {
		if (fgets(buf, sizeof(buf), fh) == NULL)
			goto out;

		/* Each line begins "start-end ..." in hex. */
		if (sscanf(buf, "%lx-%lx", &start, &end) != 2) {
			tst_brk(TBROK | TERRNO, "sscanf");
			goto out;
		}

		size = start - lastend;

		/* Skip the PROT_NONE that was just added (!size). */
		if (!size) {
			lastend = end;
			continue;
		}

		/* If the next area is the stack, quit. */
		if (!!strstr(buf, "[stack]"))
			break;

		/* This is not cleaned up. */
		SAFE_MMAP((void *)lastend, size, PROT_NONE,
			  MAP_ANON|MAP_PRIVATE|MAP_FIXED_NOREPLACE, -1, 0);

		lastend = end;
	}

out:
	SAFE_FCLOSE(fh);
}
#endif
139 
read_stack_addr_from_proc(unsigned long * stack_size)140 unsigned long read_stack_addr_from_proc(unsigned long *stack_size)
141 {
142 	FILE *fh;
143 	char buf[1024];
144 	unsigned long stack_top = -1UL, start, end;
145 
146 	fh = SAFE_FOPEN("/proc/self/maps", "r");
147 
148 	while (!feof(fh)) {
149 		if (fgets(buf, sizeof(buf), fh) == NULL) {
150 			tst_brk(TBROK | TERRNO, "fgets");
151 			goto out;
152 		}
153 
154 		if (!strstr(buf, "[stack"))
155 			continue;
156 
157 		if (sscanf(buf, "%lx-%lx", &start, &end) != 2) {
158 			tst_brk(TBROK | TERRNO, "sscanf");
159 			goto out;
160 		}
161 
162 		*stack_size = end - start;
163 
164 		if (STACK_GROWSDOWN)
165 			stack_top = start;
166 		else
167 			stack_top = end;
168 		break;
169 	}
170 
171 out:
172 	SAFE_FCLOSE(fh);
173 	return stack_top;
174 }
175 
/* Debug aid: print this process's /proc/<pid>/maps via an external cat. */
void dump_proc_self_maps(void)
{
	static char path[64];
	static const char *argv[] = {"cat", path, NULL};

	snprintf(path, sizeof(path), "/proc/%d/maps", getpid());
	tst_cmd(argv, NULL, NULL, 0);
}
183 
/*
 * Grow the stack by `required` bytes right now, so that no later function
 * call moves the stack bottom after the probe region has been mmap()ed.
 * Touching the first and last byte forces the whole range to be faulted in;
 * noinline keeps the alloca() in a callee frame so the caller's own frame
 * layout is unaffected (the kernel does not shrink the stack VMA afterwards).
 */
void __attribute__((noinline)) preallocate_stack(unsigned long required)
{
	volatile char *garbage;

	garbage = alloca(required);
	garbage[0] = garbage[required - 1] = '\0';
}
191 
192 #ifdef __x86_64__
do_mmap_placement_test(unsigned long stack_addr,unsigned long gap)193 static void do_mmap_placement_test(unsigned long stack_addr, unsigned long gap)
194 {
195 	void *map_test_gap;
196 
197 	force_bottom_up();
198 
199 	/*
200 	 * force_bottom_up() used up all the spaces below the stack. The search down
201 	 * path should fail, and search up might take a look at the guard gap
202 	 * region. If it avoids it, the allocation will be above the stack. If it
203 	 * uses it, the allocation will be in the gap and the test should fail.
204 	 */
205 	map_test_gap = SAFE_MMAP(0, MAPPED_LEN,
206 				 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0);
207 
208 	if (stack_addr - gap <= (unsigned long)map_test_gap &&
209 		(unsigned long)map_test_gap <= stack_addr) {
210 		tst_res(TFAIL, "New mmap was placed in the guard gap.");
211 		SAFE_MUNMAP(map_test_gap, MAPPED_LEN);
212 	}
213 }
214 #endif
215 
/*
 * Forked test body: unlock and pre-grow the stack, mmap() one page exactly
 * `gap` bytes away from the stack's growth end, arm SIGSEGV/SIGBUS handlers
 * on an alternate stack, then recurse until the stack hits the guard gap.
 * The signal handler converts the fault distance into the exit status that
 * stack_clash_test() interprets in the parent.
 */
void do_child(void)
{
	unsigned long stack_addr, stack_size;
	stack_t signal_stack;
	struct sigaction segv_sig = {.sa_sigaction = segv_handler, .sa_flags = SA_ONSTACK|SA_SIGINFO};
	void *map;
	unsigned long gap = GAP_PAGES * page_size;
	struct rlimit rlimit;

	/* Unlimited stack so growth is stopped by the guard gap, not RLIMIT_STACK. */
	rlimit.rlim_cur = rlimit.rlim_max = RLIM_INFINITY;
	SAFE_SETRLIMIT(RLIMIT_STACK, &rlimit);

	/* Grow the stack now so calls made after the mmap() don't move it. */
	preallocate_stack(REQ_STACK_SIZE);

	stack_addr = read_stack_addr_from_proc(&stack_size);
	if (stack_addr == -1UL) {
		tst_brk(TBROK, "can't read stack top from /proc/self/maps");
		return;
	}

	/* Place the probe mapping exactly `gap` bytes from the stack end. */
	if (STACK_GROWSDOWN)
		mapped_addr = stack_addr - gap - MAPPED_LEN;
	else
		mapped_addr = stack_addr + gap;

	mapped_addr &= page_mask;
	map = SAFE_MMAP((void *)mapped_addr, MAPPED_LEN,
			PROT_READ|PROT_WRITE,
			MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
	tst_res(TINFO, "Stack:0x%lx+0x%lx mmap:%p+0x%lx",
		stack_addr, stack_size, map, MAPPED_LEN);

	/* The fault handler needs its own stack - the main one will be full. */
	signal_stack.ss_sp = SAFE_MALLOC(SIGNAL_STACK_SIZE);
	signal_stack.ss_size = SIGNAL_STACK_SIZE;
	signal_stack.ss_flags = 0;
	if (sigaltstack(&signal_stack, NULL) == -1) {
		tst_brk(TBROK | TERRNO, "sigaltstack");
		return;
	}
	if (sigaction(SIGSEGV, &segv_sig, NULL) == -1 ||
	    sigaction(SIGBUS,  &segv_sig, NULL) == -1) {
		tst_brk(TBROK | TERRNO, "sigaction");
		return;
	}

#ifdef DEBUG
	dump_proc_self_maps();
#endif

#ifdef __x86_64__
	do_mmap_placement_test(stack_addr, gap);
#endif

	/* Now see if it can grow too close to an adjacent region. */
	exhaust_stack_into_sigsegv();
}
272 
setup(void)273 void setup(void)
274 {
275 	char buf[4096], *p;
276 
277 	page_size = sysconf(_SC_PAGESIZE);
278 	page_mask = ~(page_size - 1);
279 
280 	buf[4095] = '\0';
281 	SAFE_FILE_SCANF("/proc/cmdline", "%4095[^\n]", buf);
282 
283 	if ((p = strstr(buf, "stack_guard_gap=")) != NULL) {
284 		if (sscanf(p, "stack_guard_gap=%ld", &GAP_PAGES) != 1) {
285 			tst_brk(TBROK | TERRNO, "sscanf");
286 			return;
287 		}
288 		tst_res(TINFO, "stack_guard_gap = %ld", GAP_PAGES);
289 	}
290 
291 	THRESHOLD = (GAP_PAGES - 1) * page_size;
292 
293 	{
294 		volatile int *a = alloca(128);
295 
296 		{
297 			volatile int *b = alloca(128);
298 
299 			STACK_GROWSDOWN = a > b;
300 			tst_res(TINFO, "STACK_GROWSDOWN = %d == %p > %p", STACK_GROWSDOWN, a, b);
301 		}
302 	}
303 }
304 
stack_clash_test(void)305 void stack_clash_test(void)
306 {
307 	int status;
308 	pid_t pid;
309 
310 	pid = SAFE_FORK();
311 	if (!pid) {
312 		do_child();
313 		exit(EXIT_TESTBROKE);
314 		return;
315 	}
316 
317 	SAFE_WAITPID(pid, &status, 0);
318 
319 	if (WIFEXITED(status)) {
320 		switch (WEXITSTATUS(status)) {
321 		case EXIT_FAILURE:
322 			tst_res(TFAIL, "stack is too close to the mmaped area");
323 			return;
324 		case EXIT_SUCCESS:
325 			tst_res(TPASS, "stack is far enough from mmaped area");
326 			return;
327 		default:
328 		case EXIT_TESTBROKE:
329 			break;
330 		}
331 	}
332 
333 	tst_brk(TBROK, "Child %s", tst_strstatus(status));
334 }
335 
/* LTP test descriptor: needs root (writes PROT_NONE maps from mmap_min_addr
 * up on x86_64) and forks the child that performs the actual clash attempt. */
static struct tst_test test = {
	.forks_child = 1,
	.needs_root = 1,
	.setup = setup,
	.test_all = stack_clash_test,
	.tags = (const struct tst_tag[]) {
		{"CVE", "2017-1000364"},
		{"linux-git", "58c5d0d6d522"},
		{}
	}
};
347