// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2017 Pavel Boldin <pboldin@cloudlinux.com>
 */

/* This is a regression test of the Stack Clash [1] vulnerability. It tests
 * that there is a stack guard gap of at least 256 * PAGE_SIZE, which is
 * considered hard to hop over. Code adapted from Novell's bugzilla [2].
 *
 * The code `mmap(2)`s a region close to the stack end. It then allocates
 * memory on the stack until it hits the guard page and SIGSEGV or SIGBUS is
 * generated by the kernel. The signal handler checks that the fault address
 * is further than THRESHOLD away from the mmapped area.
 *
 * We read /proc/self/maps to find the exact top of the stack and `mmap(2)`
 * our region exactly GAP_PAGES * PAGE_SIZE away from it. We read
 * /proc/cmdline to see whether a different stack_guard_gap size is
 * configured. We set the stack limit to infinity and preallocate
 * REQ_STACK_SIZE bytes of stack so that no calls after `mmap` move the stack
 * any further.
 *
 * [1] https://blog.qualys.com/securitylabs/2017/06/19/the-stack-clash
 * [2] https://bugzilla.novell.com/show_bug.cgi?id=CVE-2017-1000364
 */
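/*
 * For example, with the default GAP_PAGES = 256 on a system with 4096-byte
 * pages (an illustrative assumption; the actual value is taken from
 * sysconf(_SC_PAGESIZE) at run time) the expected gap is 256 * 4096 bytes =
 * 1 MiB and THRESHOLD is (256 - 1) * 4096 = 1044480 bytes.
 */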

#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/resource.h>	/* struct rlimit, RLIMIT_STACK, RLIM_INFINITY */
#include <stdio.h>
#include <unistd.h>
#include <alloca.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>		/* strstr() */

#include "tst_test.h"
#include "tst_safe_stdio.h"

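/*
 * GAP_PAGES is the expected stack guard gap in pages: 256 is the kernel
 * default, and setup() picks up any stack_guard_gap= override from the boot
 * command line. THRESHOLD is the minimum acceptable distance in bytes
 * between the faulting stack access and the mapped page, one page less than
 * the configured gap. STACK_GROWSDOWN records the stack growth direction
 * detected in setup().
 */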
static unsigned long page_size;
static unsigned long page_mask;
static unsigned long GAP_PAGES = 256;
static unsigned long THRESHOLD;
static int STACK_GROWSDOWN;

#define SIGNAL_STACK_SIZE	(1UL<<20)
#define FRAME_SIZE		1024
#define REQ_STACK_SIZE		(1024 * 1024)

#define EXIT_TESTBROKE		TBROK

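/*
 * Recurse forever, consuming about FRAME_SIZE bytes of stack per call and
 * touching one byte in every frame, until the stack walks into the guard
 * gap and the kernel delivers SIGSEGV or SIGBUS.
 */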
void exhaust_stack_into_sigsegv(void)
{
	volatile char *ptr = alloca(FRAME_SIZE - sizeof(long));
	*ptr = '\0';
	exhaust_stack_into_sigsegv();
}

#define MAPPED_LEN page_size
static unsigned long mapped_addr;

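/*
 * Runs on the alternate signal stack once the child faults: computes how far
 * the faulting address is from the mmapped page, in the direction of stack
 * growth, and exits with EXIT_FAILURE if it is closer than THRESHOLD,
 * EXIT_SUCCESS otherwise.
 */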
void segv_handler(int sig, siginfo_t *info, void *data LTP_ATTRIBUTE_UNUSED)
{
	unsigned long fault_addr = (unsigned long)info->si_addr;
	unsigned long mmap_end = mapped_addr + MAPPED_LEN;
	ssize_t diff;

	if (sig != SIGSEGV && sig != SIGBUS)
		return;

	if (STACK_GROWSDOWN)
		diff = fault_addr - mmap_end;
	else
		diff = mapped_addr - fault_addr;

	tst_res(TINFO,
		"mmap = [%lx, %lx), addr = %lx, diff = %zx, THRESHOLD = %lx",
		mapped_addr, mmap_end, fault_addr, diff, THRESHOLD);
	if (diff < 0 || (unsigned long)diff < THRESHOLD)
		_exit(EXIT_FAILURE);
	else
		_exit(EXIT_SUCCESS);
}

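/*
 * Parse /proc/self/maps for the "[stack" entry. Returns the stack boundary
 * that faces the guard gap (the lower address when the stack grows down, the
 * upper one otherwise) and stores the current stack size in *stack_size.
 * The caller treats a return value of -1UL as failure.
 */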
unsigned long read_stack_addr_from_proc(unsigned long *stack_size)
{
	FILE *fh;
	char buf[1024];
	unsigned long stack_top = -1UL, start, end;

	fh = SAFE_FOPEN("/proc/self/maps", "r");

	while (!feof(fh)) {
		if (fgets(buf, sizeof(buf), fh) == NULL) {
			tst_brk(TBROK | TERRNO, "fgets");
			goto out;
		}

		if (!strstr(buf, "[stack"))
			continue;

		if (sscanf(buf, "%lx-%lx", &start, &end) != 2) {
			tst_brk(TBROK | TERRNO, "sscanf");
			goto out;
		}

		*stack_size = end - start;

		if (STACK_GROWSDOWN)
			stack_top = start;
		else
			stack_top = end;
		break;
	}

out:
	SAFE_FCLOSE(fh);
	return stack_top;
}

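/*
 * Debug helper: dump this process's /proc/<pid>/maps via cat. Only called
 * when the test is compiled with DEBUG defined.
 */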
void dump_proc_self_maps(void)
{
	static char buf[64];
	static const char *cmd[] = {"cat", buf, NULL};
	sprintf(buf, "/proc/%d/maps", getpid());
	tst_run_cmd(cmd, NULL, NULL, 0);
}

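/*
 * Touch the first and last byte of a `required`-byte alloca() block so the
 * stack is already grown by at least that much before the mapping is placed;
 * calls made after mmap() then cannot move the stack boundary any further.
 * The noinline attribute keeps the allocation in its own stack frame.
 */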
void __attribute__((noinline)) preallocate_stack(unsigned long required)
{
	volatile char *garbage;

	garbage = alloca(required);
	garbage[0] = garbage[required - 1] = '\0';
}

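/*
 * Child body: lift the stack limit, preallocate stack, locate the stack in
 * /proc/self/maps, map a single page GAP_PAGES * page_size away from it,
 * install the fault handler on an alternate stack, then recurse until the
 * guard gap is hit.
 */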
void do_child(void)
{
	unsigned long stack_addr, stack_size;
	stack_t signal_stack;
	struct sigaction segv_sig = {.sa_sigaction = segv_handler, .sa_flags = SA_ONSTACK|SA_SIGINFO};
	void *map;
	unsigned long gap = GAP_PAGES * page_size;
	struct rlimit rlimit;

	rlimit.rlim_cur = rlimit.rlim_max = RLIM_INFINITY;
	SAFE_SETRLIMIT(RLIMIT_STACK, &rlimit);

	preallocate_stack(REQ_STACK_SIZE);

	stack_addr = read_stack_addr_from_proc(&stack_size);
	if (stack_addr == -1UL) {
		tst_brk(TBROK, "can't read stack top from /proc/self/maps");
		return;
	}

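	/*
	 * Place the mapping so that the whole guard gap lies between it and
	 * the stack: GAP_PAGES * page_size below the lowest stack address
	 * when the stack grows down, the same distance above the highest
	 * stack address otherwise.
	 */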
	if (STACK_GROWSDOWN)
		mapped_addr = stack_addr - gap - MAPPED_LEN;
	else
		mapped_addr = stack_addr + gap;

	mapped_addr &= page_mask;
	map = SAFE_MMAP((void *)mapped_addr, MAPPED_LEN,
			PROT_READ|PROT_WRITE,
			MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
	tst_res(TINFO, "Stack:0x%lx+0x%lx mmap:%p+0x%lx",
		stack_addr, stack_size, map, MAPPED_LEN);

	signal_stack.ss_sp = SAFE_MALLOC(SIGNAL_STACK_SIZE);
	signal_stack.ss_size = SIGNAL_STACK_SIZE;
	signal_stack.ss_flags = 0;
	if (sigaltstack(&signal_stack, NULL) == -1) {
		tst_brk(TBROK | TERRNO, "sigaltstack");
		return;
	}
	if (sigaction(SIGSEGV, &segv_sig, NULL) == -1 ||
	    sigaction(SIGBUS,  &segv_sig, NULL) == -1) {
		tst_brk(TBROK | TERRNO, "sigaction");
		return;
	}

#ifdef DEBUG
	dump_proc_self_maps();
#endif

	exhaust_stack_into_sigsegv();
}

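/*
 * Query the page size, pick up a stack_guard_gap= override from
 * /proc/cmdline, derive THRESHOLD and detect the stack growth direction.
 */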
void setup(void)
{
	char buf[4096], *p;

	page_size = sysconf(_SC_PAGESIZE);
	page_mask = ~(page_size - 1);

	buf[4095] = '\0';
	SAFE_FILE_SCANF("/proc/cmdline", "%4095[^\n]", buf);

	if ((p = strstr(buf, "stack_guard_gap=")) != NULL) {
		if (sscanf(p, "stack_guard_gap=%lu", &GAP_PAGES) != 1) {
			tst_brk(TBROK | TERRNO, "sscanf");
			return;
		}
		tst_res(TINFO, "stack_guard_gap = %lu", GAP_PAGES);
	}

	THRESHOLD = (GAP_PAGES - 1) * page_size;

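	/*
	 * Detect the stack growth direction by comparing the addresses of two
	 * nested alloca() allocations: on a downward-growing stack the inner,
	 * later allocation gets the lower address.
	 */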
	{
		volatile int *a = alloca(128);

		{
			volatile int *b = alloca(128);

			STACK_GROWSDOWN = a > b;
			tst_res(TINFO, "STACK_GROWSDOWN = %d == %p > %p", STACK_GROWSDOWN, a, b);
		}
	}
}

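/*
 * Fork the child and translate its exit status: EXIT_FAILURE means the fault
 * landed too close to the mapping, EXIT_SUCCESS means the guard gap held,
 * and anything else breaks the test.
 */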
void stack_clash_test(void)
{
	int status;
	pid_t pid;

	pid = SAFE_FORK();
	if (!pid) {
		do_child();
		exit(EXIT_TESTBROKE);
		return;
	}

	SAFE_WAITPID(pid, &status, 0);

	if (WIFEXITED(status)) {
		switch (WEXITSTATUS(status)) {
		case EXIT_FAILURE:
			tst_res(TFAIL, "stack is too close to the mmapped area");
			return;
		case EXIT_SUCCESS:
			tst_res(TPASS, "stack is far enough from the mmapped area");
			return;
		default:
		case EXIT_TESTBROKE:
			break;
		}
	}

	tst_brk(TBROK, "Child %s", tst_strstatus(status));
}

static struct tst_test test = {
	.forks_child = 1,
	.needs_root = 1,
	.setup = setup,
	.test_all = stack_clash_test,
	.tags = (const struct tst_tag[]) {
		{"CVE", "2017-1000364"},
		{}
	}
};