1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2017 Pavel Boldin <pboldin@cloudlinux.com>
4 * Copyright (c) 2023 Rick Edgecombe <rick.p.edgecombe@intel.com>
5 * Copyright (c) Linux Test Project, 2017-2023
6 */
7
8 /*\
9 * [Description]
10 *
11 * This is a regression test of the Stack Clash [1] vulnerability. This tests
12 * that there is at least 256 PAGE_SIZE of stack guard gap which is considered
13 * hard to hop above. Code adapted from the Novell's bugzilla [2].
14 *
15 * The code `mmap(2)`s region close to the stack end. The code then allocates
16 * memory on stack until it hits guard page and SIGSEGV or SIGBUS is generated
17 * by the kernel. The signal handler checks that fault address is further than
18 * THRESHOLD from the mmapped area.
19 *
20 * We read /proc/self/maps to examine exact top of the stack and `mmap(2)`
21 * our region exactly GAP_PAGES * PAGE_SIZE away. We read /proc/cmdline to
22 * see if a different stack_guard_gap size is configured. We set stack limit
23 * to infinity and preallocate REQ_STACK_SIZE bytes of stack so that no calls
24 * after `mmap` are moving stack further.
25 *
26 * If the architecture meets certain requirements (only x86_64 is verified)
27 * then the test also tests that new mmap()s can't be placed in the stack's
28 * guard gap. This part of the test works by forcing a bottom up search. The
29 * assumptions are that the stack grows down (start gap) and either:
30 *
31 * 1. The default search is top down, and will switch to bottom up if
32 * space is exhausted.
33 * 2. The default search is bottom up and the stack is above mmap base.
34 *
35 * [1] https://blog.qualys.com/securitylabs/2017/06/19/the-stack-clash
36 * [2] https://bugzilla.novell.com/show_bug.cgi?id=CVE-2017-1000364
37 */
38
39 #include <sys/wait.h>
40 #include <stdio.h>
41 #include <unistd.h>
42 #include <alloca.h>
43 #include <signal.h>
44 #include <stdlib.h>
45
46 #include "tst_test.h"
47 #include "tst_safe_stdio.h"
48 #include "lapi/mmap.h"
49
static unsigned long page_size;		/* runtime page size from sysconf() */
static unsigned long page_mask;		/* mask to round an address down to a page boundary */
static unsigned long GAP_PAGES = 256;	/* expected guard gap in pages; may be overridden by stack_guard_gap= */
static unsigned long THRESHOLD;		/* minimum acceptable distance between fault and mapping */
static int STACK_GROWSDOWN;		/* stack growth direction, detected in setup() */

#define SIGNAL_STACK_SIZE (1UL<<20)	/* sigaltstack size: handler must run off the exhausted stack */
#define FRAME_SIZE 1024			/* stack consumed per recursion step */
#define REQ_STACK_SIZE (1024 * 1024)	/* stack preallocated so later calls don't grow it further */

#define EXIT_TESTBROKE TBROK		/* child exit code meaning "test machinery broke" */
61
/*
 * Recurse forever, consuming FRAME_SIZE bytes of stack per call and
 * touching each new frame, until the stack reaches the guard gap and
 * the kernel raises SIGSEGV/SIGBUS (handled by segv_handler()).
 * This function never returns normally.
 */
void exhaust_stack_into_sigsegv(void)
{
	/* Write through the pointer so the new stack page is faulted in. */
	volatile char * ptr = alloca(FRAME_SIZE - sizeof(long));
	*ptr = '\0';
	exhaust_stack_into_sigsegv();
}
68
/* One page mmap()ed near the stack; address is read by segv_handler(). */
#define MAPPED_LEN page_size
static unsigned long mapped_addr;
71
/*
 * SIGSEGV/SIGBUS handler, run on the alternate signal stack.
 *
 * Computes how far the faulting address is from the mmap()ed region
 * (in the direction of stack growth) and exits the child with
 * EXIT_SUCCESS when the distance is at least THRESHOLD, EXIT_FAILURE
 * otherwise.
 */
void segv_handler(int sig, siginfo_t *info, void *data LTP_ATTRIBUTE_UNUSED)
{
	const unsigned long fault_addr = (unsigned long)info->si_addr;
	const unsigned long mmap_end = mapped_addr + MAPPED_LEN;
	ssize_t distance;

	if (sig != SIGSEGV && sig != SIGBUS)
		return;

	/* Distance from the mapping toward the stack. */
	distance = STACK_GROWSDOWN ? (ssize_t)(fault_addr - mmap_end)
				   : (ssize_t)(mapped_addr - fault_addr);

	tst_res(TINFO,
		"mmap = [%lx, %lx), addr = %lx, diff = %zx, THRESHOLD = %lx",
		mapped_addr, mmap_end, fault_addr, distance, THRESHOLD);

	if (distance >= 0 && (unsigned long)distance >= THRESHOLD)
		_exit(EXIT_SUCCESS);

	_exit(EXIT_FAILURE);
}
94
/*
 * Fill every unmapped hole below the stack with PROT_NONE mappings so a
 * later top-down mmap() search finds no space and the kernel falls back
 * to searching bottom up (see do_mmap_placement_test()).
 */
static void force_bottom_up(void)
{
	FILE *fh;
	char buf[1024];
	unsigned long start, end, size, lastend = 0;

	/* start filling from mmap_min_addr */
	SAFE_FILE_SCANF("/proc/sys/vm/mmap_min_addr", "%lu", &lastend);

	fh = SAFE_FOPEN("/proc/self/maps", "r");

	while (!feof(fh)) {
		if (fgets(buf, sizeof(buf), fh) == NULL)
			goto out;

		if (sscanf(buf, "%lx-%lx", &start, &end) != 2) {
			tst_brk(TBROK | TERRNO, "sscanf");
			goto out;
		}

		/* Hole between the previous VMA's end and this VMA's start. */
		size = start - lastend;

		/* Skip the PROT_NONE that was just added (!size). */
		if (!size) {
			lastend = end;
			continue;
		}

		/* If the next area is the stack, quit. */
		if (!!strstr(buf, "[stack]"))
			break;

		/* This is not cleaned up. */
		SAFE_MMAP((void *)lastend, size, PROT_NONE,
			  MAP_ANON|MAP_PRIVATE|MAP_FIXED_NOREPLACE, -1, 0);

		lastend = end;
	}

out:
	SAFE_FCLOSE(fh);
}
137
/*
 * Locate the stack VMA in /proc/self/maps.
 *
 * On success, *stack_size is set to the VMA's length and the stack end
 * nearest to the guard gap is returned: the low address when the stack
 * grows down, the high address otherwise. Returns -1UL if no "[stack"
 * entry was found.
 */
unsigned long read_stack_addr_from_proc(unsigned long *stack_size)
{
	FILE *fh;
	char buf[1024];
	unsigned long stack_top = -1UL, start, end;

	fh = SAFE_FOPEN("/proc/self/maps", "r");

	while (!feof(fh)) {
		if (fgets(buf, sizeof(buf), fh) == NULL) {
			tst_brk(TBROK | TERRNO, "fgets");
			goto out;
		}

		/* Prefix match: also catches per-thread "[stack:<tid>]" entries. */
		if (!strstr(buf, "[stack"))
			continue;

		if (sscanf(buf, "%lx-%lx", &start, &end) != 2) {
			tst_brk(TBROK | TERRNO, "sscanf");
			goto out;
		}

		*stack_size = end - start;

		if (STACK_GROWSDOWN)
			stack_top = start;
		else
			stack_top = end;
		break;
	}

out:
	SAFE_FCLOSE(fh);
	return stack_top;
}
173
/*
 * Debug helper: dump this process's /proc/<pid>/maps via tst_cmd().
 * Only compiled in under #ifdef DEBUG (see do_child()).
 */
void dump_proc_self_maps(void)
{
	static char buf[64];
	static const char *cmd[] = {"cat", buf, NULL};

	/* snprintf bounds the write; a pid always fits in 64 bytes anyway. */
	snprintf(buf, sizeof(buf), "/proc/%d/maps", getpid());
	tst_cmd(cmd, NULL, NULL, 0);
}
181
/*
 * Grow the stack by `required` bytes now (while RLIMIT_STACK is set to
 * infinity) so later function calls don't move the stack any further.
 * noinline keeps the alloca() in its own frame.
 */
void __attribute__((noinline)) preallocate_stack(unsigned long required)
{
	volatile char *garbage;

	garbage = alloca(required);
	/* Touch both ends so the whole range is actually faulted in. */
	garbage[0] = garbage[required - 1] = '\0';
}
189
do_mmap_placement_test(unsigned long stack_addr,unsigned long gap)190 static void do_mmap_placement_test(unsigned long stack_addr, unsigned long gap)
191 {
192 void *map_test_gap;
193
194 force_bottom_up();
195
196 /*
197 * force_bottom_up() used up all the spaces below the stack. The search down
198 * path should fail, and search up might take a look at the guard gap
199 * region. If it avoids it, the allocation will be above the stack. If it
200 * uses it, the allocation will be in the gap and the test should fail.
201 */
202 map_test_gap = SAFE_MMAP(0, MAPPED_LEN,
203 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0);
204
205 if (stack_addr - gap <= (unsigned long)map_test_gap &&
206 (unsigned long)map_test_gap <= stack_addr) {
207 tst_res(TFAIL, "New mmap was placed in the guard gap.");
208 SAFE_MUNMAP(map_test_gap, MAPPED_LEN);
209 }
210 }
211
/*
 * Child process body: set up the near-stack mapping and the fault
 * handler, then exhaust the stack toward the mapping.
 *
 * Order matters: the stack is preallocated before /proc/self/maps is
 * read and before the mmap(), so nothing below moves the stack further.
 * Exits via segv_handler() with EXIT_SUCCESS/EXIT_FAILURE; returning
 * from here means the fault never happened (caller exits EXIT_TESTBROKE).
 */
void do_child(void)
{
	unsigned long stack_addr, stack_size;
	stack_t signal_stack;
	struct sigaction segv_sig = {.sa_sigaction = segv_handler, .sa_flags = SA_ONSTACK|SA_SIGINFO};
	void *map;
	unsigned long gap = GAP_PAGES * page_size;
	struct rlimit rlimit;

	/* Unlimited stack so the recursion is stopped by the gap, not the rlimit. */
	rlimit.rlim_cur = rlimit.rlim_max = RLIM_INFINITY;
	SAFE_SETRLIMIT(RLIMIT_STACK, &rlimit);

	preallocate_stack(REQ_STACK_SIZE);

	stack_addr = read_stack_addr_from_proc(&stack_size);
	if (stack_addr == -1UL) {
		tst_brk(TBROK, "can't read stack top from /proc/self/maps");
		return;
	}

	/* Place the mapping exactly `gap` away from the stack end. */
	if (STACK_GROWSDOWN)
		mapped_addr = stack_addr - gap - MAPPED_LEN;
	else
		mapped_addr = stack_addr + gap;

	mapped_addr &= page_mask;
	map = SAFE_MMAP((void *)mapped_addr, MAPPED_LEN,
			PROT_READ|PROT_WRITE,
			MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
	tst_res(TINFO, "Stack:0x%lx+0x%lx mmap:%p+0x%lx",
		stack_addr, stack_size, map, MAPPED_LEN);

	/* The handler must run off the exhausted stack: use an altstack. */
	signal_stack.ss_sp = SAFE_MALLOC(SIGNAL_STACK_SIZE);
	signal_stack.ss_size = SIGNAL_STACK_SIZE;
	signal_stack.ss_flags = 0;
	if (sigaltstack(&signal_stack, NULL) == -1) {
		tst_brk(TBROK | TERRNO, "sigaltstack");
		return;
	}
	if (sigaction(SIGSEGV, &segv_sig, NULL) == -1 ||
	    sigaction(SIGBUS, &segv_sig, NULL) == -1) {
		tst_brk(TBROK | TERRNO, "sigaction");
		return;
	}

#ifdef DEBUG
	dump_proc_self_maps();
#endif

#ifdef __x86_64__
	do_mmap_placement_test(stack_addr, gap);
#endif

	/* Now see if it can grow too close to an adjacent region. */
	exhaust_stack_into_sigsegv();
}
268
setup(void)269 void setup(void)
270 {
271 char buf[4096], *p;
272
273 page_size = sysconf(_SC_PAGESIZE);
274 page_mask = ~(page_size - 1);
275
276 buf[4095] = '\0';
277 SAFE_FILE_SCANF("/proc/cmdline", "%4095[^\n]", buf);
278
279 if ((p = strstr(buf, "stack_guard_gap=")) != NULL) {
280 if (sscanf(p, "stack_guard_gap=%ld", &GAP_PAGES) != 1) {
281 tst_brk(TBROK | TERRNO, "sscanf");
282 return;
283 }
284 tst_res(TINFO, "stack_guard_gap = %ld", GAP_PAGES);
285 }
286
287 THRESHOLD = (GAP_PAGES - 1) * page_size;
288
289 {
290 volatile int *a = alloca(128);
291
292 {
293 volatile int *b = alloca(128);
294
295 STACK_GROWSDOWN = a > b;
296 tst_res(TINFO, "STACK_GROWSDOWN = %d == %p > %p", STACK_GROWSDOWN, a, b);
297 }
298 }
299 }
300
stack_clash_test(void)301 void stack_clash_test(void)
302 {
303 int status;
304 pid_t pid;
305
306 pid = SAFE_FORK();
307 if (!pid) {
308 do_child();
309 exit(EXIT_TESTBROKE);
310 return;
311 }
312
313 SAFE_WAITPID(pid, &status, 0);
314
315 if (WIFEXITED(status)) {
316 switch (WEXITSTATUS(status)) {
317 case EXIT_FAILURE:
318 tst_res(TFAIL, "stack is too close to the mmaped area");
319 return;
320 case EXIT_SUCCESS:
321 tst_res(TPASS, "stack is far enough from mmaped area");
322 return;
323 default:
324 case EXIT_TESTBROKE:
325 break;
326 }
327 }
328
329 tst_brk(TBROK, "Child %s", tst_strstatus(status));
330 }
331
static struct tst_test test = {
	.forks_child = 1,	/* the clash attempt runs in a forked child */
	.needs_root = 1,
	.setup = setup,
	.test_all = stack_clash_test,
	.tags = (const struct tst_tag[]) {
		{"CVE", "2017-1000364"},
		{"linux-git", "58c5d0d6d522"},
		{}
	}
};
343