/*
 * Copyright (c) 2017 Pavel Boldin <pboldin@cloudlinux.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Adapted from code by Michal Hocko.
 */

/* This is a regression test for the Stack Clash [1] vulnerability. It checks
 * that there is a stack guard gap of at least 256 * PAGE_SIZE, which is
 * considered hard to hop over. Code adapted from Novell's bugzilla [2].
 *
 * The code `mmap(2)`s a region close to the end of the stack. It then
 * allocates memory on the stack until it hits the guard page and the kernel
 * generates SIGSEGV or SIGBUS. The signal handler checks that the fault
 * address is farther than THRESHOLD away from the mmapped area.
 *
 * We read /proc/self/maps to find the exact top of the stack and `mmap(2)`
 * our region exactly GAP_PAGES * PAGE_SIZE away from it. We read
 * /proc/cmdline to see whether a different stack_guard_gap size is
 * configured. We set the stack limit to infinity and preallocate
 * REQ_STACK_SIZE bytes of stack so that no call made after the `mmap` moves
 * the stack any further.
 *
 * [1] https://blog.qualys.com/securitylabs/2017/06/19/the-stack-clash
 * [2] https://bugzilla.novell.com/show_bug.cgi?id=CVE-2017-1000364
 */

#include <sys/mman.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>
#include <alloca.h>
#include <signal.h>
#include <stdlib.h>

#include "tst_test.h"
#include "tst_safe_stdio.h"

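/*
 * GAP_PAGES defaults to 256 and may be overridden through the
 * stack_guard_gap= kernel command-line parameter (parsed in setup()).
 * THRESHOLD is the minimum distance, in bytes, that the faulting address
 * must be from the mmapped region for the test to pass.
 */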
static unsigned long page_size;
static unsigned long page_mask;
static unsigned long GAP_PAGES = 256;
static unsigned long THRESHOLD;
static int STACK_GROWSDOWN;

#define SIGNAL_STACK_SIZE	(1UL<<20)
#define FRAME_SIZE		1024
#define REQ_STACK_SIZE		(1024 * 1024)

#define EXIT_TESTBROKE		TBROK

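/*
 * Recurse forever, touching one byte in each roughly FRAME_SIZE-byte frame,
 * until the stack walks into the guard gap and the kernel delivers
 * SIGSEGV (or SIGBUS).
 */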
void exhaust_stack_into_sigsegv(void)
{
	volatile char *ptr = alloca(FRAME_SIZE - sizeof(long));
	*ptr = '\0';
	exhaust_stack_into_sigsegv();
}

#define MAPPED_LEN page_size
static unsigned long mapped_addr;

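/*
 * Runs on the alternate signal stack. Measures how far the faulting address
 * is from the mmapped region and exits the child with EXIT_SUCCESS if the
 * distance is at least THRESHOLD, with EXIT_FAILURE otherwise.
 */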
void segv_handler(int sig, siginfo_t *info, void *data LTP_ATTRIBUTE_UNUSED)
{
	unsigned long fault_addr = (unsigned long)info->si_addr;
	unsigned long mmap_end = mapped_addr + MAPPED_LEN;
	ssize_t diff;

	if (sig != SIGSEGV && sig != SIGBUS)
		return;

	if (STACK_GROWSDOWN)
		diff = fault_addr - mmap_end;
	else
		diff = mapped_addr - fault_addr;

	tst_res(TINFO,
		"mmap = [%lx, %lx), addr = %lx, diff = %zx, THRESHOLD = %lx",
		mapped_addr, mmap_end, fault_addr, diff, THRESHOLD);
	if (diff < 0 || (unsigned long)diff < THRESHOLD)
		_exit(EXIT_FAILURE);
	else
		_exit(EXIT_SUCCESS);
}

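/*
 * Parse /proc/self/maps for the [stack] mapping. Returns the stack top
 * (the lower boundary when the stack grows down, the upper one otherwise)
 * and stores the mapping size in *stack_size; returns -1UL on failure.
 */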
unsigned long read_stack_addr_from_proc(unsigned long *stack_size)
{
	FILE *fh;
	char buf[1024];
	unsigned long stack_top = -1UL, start, end;

	fh = SAFE_FOPEN("/proc/self/maps", "r");

	while (!feof(fh)) {
		if (fgets(buf, sizeof(buf), fh) == NULL) {
			tst_brk(TBROK | TERRNO, "fgets");
			goto out;
		}

		if (!strstr(buf, "[stack"))
			continue;

		if (sscanf(buf, "%lx-%lx", &start, &end) != 2) {
			tst_brk(TBROK | TERRNO, "sscanf");
			goto out;
		}

		*stack_size = end - start;

		if (STACK_GROWSDOWN)
			stack_top = start;
		else
			stack_top = end;
		break;
	}

out:
	SAFE_FCLOSE(fh);
	return stack_top;
}

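/* Dump /proc/<pid>/maps of the current process; only used with -DDEBUG. */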
void dump_proc_self_maps(void)
{
	static char buf[64];
	static const char *cmd[] = {"cat", buf, NULL};
	sprintf(buf, "/proc/%d/maps", getpid());
	tst_run_cmd(cmd, NULL, NULL, 0);
}

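/*
 * Touch `required` bytes of stack up front so that nothing called later in
 * do_child() needs to grow the stack mapping after the neighbouring region
 * has been mmapped.
 */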
void __attribute__((noinline)) preallocate_stack(unsigned long required)
{
	volatile char *garbage;

	garbage = alloca(required);
	garbage[0] = garbage[required - 1] = '\0';
}

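/*
 * Child body: lift RLIMIT_STACK, preallocate the stack, locate the stack top
 * via /proc/self/maps, map a page GAP_PAGES * page_size away from it, install
 * the SIGSEGV/SIGBUS handler on an alternate stack and recurse until the
 * guard gap is hit.
 */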
void do_child(void)
{
	unsigned long stack_addr, stack_size;
	stack_t signal_stack;
	struct sigaction segv_sig = {.sa_sigaction = segv_handler, .sa_flags = SA_ONSTACK|SA_SIGINFO};
	void *map;
	unsigned long gap = GAP_PAGES * page_size;
	struct rlimit rlimit;

	rlimit.rlim_cur = rlimit.rlim_max = RLIM_INFINITY;
	SAFE_SETRLIMIT(RLIMIT_STACK, &rlimit);

	preallocate_stack(REQ_STACK_SIZE);

	stack_addr = read_stack_addr_from_proc(&stack_size);
	if (stack_addr == -1UL) {
		tst_brk(TBROK, "can't read stack top from /proc/self/maps");
		return;
	}

	if (STACK_GROWSDOWN)
		mapped_addr = stack_addr - gap - MAPPED_LEN;
	else
		mapped_addr = stack_addr + gap;

	mapped_addr &= page_mask;
	map = SAFE_MMAP((void *)mapped_addr, MAPPED_LEN,
			PROT_READ|PROT_WRITE,
			MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
	tst_res(TINFO, "Stack:0x%lx+0x%lx mmap:%p+0x%lx",
		stack_addr, stack_size, map, MAPPED_LEN);

	signal_stack.ss_sp = SAFE_MALLOC(SIGNAL_STACK_SIZE);
	signal_stack.ss_size = SIGNAL_STACK_SIZE;
	signal_stack.ss_flags = 0;
	if (sigaltstack(&signal_stack, NULL) == -1) {
		tst_brk(TBROK | TERRNO, "sigaltstack");
		return;
	}
	if (sigaction(SIGSEGV, &segv_sig, NULL) == -1 ||
	    sigaction(SIGBUS, &segv_sig, NULL) == -1) {
		tst_brk(TBROK | TERRNO, "sigaction");
		return;
	}

#ifdef DEBUG
	dump_proc_self_maps();
#endif

	exhaust_stack_into_sigsegv();
}

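/*
 * Read the page size, pick up a stack_guard_gap= override from /proc/cmdline
 * if present, derive THRESHOLD and detect the stack growth direction.
 */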
void setup(void)
{
	char buf[4096], *p;

	page_size = sysconf(_SC_PAGESIZE);
	page_mask = ~(page_size - 1);

	buf[4095] = '\0';
	SAFE_FILE_SCANF("/proc/cmdline", "%4095[^\n]", buf);

	if ((p = strstr(buf, "stack_guard_gap=")) != NULL) {
		if (sscanf(p, "stack_guard_gap=%ld", &GAP_PAGES) != 1) {
			tst_brk(TBROK | TERRNO, "sscanf");
			return;
		}
		tst_res(TINFO, "stack_guard_gap = %ld", GAP_PAGES);
	}

	THRESHOLD = (GAP_PAGES - 1) * page_size;

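	/*
	 * Detect the stack growth direction by comparing the addresses of
	 * two nested alloca() allocations.
	 */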
	{
		volatile int *a = alloca(128);

		{
			volatile int *b = alloca(128);

			STACK_GROWSDOWN = a > b;
			tst_res(TINFO, "STACK_GROWSDOWN = %d == %p > %p", STACK_GROWSDOWN, a, b);
		}
	}
}

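/*
 * Fork a child that attempts the clash and map its exit status to
 * TPASS, TFAIL or TBROK.
 */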
void stack_clash_test(void)
{
	int status;
	pid_t pid;

	pid = SAFE_FORK();
	if (!pid) {
		do_child();
		exit(EXIT_TESTBROKE);
		return;
	}

	SAFE_WAITPID(pid, &status, 0);

	if (WIFEXITED(status)) {
		switch (WEXITSTATUS(status)) {
		case EXIT_FAILURE:
			tst_res(TFAIL, "stack is too close to the mmapped area");
			return;
		case EXIT_SUCCESS:
			tst_res(TPASS, "stack is far enough from the mmapped area");
			return;
		default:
		case EXIT_TESTBROKE:
			break;
		}
	}

	tst_brk(TBROK, "Child %s", tst_strstatus(status));
}

static struct tst_test test = {
	.forks_child = 1,
	.needs_root = 1,
	.setup = setup,
	.test_all = stack_clash_test,
};