1 /* seccomp_bpf_tests.c
2 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
5 *
6 * Test code for seccomp bpf.
7 */
8
9 #include <asm/siginfo.h>
10 #define __have_siginfo_t 1
11 #define __have_sigval_t 1
12 #define __have_sigevent_t 1
13
14 #include <errno.h>
15 #include <linux/filter.h>
16 #include <sys/prctl.h>
17 #include <sys/ptrace.h>
18 #include <sys/user.h>
19 #include <linux/prctl.h>
20 #include <linux/ptrace.h>
21 #include <linux/seccomp.h>
22 #include <pthread.h>
23 #include <semaphore.h>
24 #include <signal.h>
25 #include <stddef.h>
26 #include <stdbool.h>
27 #include <string.h>
28 #include <linux/elf.h>
29 #include <sys/uio.h>
30 #include <sys/utsname.h>
31 #include <fcntl.h> // ANDROID
32 #include <sys/mman.h>
33 #include <sys/times.h>
34
35 #define _GNU_SOURCE
36 #include <unistd.h>
37 #include <sys/syscall.h>
38
39 #include "test_harness.h"
40
41 #ifndef PR_SET_PTRACER
42 # define PR_SET_PTRACER 0x59616d61
43 #endif
44
45 #ifndef PR_SET_NO_NEW_PRIVS
46 #define PR_SET_NO_NEW_PRIVS 38
47 #define PR_GET_NO_NEW_PRIVS 39
48 #endif
49
50 #ifndef PR_SECCOMP_EXT
51 #define PR_SECCOMP_EXT 43
52 #endif
53
54 #ifndef SECCOMP_EXT_ACT
55 #define SECCOMP_EXT_ACT 1
56 #endif
57
58 #ifndef SECCOMP_EXT_ACT_TSYNC
59 #define SECCOMP_EXT_ACT_TSYNC 1
60 #endif
61
62 #ifndef SECCOMP_MODE_STRICT
63 #define SECCOMP_MODE_STRICT 1
64 #endif
65
66 #ifndef SECCOMP_MODE_FILTER
67 #define SECCOMP_MODE_FILTER 2
68 #endif
69
70 #ifndef SECCOMP_RET_KILL
71 #define SECCOMP_RET_KILL 0x00000000U // kill the task immediately
72 #define SECCOMP_RET_TRAP 0x00030000U // disallow and force a SIGSYS
73 #define SECCOMP_RET_ERRNO 0x00050000U // returns an errno
74 #define SECCOMP_RET_TRACE 0x7ff00000U // pass to a tracer or disallow
75 #define SECCOMP_RET_ALLOW 0x7fff0000U // allow
76
77 /* Masks for the return value sections. */
78 #define SECCOMP_RET_ACTION 0x7fff0000U
79 #define SECCOMP_RET_DATA 0x0000ffffU
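/*
 * A filter return value is the bitwise OR of an action and 16 bits of
 * data, e.g. (SECCOMP_RET_ERRNO | E2BIG). The kernel splits the value back
 * apart with SECCOMP_RET_ACTION and SECCOMP_RET_DATA, which is how the
 * ERRNO_* and TRACE_* tests below carry an errno value or an event message.
 */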
80
81 struct seccomp_data {
82 int nr;
83 __u32 arch;
84 __u64 instruction_pointer;
85 __u64 args[6];
86 };
87 #endif
88
89 #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
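/*
 * Each seccomp_data arg is 64 bits wide, but the BPF_LD|BPF_W loads used
 * throughout these tests fetch only 32 bits at this offset, i.e. the low
 * word on little-endian machines. Argument-matching tests therefore only
 * inspect the lower 32 bits (see the KILL_one_arg_* tests).
 */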
90
91 #define SIBLING_EXIT_UNKILLED 0xbadbeef
92 #define SIBLING_EXIT_FAILURE 0xbadface
93 #define SIBLING_EXIT_NEWPRIVS 0xbadfeed
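/*
 * Magic values returned by the TSYNC sibling threads and collected with
 * pthread_join(), so the main thread can tell "survived unkilled",
 * "setup failed", and "no_new_privs was missing" apart.
 */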
94
95 TEST(mode_strict_support) {
96 long ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
97 ASSERT_EQ(0, ret) {
98 TH_LOG("Kernel does not support CONFIG_SECCOMP");
99 }
100 syscall(__NR_exit, 1);
101 }
102
103 TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL) {
104 long ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
105 ASSERT_EQ(0, ret) {
106 TH_LOG("Kernel does not support CONFIG_SECCOMP");
107 }
108 syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
109 EXPECT_FALSE(true) {
110 TH_LOG("Unreachable!");
111 }
112 }
113
114 /* Note! This doesn't test no new privs behavior */
115 TEST(no_new_privs_support) {
116 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
117 EXPECT_EQ(0, ret) {
118 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
119 }
120 }
121
122 /* Tests kernel support by checking for a copy_from_user() fault on NULL. */
123 TEST(mode_filter_support) {
124 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
125 ASSERT_EQ(0, ret) {
126 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
127 }
128 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
129 EXPECT_EQ(-1, ret);
130 EXPECT_EQ(EFAULT, errno) {
131 TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
132 }
133 }
134
135 TEST(mode_filter_without_nnp) {
136 struct sock_filter filter[] = {
137 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
138 };
139 struct sock_fprog prog = {
140 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
141 .filter = filter,
142 };
143 long ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
144 ASSERT_LE(0, ret) {
145 TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
146 }
147 errno = 0;
148 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
149 /* Succeeds with CAP_SYS_ADMIN, fails without */
150 /* TODO(wad) check caps not euid */
151 if (geteuid()) {
152 EXPECT_EQ(-1, ret);
153 EXPECT_EQ(EACCES, errno);
154 } else {
155 EXPECT_EQ(0, ret);
156 }
157 }
158
159 #define MAX_INSNS_PER_PATH 32768
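/*
 * Mirrors the kernel's per-task cap on the combined size of all attached
 * filters; each attached filter is expected to be charged its instruction
 * count plus a small fixed penalty (the "+ 4" used in the log below).
 */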
160
161 TEST(filter_size_limits) {
162 int i;
163 int count = BPF_MAXINSNS + 1;
164 struct sock_filter allow[] = {
165 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
166 };
167 struct sock_filter *filter;
168 struct sock_fprog prog = { };
169
170 filter = calloc(count, sizeof(*filter));
171 ASSERT_NE(NULL, filter);
172
173 for (i = 0; i < count; i++) {
174 filter[i] = allow[0];
175 }
176
177 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
178 ASSERT_EQ(0, ret);
179
180 prog.filter = filter;
181 prog.len = count;
182
183 /* Too many filter instructions in a single filter. */
184 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
185 ASSERT_NE(0, ret) {
186 TH_LOG("Installing %d insn filter was allowed", prog.len);
187 }
188
189 /* One less is okay, though. */
190 prog.len -= 1;
191 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
192 ASSERT_EQ(0, ret) {
193 TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
194 }
195 }
196
197 TEST(filter_chain_limits) {
198 int i;
199 int count = BPF_MAXINSNS;
200 struct sock_filter allow[] = {
201 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
202 };
203 struct sock_filter *filter;
204 struct sock_fprog prog = { };
205
206 filter = calloc(count, sizeof(*filter));
207 ASSERT_NE(NULL, filter);
208
209 for (i = 0; i < count; i++) {
210 filter[i] = allow[0];
211 }
212
213 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
214 ASSERT_EQ(0, ret);
215
216 prog.filter = filter;
217 prog.len = 1;
218
219 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
220 ASSERT_EQ(0, ret);
221
222 prog.len = count;
223
224 /* Too many total filter instructions. */
225 for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
226 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
227 if (ret != 0)
228 break;
229 }
230 ASSERT_NE(0, ret) {
231 TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
232 i, count, i * (count + 4));
233 }
234 }
235
236 TEST(mode_filter_cannot_move_to_strict) {
237 struct sock_filter filter[] = {
238 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
239 };
240 struct sock_fprog prog = {
241 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
242 .filter = filter,
243 };
244
245 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
246 ASSERT_EQ(0, ret);
247
248 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
249 ASSERT_EQ(0, ret);
250
251 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
252 EXPECT_EQ(-1, ret);
253 EXPECT_EQ(EINVAL, errno);
254 }
255
256
257 TEST(mode_filter_get_seccomp) {
258 struct sock_filter filter[] = {
259 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
260 };
261 struct sock_fprog prog = {
262 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
263 .filter = filter,
264 };
265
266 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
267 ASSERT_EQ(0, ret);
268
269 ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
270 EXPECT_EQ(0, ret);
271
272 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
273 ASSERT_EQ(0, ret);
274
275 ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
276 EXPECT_EQ(2, ret);
277 }
278
279
280 TEST(ALLOW_all) {
281 struct sock_filter filter[] = {
282 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
283 };
284 struct sock_fprog prog = {
285 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
286 .filter = filter,
287 };
288
289 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
290 ASSERT_EQ(0, ret);
291
292 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
293 ASSERT_EQ(0, ret);
294 }
295
296 TEST(empty_prog) {
297 struct sock_filter filter[] = {
298 };
299 struct sock_fprog prog = {
300 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
301 .filter = filter,
302 };
303
304 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
305 ASSERT_EQ(0, ret);
306
307 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
308 EXPECT_EQ(-1, ret);
309 EXPECT_EQ(EINVAL, errno);
310 }
311
312 TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS) {
313 struct sock_filter filter[] = {
314 BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
315 };
316 struct sock_fprog prog = {
317 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
318 .filter = filter,
319 };
320
321 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
322 ASSERT_EQ(0, ret);
323
324 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
325 ASSERT_EQ(0, ret);
326 EXPECT_EQ(0, syscall(__NR_getpid)) {
327 TH_LOG("getpid() shouldn't ever return");
328 }
329 }
330
331 /* return code >= 0x80000000 is unused. */
332 TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS) {
333 struct sock_filter filter[] = {
334 BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
335 };
336 struct sock_fprog prog = {
337 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
338 .filter = filter,
339 };
340
341 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
342 ASSERT_EQ(0, ret);
343
344 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
345 ASSERT_EQ(0, ret);
346 EXPECT_EQ(0, syscall(__NR_getpid)) {
347 TH_LOG("getpid() shouldn't ever return");
348 }
349 }
350
351 TEST_SIGNAL(KILL_all, SIGSYS) {
352 struct sock_filter filter[] = {
353 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
354 };
355 struct sock_fprog prog = {
356 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
357 .filter = filter,
358 };
359
360 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
361 ASSERT_EQ(0, ret);
362
363 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
364 ASSERT_EQ(0, ret);
365 }
366
367 TEST_SIGNAL(KILL_one, SIGSYS) {
368 struct sock_filter filter[] = {
369 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
370 offsetof(struct seccomp_data, nr)),
371 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
372 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
373 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
374 };
375 struct sock_fprog prog = {
376 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
377 .filter = filter,
378 };
379 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
380 pid_t parent = getppid();
381 ASSERT_EQ(0, ret);
382
383 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
384 ASSERT_EQ(0, ret);
385
386 EXPECT_EQ(parent, syscall(__NR_getppid));
387 /* getpid() should never return. */
388 EXPECT_EQ(0, syscall(__NR_getpid));
389 }
390
391 TEST_SIGNAL(KILL_one_arg_one, SIGSYS) {
392 void *fatal_address;
393 struct sock_filter filter[] = {
394 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
395 offsetof(struct seccomp_data, nr)),
396 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0),
397 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
398 /* Only bother with the lower 32 bits for now. */
399 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
400 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K,
401 (unsigned long)&fatal_address, 0, 1),
402 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
403 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
404 };
405 struct sock_fprog prog = {
406 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
407 .filter = filter,
408 };
409 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
410 pid_t parent = getppid();
411 struct tms timebuf;
412 clock_t clock = times(&timebuf);
413 ASSERT_EQ(0, ret);
414
415 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
416 ASSERT_EQ(0, ret);
417
418 EXPECT_EQ(parent, syscall(__NR_getppid));
419 EXPECT_LE(clock, syscall(__NR_times, &timebuf));
420 /* times() should never return. */
421 EXPECT_EQ(0, syscall(__NR_times, &fatal_address));
422 }
423
424 TEST_SIGNAL(KILL_one_arg_six, SIGSYS) {
425 #ifndef __NR_mmap2
426 int sysno = __NR_mmap;
427 #else
428 int sysno = __NR_mmap2;
429 #endif
430 struct sock_filter filter[] = {
431 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
432 offsetof(struct seccomp_data, nr)),
433 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0),
434 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
435 /* Only bother with the lower 32 bits for now. */
436 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
437 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
438 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
439 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
440 };
441 struct sock_fprog prog = {
442 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
443 .filter = filter,
444 };
445 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
446 pid_t parent = getppid();
447 int fd;
448 void *map1, *map2;
449 ASSERT_EQ(0, ret);
450
451 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
452 ASSERT_EQ(0, ret);
453
454 fd = open("/dev/zero", O_RDONLY);
455 ASSERT_NE(-1, fd);
456
457 EXPECT_EQ(parent, syscall(__NR_getppid));
458 map1 = (void *)syscall(sysno,
459 NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, PAGE_SIZE);
460 EXPECT_NE(MAP_FAILED, map1);
461 /* mmap2() should never return. */
462 map2 = (void *)syscall(sysno,
463 NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
464 EXPECT_EQ(MAP_FAILED, map2);
465
466 /* The test failed, so clean up the resources. */
467 munmap(map1, PAGE_SIZE);
468 munmap(map2, PAGE_SIZE);
469 close(fd);
470 }
471
472 /* TODO(wad) add 64-bit versus 32-bit arg tests. */
473
474 TEST(arg_out_of_range) {
475 struct sock_filter filter[] = {
476 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
477 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
478 };
479 struct sock_fprog prog = {
480 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
481 .filter = filter,
482 };
483 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
484 ASSERT_EQ(0, ret);
485
486 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
487 EXPECT_EQ(-1, ret);
488 EXPECT_EQ(EINVAL, errno);
489 }
490
491 TEST(ERRNO_one) {
492 struct sock_filter filter[] = {
493 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
494 offsetof(struct seccomp_data, nr)),
495 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
496 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | E2BIG),
497 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
498 };
499 struct sock_fprog prog = {
500 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
501 .filter = filter,
502 };
503 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
504 pid_t parent = getppid();
505 ASSERT_EQ(0, ret);
506
507 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
508 ASSERT_EQ(0, ret);
509
510 EXPECT_EQ(parent, syscall(__NR_getppid));
511 EXPECT_EQ(-1, read(0, NULL, 0));
512 EXPECT_EQ(E2BIG, errno);
513 }
514
515 TEST(ERRNO_one_ok) {
516 struct sock_filter filter[] = {
517 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
518 offsetof(struct seccomp_data, nr)),
519 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
520 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 0),
521 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
522 };
523 struct sock_fprog prog = {
524 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
525 .filter = filter,
526 };
527 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
528 pid_t parent = getppid();
529 ASSERT_EQ(0, ret);
530
531 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
532 ASSERT_EQ(0, ret);
533
534 EXPECT_EQ(parent, syscall(__NR_getppid));
535 /* "errno" of 0 is ok. */
536 EXPECT_EQ(0, read(0, NULL, 0));
537 }
538
539 FIXTURE_DATA(TRAP) {
540 struct sock_fprog prog;
541 };
542
543 FIXTURE_SETUP(TRAP) {
544 struct sock_filter filter[] = {
545 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
546 offsetof(struct seccomp_data, nr)),
547 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
548 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
549 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
550 };
551 memset(&self->prog, 0, sizeof(self->prog));
552 self->prog.filter = malloc(sizeof(filter));
553 ASSERT_NE(NULL, self->prog.filter);
554 memcpy(self->prog.filter, filter, sizeof(filter));
555 self->prog.len = (unsigned short)(sizeof(filter)/sizeof(filter[0]));
556 }
557
558 FIXTURE_TEARDOWN(TRAP) {
559 if (self->prog.filter)
560 free(self->prog.filter);
561 };
562
563 TEST_F_SIGNAL(TRAP, dfl, SIGSYS) {
564 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
565 ASSERT_EQ(0, ret);
566
567 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
568 ASSERT_EQ(0, ret);
569 syscall(__NR_getpid);
570 }
571
572 /* Ensure that SIGSYS overrides SIG_IGN */
573 TEST_F_SIGNAL(TRAP, ign, SIGSYS) {
574 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
575 ASSERT_EQ(0, ret);
576
577 signal(SIGSYS, SIG_IGN);
578
579 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
580 ASSERT_EQ(0, ret);
581 syscall(__NR_getpid);
582 }
583
584 static struct siginfo TRAP_info;
585 static volatile int TRAP_nr;
586 static void TRAP_action(int nr, siginfo_t *info, void *void_context)
587 {
588 memcpy(&TRAP_info, info, sizeof(TRAP_info));
589 TRAP_nr = nr;
590 return;
591 }
592
593 TEST_F(TRAP, handler) {
594 int ret, test;
595 struct sigaction act;
596 sigset_t mask;
597 memset(&act, 0, sizeof(act));
598 sigemptyset(&mask);
599 sigaddset(&mask, SIGSYS);
600
601 act.sa_sigaction = &TRAP_action;
602 act.sa_flags = SA_SIGINFO;
603 ret = sigaction(SIGSYS, &act, NULL);
604 ASSERT_EQ(0, ret) {
605 TH_LOG("sigaction failed");
606 }
607 ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
608 ASSERT_EQ(0, ret) {
609 TH_LOG("sigprocmask failed");
610 }
611
612 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
613 ASSERT_EQ(0, ret);
614 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
615 ASSERT_EQ(0, ret);
616 TRAP_nr = 0;
617 memset(&TRAP_info, 0, sizeof(TRAP_info));
618 /* Expect the registers to be rolled back. (nr = error) may vary
619 * based on arch. */
620 ret = syscall(__NR_getpid);
621 /* Silence gcc warning about volatile. */
622 test = TRAP_nr;
623 EXPECT_EQ(SIGSYS, test);
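/*
 * Pull the SIGSYS fields out of siginfo_t by hand: when the libc headers
 * lack si_syscall, the _sigsys members are assumed to start at the same
 * offset in the siginfo union as si_pid, so a local struct is overlaid there.
 */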
624 struct local_sigsys {
625 void *_call_addr; /* calling user insn */
626 int _syscall; /* triggering system call number */
627 unsigned int _arch; /* AUDIT_ARCH_* of syscall */
628 } *sigsys = (struct local_sigsys *)
629 #ifdef si_syscall
630 &(TRAP_info.si_call_addr);
631 #else
632 &TRAP_info.si_pid;
633 #endif
634 EXPECT_EQ(__NR_getpid, sigsys->_syscall);
635 /* Make sure arch is non-zero. */
636 EXPECT_NE(0, sigsys->_arch);
637 EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
638 }
639
640 FIXTURE_DATA(precedence) {
641 struct sock_fprog allow;
642 struct sock_fprog trace;
643 struct sock_fprog error;
644 struct sock_fprog trap;
645 struct sock_fprog kill;
646 };
647
648 FIXTURE_SETUP(precedence) {
649 struct sock_filter allow_insns[] = {
650 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
651 };
652 struct sock_filter trace_insns[] = {
653 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
654 offsetof(struct seccomp_data, nr)),
655 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
656 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
657 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
658 };
659 struct sock_filter error_insns[] = {
660 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
661 offsetof(struct seccomp_data, nr)),
662 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
663 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
664 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
665 };
666 struct sock_filter trap_insns[] = {
667 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
668 offsetof(struct seccomp_data, nr)),
669 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
670 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
671 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
672 };
673 struct sock_filter kill_insns[] = {
674 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
675 offsetof(struct seccomp_data, nr)),
676 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
677 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
678 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
679 };
680 memset(self, 0, sizeof(*self));
681 #define FILTER_ALLOC(_x) \
682 self->_x.filter = malloc(sizeof(_x##_insns)); \
683 ASSERT_NE(NULL, self->_x.filter); \
684 memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
685 self->_x.len = (unsigned short)(sizeof(_x##_insns)/sizeof(_x##_insns[0]))
686 FILTER_ALLOC(allow);
687 FILTER_ALLOC(trace);
688 FILTER_ALLOC(error);
689 FILTER_ALLOC(trap);
690 FILTER_ALLOC(kill);
691 }
692
693 FIXTURE_TEARDOWN(precedence) {
694 #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
695 FILTER_FREE(allow);
696 FILTER_FREE(trace);
697 FILTER_FREE(error);
698 FILTER_FREE(trap);
699 FILTER_FREE(kill);
700 }
701
702 TEST_F(precedence, allow_ok) {
703 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
704 pid_t parent = getppid();
705 pid_t res = 0;
706 ASSERT_EQ(0, ret);
707
708 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
709 ASSERT_EQ(0, ret);
710 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
711 ASSERT_EQ(0, ret);
712 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
713 ASSERT_EQ(0, ret);
714 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
715 ASSERT_EQ(0, ret);
716 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
717 ASSERT_EQ(0, ret);
718 /* Should work just fine. */
719 res = syscall(__NR_getppid);
720 EXPECT_EQ(parent, res);
721 }
722
723 TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS) {
724 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
725 pid_t parent = getppid();
726 pid_t res = 0;
727 ASSERT_EQ(0, ret);
728
729 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
730 ASSERT_EQ(0, ret);
731 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
732 ASSERT_EQ(0, ret);
733 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
734 ASSERT_EQ(0, ret);
735 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
736 ASSERT_EQ(0, ret);
737 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
738 ASSERT_EQ(0, ret);
739 /* Should work just fine. */
740 res = syscall(__NR_getppid);
741 EXPECT_EQ(parent, res);
742 /* getpid() should never return. */
743 res = syscall(__NR_getpid);
744 EXPECT_EQ(0, res);
745 }
746
747 TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS) {
748 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
749 pid_t parent = getppid();
750 ASSERT_EQ(0, ret);
751
752 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
753 ASSERT_EQ(0, ret);
754 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
755 ASSERT_EQ(0, ret);
756 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
757 ASSERT_EQ(0, ret);
758 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
759 ASSERT_EQ(0, ret);
760 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
761 ASSERT_EQ(0, ret);
762 /* Should work just fine. */
763 EXPECT_EQ(parent, syscall(__NR_getppid));
764 /* getpid() should never return. */
765 EXPECT_EQ(0, syscall(__NR_getpid));
766 }
767
768 TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS) {
769 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
770 pid_t parent = getppid();
771 ASSERT_EQ(0, ret);
772
773 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
774 ASSERT_EQ(0, ret);
775 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
776 ASSERT_EQ(0, ret);
777 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
778 ASSERT_EQ(0, ret);
779 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
780 ASSERT_EQ(0, ret);
781 /* Should work just fine. */
782 EXPECT_EQ(parent, syscall(__NR_getppid));
783 /* getpid() should never return. */
784 EXPECT_EQ(0, syscall(__NR_getpid));
785 }
786
787 TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS) {
788 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
789 pid_t parent = getppid();
790 ASSERT_EQ(0, ret);
791
792 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
793 ASSERT_EQ(0, ret);
794 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
795 ASSERT_EQ(0, ret);
796 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
797 ASSERT_EQ(0, ret);
798 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
799 ASSERT_EQ(0, ret);
800 /* Should work just fine. */
801 EXPECT_EQ(parent, syscall(__NR_getppid));
802 /* getpid() should never return. */
803 EXPECT_EQ(0, syscall(__NR_getpid));
804 }
805
806 TEST_F(precedence, errno_is_third) {
807 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
808 pid_t parent = getppid();
809 ASSERT_EQ(0, ret);
810
811 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
812 ASSERT_EQ(0, ret);
813 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
814 ASSERT_EQ(0, ret);
815 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
816 ASSERT_EQ(0, ret);
817 /* Should work just fine. */
818 EXPECT_EQ(parent, syscall(__NR_getppid));
819 EXPECT_EQ(0, syscall(__NR_getpid));
820 }
821
822 TEST_F(precedence, errno_is_third_in_any_order) {
823 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
824 pid_t parent = getppid();
825 ASSERT_EQ(0, ret);
826
827 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
828 ASSERT_EQ(0, ret);
829 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
830 ASSERT_EQ(0, ret);
831 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
832 ASSERT_EQ(0, ret);
833 /* Should work just fine. */
834 EXPECT_EQ(parent, syscall(__NR_getppid));
835 EXPECT_EQ(0, syscall(__NR_getpid));
836 }
837
838 TEST_F(precedence, trace_is_fourth) {
839 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
840 pid_t parent = getppid();
841 ASSERT_EQ(0, ret);
842
843 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
844 ASSERT_EQ(0, ret);
845 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
846 ASSERT_EQ(0, ret);
847 /* Should work just fine. */
848 EXPECT_EQ(parent, syscall(__NR_getppid));
849 /* No ptracer */
850 EXPECT_EQ(-1, syscall(__NR_getpid));
851 }
852
853 TEST_F(precedence, trace_is_fourth_in_any_order) {
854 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
855 pid_t parent = getppid();
856 ASSERT_EQ(0, ret);
857
858 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
859 ASSERT_EQ(0, ret);
860 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
861 ASSERT_EQ(0, ret);
862 /* Should work just fine. */
863 EXPECT_EQ(parent, syscall(__NR_getppid));
864 /* No ptracer */
865 EXPECT_EQ(-1, syscall(__NR_getpid));
866 }
867
868 #ifndef PTRACE_O_TRACESECCOMP
869 #define PTRACE_O_TRACESECCOMP 0x00000080
870 #endif
871
872 /* Catch the Ubuntu 12.04 value error. */
873 #if PTRACE_EVENT_SECCOMP != 7
874 #undef PTRACE_EVENT_SECCOMP
875 #endif
876
877 #ifndef PTRACE_EVENT_SECCOMP
878 #define PTRACE_EVENT_SECCOMP 7
879 #endif
880
881 #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
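/*
 * For a PTRACE_EVENT_* stop, the waitpid() status carries the event number
 * in bits 16 and up, so (status >> 16) identifies a seccomp event stop.
 */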
882 bool tracer_running;
883 void tracer_stop(int sig)
884 {
885 tracer_running = false;
886 }
887
888 typedef void tracer_func_t(struct __test_metadata *_metadata,
889 pid_t tracee, int status, void *args);
890
891 void tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
892 tracer_func_t tracer_func, void *args) {
893 int ret = -1;
894 struct sigaction action = {
895 .sa_handler = tracer_stop,
896 };
897
898 /* Allow external shutdown. */
899 tracer_running = true;
900 ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));
901
902 errno = 0;
903 while (ret == -1 && errno != EINVAL) {
904 ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
905 }
906 ASSERT_EQ(0, ret) {
907 kill(tracee, SIGKILL);
908 }
909 /* Wait for attach stop */
910 wait(NULL);
911
912 ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, PTRACE_O_TRACESECCOMP);
913 ASSERT_EQ(0, ret) {
914 TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
915 kill(tracee, SIGKILL);
916 }
917 ptrace(PTRACE_CONT, tracee, NULL, 0);
918
919 /* Unblock the tracee */
920 ASSERT_EQ(1, write(fd, "A", 1));
921 ASSERT_EQ(0, close(fd));
922
923 /* Run until we're shut down. Must assert to stop execution. */
924 while (tracer_running) {
925 int status;
926 if (wait(&status) != tracee)
927 continue;
928 if (WIFSIGNALED(status) || WIFEXITED(status))
929 /* Child is dead. Time to go. */
930 return;
931
932 /* Make sure this is a seccomp event. */
933 ASSERT_EQ(true, IS_SECCOMP_EVENT(status));
934
935 tracer_func(_metadata, tracee, status, args);
936
937 ret = ptrace(PTRACE_CONT, tracee, NULL, NULL);
938 ASSERT_EQ(0, ret);
939 }
940 /* Directly report the status of our test harness results. */
941 syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
942 }
943
944 /* Common tracer setup/teardown functions. */
945 void cont_handler(int num) {
946 }
947 pid_t setup_trace_fixture(struct __test_metadata *_metadata,
948 tracer_func_t func, void *args) {
949 char sync;
950 int pipefd[2];
951 pid_t tracer_pid;
952 pid_t tracee = getpid();
953
954 /* Setup a pipe for clean synchronization. */
955 ASSERT_EQ(0, pipe(pipefd));
956
957 /* Fork a child which we'll promote to tracer */
958 tracer_pid = fork();
959 ASSERT_LE(0, tracer_pid);
960 signal(SIGALRM, cont_handler);
961 if (tracer_pid == 0) {
962 close(pipefd[0]);
963 tracer(_metadata, pipefd[1], tracee, func, args);
964 syscall(__NR_exit, 0);
965 }
966 close(pipefd[1]);
967 prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
968 read(pipefd[0], &sync, 1);
969 close(pipefd[0]);
970
971 return tracer_pid;
972 }
973 void teardown_trace_fixture(struct __test_metadata *_metadata,
974 pid_t tracer) {
975 if (tracer) {
976 int status;
977 /*
978 * Extract the exit code from the other process and
979 * adopt it for ourselves in case its asserts failed.
980 */
981 ASSERT_EQ(0, kill(tracer, SIGUSR1));
982 ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
983 if (WEXITSTATUS(status))
984 _metadata->passed = 0;
985 }
986 }
987
988 /* "poke" tracer arguments and function. */
989 struct tracer_args_poke_t {
990 unsigned long poke_addr;
991 };
992
993 void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
994 void *args) {
995 int ret;
996 unsigned long msg;
997 struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;
998
999 ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
1000 EXPECT_EQ(0, ret);
1001 /* If this fails, don't try to recover. */
1002 ASSERT_EQ(0x1001, msg) {
1003 kill(tracee, SIGKILL);
1004 }
1005 /*
1006 * Poke in the message.
1007 * Registers are not touched to try to keep this relatively arch
1008 * agnostic.
1009 */
1010 ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
1011 EXPECT_EQ(0, ret);
1012 }
1013
1014 FIXTURE_DATA(TRACE_poke) {
1015 struct sock_fprog prog;
1016 pid_t tracer;
1017 long poked;
1018 struct tracer_args_poke_t tracer_args;
1019 };
1020
1021 FIXTURE_SETUP(TRACE_poke) {
1022 struct sock_filter filter[] = {
1023 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1024 offsetof(struct seccomp_data, nr)),
1025 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
1026 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
1027 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1028 };
1029
1030 self->poked = 0;
1031 memset(&self->prog, 0, sizeof(self->prog));
1032 self->prog.filter = malloc(sizeof(filter));
1033 ASSERT_NE(NULL, self->prog.filter);
1034 memcpy(self->prog.filter, filter, sizeof(filter));
1035 self->prog.len = (unsigned short)(sizeof(filter)/sizeof(filter[0]));
1036
1037 /* Set up tracer args. */
1038 self->tracer_args.poke_addr = (unsigned long)&self->poked;
1039
1040 /* Launch tracer. */
1041 self->tracer = setup_trace_fixture(_metadata, tracer_poke,
1042 &self->tracer_args);
1043 }
1044
1045 FIXTURE_TEARDOWN(TRACE_poke) {
1046 teardown_trace_fixture(_metadata, self->tracer);
1047 if (self->prog.filter)
1048 free(self->prog.filter);
1049 };
1050
1051 TEST_F(TRACE_poke, read_has_side_effects) {
1052 ssize_t ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1053 ASSERT_EQ(0, ret);
1054
1055 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1056 ASSERT_EQ(0, ret);
1057
1058 EXPECT_EQ(0, self->poked);
1059 ret = read(-1, NULL, 0);
1060 EXPECT_EQ(-1, ret);
1061 EXPECT_EQ(0x1001, self->poked);
1062 }
1063
1064 TEST_F(TRACE_poke, getpid_runs_normally) {
1065 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1066 ASSERT_EQ(0, ret);
1067
1068 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1069 ASSERT_EQ(0, ret);
1070
1071 EXPECT_EQ(0, self->poked);
1072 EXPECT_NE(0, syscall(__NR_getpid));
1073 EXPECT_EQ(0, self->poked);
1074 }
1075
1076 #if defined(__x86_64__)
1077 # define ARCH_REGS struct user_regs_struct
1078 # define SYSCALL_NUM orig_rax
1079 # define SYSCALL_RET rax
1080 #elif defined(__i386__)
1081 # define ARCH_REGS struct user_regs_struct
1082 # define SYSCALL_NUM orig_eax
1083 # define SYSCALL_RET eax
1084 #elif defined(__arm__)
1085 # define ARCH_REGS struct pt_regs
1086 # define SYSCALL_NUM ARM_r7
1087 # define SYSCALL_RET ARM_r0
1088 #elif defined(__aarch64__)
1089 # define ARCH_REGS struct user_pt_regs
1090 # define SYSCALL_NUM regs[8]
1091 # define SYSCALL_RET regs[0]
1092 #else
1093 # error "Do not know how to find your architecture's registers and syscalls"
1094 #endif
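/*
 * SYSCALL_NUM/SYSCALL_RET name the per-architecture registers that hold the
 * syscall number and return value in the regset fetched with
 * PTRACE_GETREGSET(NT_PRSTATUS) below.
 */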
1095
1096 /* Architecture-specific syscall fetching routine. */
1097 int get_syscall(struct __test_metadata *_metadata, pid_t tracee) {
1098 struct iovec iov;
1099 ARCH_REGS regs;
1100
1101 iov.iov_base = &regs;
1102 iov.iov_len = sizeof(regs);
1103 EXPECT_EQ(0, ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov)) {
1104 TH_LOG("PTRACE_GETREGSET failed");
1105 return -1;
1106 }
1107
1108 return regs.SYSCALL_NUM;
1109 }
1110
1111 /* Architecture-specific syscall changing routine. */
1112 void change_syscall(struct __test_metadata *_metadata,
1113 pid_t tracee, int syscall) {
1114 struct iovec iov;
1115 int ret;
1116 ARCH_REGS regs;
1117
1118 iov.iov_base = &regs;
1119 iov.iov_len = sizeof(regs);
1120 ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
1121 EXPECT_EQ(0, ret);
1122
1123 #if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
1124 {
1125 regs.SYSCALL_NUM = syscall;
1126 }
1127
1128 #elif defined(__arm__)
1129 # ifndef PTRACE_SET_SYSCALL
1130 # define PTRACE_SET_SYSCALL 23
1131 # endif
1132 {
1133 ret = ptrace(PTRACE_SET_SYSCALL, tracee, NULL, syscall);
1134 EXPECT_EQ(0, ret);
1135 }
1136
1137 #else
1138 ASSERT_EQ(1, 0) {
1139 TH_LOG("How is the syscall changed on this architecture?");
1140 }
1141 #endif
1142
1143 /* If syscall is skipped, change return value. */
1144 if (syscall == -1)
1145 regs.SYSCALL_RET = 1;
1146
1147 ret = ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &iov);
1148 EXPECT_EQ(0, ret);
1149 }
1150
1151 void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
1152 int status, void *args) {
1153 int ret;
1154 unsigned long msg;
1155
1156 /* Make sure we got the right message. */
1157 ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
1158 EXPECT_EQ(0, ret);
1159
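/*
 * msg is the SECCOMP_RET_DATA value attached to SECCOMP_RET_TRACE by the
 * filter installed in FIXTURE_SETUP(TRACE_syscall): 0x1002 for getpid,
 * 0x1003 for gettid, 0x1004 for getppid.
 */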
1160 switch (msg) {
1161 case 0x1002:
1162 /* change getpid to getppid. */
1163 change_syscall(_metadata, tracee, __NR_getppid);
1164 break;
1165 case 0x1003:
1166 /* skip gettid. */
1167 change_syscall(_metadata, tracee, -1);
1168 break;
1169 case 0x1004:
1170 /* do nothing (allow getppid) */
1171 break;
1172 default:
1173 EXPECT_EQ(0, msg) {
1174 TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
1175 kill(tracee, SIGKILL);
1176 }
1177 }
1178
1179 }
1180
1181 FIXTURE_DATA(TRACE_syscall) {
1182 struct sock_fprog prog;
1183 pid_t tracer, mytid, mypid, parent;
1184 };
1185
1186 FIXTURE_SETUP(TRACE_syscall) {
1187 struct sock_filter filter[] = {
1188 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1189 offsetof(struct seccomp_data, nr)),
1190 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
1191 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
1192 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
1193 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
1194 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
1195 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
1196 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1197 };
1198
1199 memset(&self->prog, 0, sizeof(self->prog));
1200 self->prog.filter = malloc(sizeof(filter));
1201 ASSERT_NE(NULL, self->prog.filter);
1202 memcpy(self->prog.filter, filter, sizeof(filter));
1203 self->prog.len = (unsigned short)(sizeof(filter)/sizeof(filter[0]));
1204
1205 /* Prepare some testable syscall results. */
1206 self->mytid = syscall(__NR_gettid);
1207 ASSERT_GT(self->mytid, 0);
1208 ASSERT_NE(self->mytid, 1) {
1209 TH_LOG("Running this test as init is not supported. :)");
1210 }
1211
1212 self->mypid = getpid();
1213 ASSERT_GT(self->mypid, 0);
1214 ASSERT_EQ(self->mytid, self->mypid);
1215
1216 self->parent = getppid();
1217 ASSERT_GT(self->parent, 0);
1218 ASSERT_NE(self->parent, self->mypid);
1219
1220 /* Launch tracer. */
1221 self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL);
1222 }
1223
1224 FIXTURE_TEARDOWN(TRACE_syscall) {
1225 teardown_trace_fixture(_metadata, self->tracer);
1226 if (self->prog.filter)
1227 free(self->prog.filter);
1228 };
1229
1230 TEST_F(TRACE_syscall, syscall_allowed) {
1231 long ret;
1232
1233 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1234 ASSERT_EQ(0, ret);
1235
1236 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1237 ASSERT_EQ(0, ret);
1238
1239 /* getppid works as expected (no changes). */
1240 EXPECT_EQ(self->parent, syscall(__NR_getppid));
1241 EXPECT_NE(self->mypid, syscall(__NR_getppid));
1242 }
1243
1244 TEST_F(TRACE_syscall, syscall_redirected) {
1245 long ret;
1246
1247 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1248 ASSERT_EQ(0, ret);
1249
1250 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1251 ASSERT_EQ(0, ret);
1252
1253 /* getpid has been redirected to getppid as expected. */
1254 EXPECT_EQ(self->parent, syscall(__NR_getpid));
1255 EXPECT_NE(self->mypid, syscall(__NR_getpid));
1256 }
1257
1258 TEST_F(TRACE_syscall, syscall_dropped) {
1259 long ret;
1260
1261 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1262 ASSERT_EQ(0, ret);
1263
1264 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1265 ASSERT_EQ(0, ret);
1266
1267 /* gettid has been skipped and an altered return value stored. */
1268 EXPECT_EQ(1, syscall(__NR_gettid));
1269 EXPECT_NE(self->mytid, syscall(__NR_gettid));
1270 }
1271
1272 #ifndef __NR_seccomp
1273 # if defined(__i386__)
1274 # define __NR_seccomp 354
1275 # elif defined(__x86_64__)
1276 # define __NR_seccomp 317
1277 # elif defined(__arm__)
1278 # define __NR_seccomp 383
1279 # elif defined(__aarch64__)
1280 # define __NR_seccomp 277
1281 # else
1282 # warning "seccomp syscall number unknown for this architecture"
1283 # define __NR_seccomp 0xffff
1284 # endif
1285 #endif
1286
1287 #ifndef SECCOMP_SET_MODE_STRICT
1288 #define SECCOMP_SET_MODE_STRICT 0
1289 #endif
1290
1291 #ifndef SECCOMP_SET_MODE_FILTER
1292 #define SECCOMP_SET_MODE_FILTER 1
1293 #endif
1294
1295 #ifndef SECCOMP_FLAG_FILTER_TSYNC
1296 #define SECCOMP_FLAG_FILTER_TSYNC 1
1297 #endif
1298
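/*
 * Minimal seccomp(2) wrapper: when the C library does not provide one, fall
 * back to the raw syscall using the __NR_seccomp fallbacks defined above;
 * errno is cleared first so callers can check it directly.
 */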
1299 #ifndef seccomp
1300 int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter)
1301 {
1302 errno = 0;
1303 return syscall(__NR_seccomp, op, flags, filter);
1304 }
1305 #endif
1306
1307 TEST(seccomp_syscall) {
1308 struct sock_filter filter[] = {
1309 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1310 };
1311 struct sock_fprog prog = {
1312 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
1313 .filter = filter,
1314 };
1315 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1316 ASSERT_EQ(0, ret) {
1317 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1318 }
1319
1320 /* Reject insane operation. */
1321 ret = seccomp(-1, 0, &prog);
1322 EXPECT_EQ(EINVAL, errno) {
1323 TH_LOG("Did not reject crazy op value!");
1324 }
1325
1326 /* Reject strict with flags or pointer. */
1327 ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL);
1328 EXPECT_EQ(EINVAL, errno) {
1329 TH_LOG("Did not reject mode strict with flags!");
1330 }
1331 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog);
1332 EXPECT_EQ(EINVAL, errno) {
1333 TH_LOG("Did not reject mode strict with uargs!");
1334 }
1335
1336 /* Reject insane args for filter. */
1337 ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog);
1338 EXPECT_EQ(EINVAL, errno) {
1339 TH_LOG("Did not reject crazy filter flags!");
1340 }
1341 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL);
1342 EXPECT_EQ(EFAULT, errno) {
1343 TH_LOG("Did not reject NULL filter!");
1344 }
1345
1346 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
1347 EXPECT_EQ(0, errno) {
1348 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s",
1349 strerror(errno));
1350 }
1351 }
1352
1353 TEST(seccomp_syscall_mode_lock) {
1354 struct sock_filter filter[] = {
1355 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1356 };
1357 struct sock_fprog prog = {
1358 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
1359 .filter = filter,
1360 };
1361 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
1362 ASSERT_EQ(0, ret) {
1363 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1364 }
1365
1366 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
1367 EXPECT_EQ(0, ret) {
1368 TH_LOG("Could not install filter!");
1369 }
1370
1371 /* Make sure neither entry point will switch to strict. */
1372 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0);
1373 EXPECT_EQ(EINVAL, errno) {
1374 TH_LOG("Switched to mode strict!");
1375 }
1376
1377 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL);
1378 EXPECT_EQ(EINVAL, errno) {
1379 TH_LOG("Switched to mode strict!");
1380 }
1381 }
1382
1383 TEST(TSYNC_first) {
1384 struct sock_filter filter[] = {
1385 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1386 };
1387 struct sock_fprog prog = {
1388 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
1389 .filter = filter,
1390 };
1391 long ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
1392 ASSERT_EQ(0, ret) {
1393 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1394 }
1395
1396 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
1397 &prog);
1398 EXPECT_EQ(0, ret) {
1399 TH_LOG("Could not install initial filter with TSYNC!");
1400 }
1401 }
1402
1403 #define TSYNC_SIBLINGS 2
1404 struct tsync_sibling {
1405 pthread_t tid;
1406 pid_t system_tid;
1407 sem_t *started;
1408 pthread_cond_t *cond;
1409 pthread_mutex_t *mutex;
1410 int diverge;
1411 int num_waits;
1412 struct sock_fprog *prog;
1413 struct __test_metadata *metadata;
1414 };
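/*
 * Per-thread bookkeeping for the TSYNC tests: each sibling blocks on
 * cond/mutex until the main thread broadcasts, decrementing num_waits per
 * wakeup. Setting diverge makes the sibling install its own copy of prog,
 * so a later thread-sync attempt from the main thread is expected to fail.
 */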
1415
1416 FIXTURE_DATA(TSYNC) {
1417 struct sock_fprog root_prog, apply_prog;
1418 struct tsync_sibling sibling[TSYNC_SIBLINGS];
1419 sem_t started;
1420 pthread_cond_t cond;
1421 pthread_mutex_t mutex;
1422 int sibling_count;
1423 };
1424
1425 FIXTURE_SETUP(TSYNC) {
1426 struct sock_filter root_filter[] = {
1427 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1428 };
1429 struct sock_filter apply_filter[] = {
1430 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1431 offsetof(struct seccomp_data, nr)),
1432 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
1433 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
1434 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1435 };
1436 memset(&self->root_prog, 0, sizeof(self->root_prog));
1437 memset(&self->apply_prog, 0, sizeof(self->apply_prog));
1438 memset(&self->sibling, 0, sizeof(self->sibling));
1439 self->root_prog.filter = malloc(sizeof(root_filter));
1440 ASSERT_NE(NULL, self->root_prog.filter);
1441 memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter));
1442 self->root_prog.len = (unsigned short)(sizeof(root_filter)/sizeof(root_filter[0]));
1443
1444 self->apply_prog.filter = malloc(sizeof(apply_filter));
1445 ASSERT_NE(NULL, self->apply_prog.filter);
1446 memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter));
1447 self->apply_prog.len = (unsigned short)(sizeof(apply_filter)/sizeof(apply_filter[0]));
1448
1449 self->sibling_count = 0;
1450 pthread_mutex_init(&self->mutex, NULL);
1451 pthread_cond_init(&self->cond, NULL);
1452 sem_init(&self->started, 0, 0);
1453 self->sibling[0].tid = 0;
1454 self->sibling[0].cond = &self->cond;
1455 self->sibling[0].started = &self->started;
1456 self->sibling[0].mutex = &self->mutex;
1457 self->sibling[0].diverge = 0;
1458 self->sibling[0].num_waits = 1;
1459 self->sibling[0].prog = &self->root_prog;
1460 self->sibling[0].metadata = _metadata;
1461 self->sibling[1].tid = 0;
1462 self->sibling[1].cond = &self->cond;
1463 self->sibling[1].started = &self->started;
1464 self->sibling[1].mutex = &self->mutex;
1465 self->sibling[1].diverge = 0;
1466 self->sibling[1].prog = &self->root_prog;
1467 self->sibling[1].num_waits = 1;
1468 self->sibling[1].metadata = _metadata;
1469 }
1470
1471 FIXTURE_TEARDOWN(TSYNC) {
1472 int sib = 0;
1473 if (self->root_prog.filter)
1474 free(self->root_prog.filter);
1475 if (self->apply_prog.filter)
1476 free(self->apply_prog.filter);
1477
1478 for ( ; sib < self->sibling_count; ++sib) {
1479 struct tsync_sibling *s = &self->sibling[sib];
1480 void *status;
1481 if (!s->tid)
1482 continue;
1483 if (pthread_kill(s->tid, 0)) {
1484 //pthread_cancel(s->tid); // ANDROID
1485 pthread_join(s->tid, &status);
1486 }
1487 }
1488 pthread_mutex_destroy(&self->mutex);
1489 pthread_cond_destroy(&self->cond);
1490 sem_destroy(&self->started);
1491 };
1492
1493 void *tsync_sibling(void *data)
1494 {
1495 long ret = 0;
1496 struct tsync_sibling *me = data;
1497 me->system_tid = syscall(__NR_gettid);
1498
1499 pthread_mutex_lock(me->mutex);
1500 if (me->diverge) {
1501 /* Just re-apply the root prog to fork the tree */
1502 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
1503 me->prog, 0, 0);
1504 }
1505 sem_post(me->started);
1506 /* Return outside of started so parent notices failures. */
1507 if (ret) {
1508 pthread_mutex_unlock(me->mutex);
1509 return (void *)SIBLING_EXIT_FAILURE;
1510 }
1511 do {
1512 pthread_cond_wait(me->cond, me->mutex);
1513 me->num_waits = me->num_waits - 1;
1514 }
1515 while (me->num_waits);
1516 pthread_mutex_unlock(me->mutex);
1517 long nnp = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
1518 if (!nnp)
1519 return (void*)SIBLING_EXIT_NEWPRIVS;
1520 read(0, NULL, 0);
1521 return (void *)SIBLING_EXIT_UNKILLED;
1522 }
1523
1524 void tsync_start_sibling(struct tsync_sibling *sibling)
1525 {
1526 pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling);
1527 }
1528
1529 TEST_F(TSYNC, siblings_fail_prctl) {
1530 long ret;
1531 void *status;
1532 struct sock_filter filter[] = {
1533 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1534 offsetof(struct seccomp_data, nr)),
1535 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
1536 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL),
1537 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1538 };
1539 struct sock_fprog prog = {
1540 .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
1541 .filter = filter,
1542 };
1543
1544 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
1545 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1546 }
1547
1548 /* Check prctl failure detection by requesting sib 0 diverge. */
1549 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
1550 ASSERT_EQ(0, ret) {
1551 TH_LOG("setting filter failed");
1552 }
1553
1554 self->sibling[0].diverge = 1;
1555 tsync_start_sibling(&self->sibling[0]);
1556 tsync_start_sibling(&self->sibling[1]);
1557
1558 while (self->sibling_count < TSYNC_SIBLINGS) {
1559 sem_wait(&self->started);
1560 self->sibling_count++;
1561 }
1562
1563 /* Signal the threads to clean up*/
1564 pthread_mutex_lock(&self->mutex);
1565 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
1566 TH_LOG("cond broadcast non-zero");
1567 }
1568 pthread_mutex_unlock(&self->mutex);
1569
1570 /* Ensure diverging sibling failed to call prctl. */
1571 pthread_join(self->sibling[0].tid, &status);
1572 EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
1573 pthread_join(self->sibling[1].tid, &status);
1574 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
1575 }
1576
1577 TEST_F(TSYNC, two_siblings_with_ancestor) {
1578 long ret;
1579 void *status;
1580
1581 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
1582 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1583 }
1584
1585 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
1586 ASSERT_EQ(0, ret) {
1587 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
1588 }
1589 tsync_start_sibling(&self->sibling[0]);
1590 tsync_start_sibling(&self->sibling[1]);
1591
1592 while (self->sibling_count < TSYNC_SIBLINGS) {
1593 sem_wait(&self->started);
1594 self->sibling_count++;
1595 }
1596
1597 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
1598 &self->apply_prog);
1599 ASSERT_EQ(0, ret) {
1600 TH_LOG("Could install filter on all threads!");
1601 }
1602 /* Tell the siblings to test the policy */
1603 pthread_mutex_lock(&self->mutex);
1604 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
1605 TH_LOG("cond broadcast non-zero");
1606 }
1607 pthread_mutex_unlock(&self->mutex);
1608 /* Ensure they are both killed and don't exit cleanly. */
1609 pthread_join(self->sibling[0].tid, &status);
1610 EXPECT_EQ(0x0, (long)status);
1611 pthread_join(self->sibling[1].tid, &status);
1612 EXPECT_EQ(0x0, (long)status);
1613 }
1614
1615 TEST_F(TSYNC, two_sibling_want_nnp) {
1616 void *status;
1617
1618 /* start siblings before any prctl() operations */
1619 tsync_start_sibling(&self->sibling[0]);
1620 tsync_start_sibling(&self->sibling[1]);
1621 while (self->sibling_count < TSYNC_SIBLINGS) {
1622 sem_wait(&self->started);
1623 self->sibling_count++;
1624 }
1625
1626 /* Tell the siblings to test no policy */
1627 pthread_mutex_lock(&self->mutex);
1628 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
1629 TH_LOG("cond broadcast non-zero");
1630 }
1631 pthread_mutex_unlock(&self->mutex);
1632
1633 /* Ensure they are both upset about lacking nnp. */
1634 pthread_join(self->sibling[0].tid, &status);
1635 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
1636 pthread_join(self->sibling[1].tid, &status);
1637 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
1638 }
1639
1640 TEST_F(TSYNC, two_siblings_with_no_filter) {
1641 long ret;
1642 void *status;
1643
1644 /* start siblings before any prctl() operations */
1645 tsync_start_sibling(&self->sibling[0]);
1646 tsync_start_sibling(&self->sibling[1]);
1647 while (self->sibling_count < TSYNC_SIBLINGS) {
1648 sem_wait(&self->started);
1649 self->sibling_count++;
1650 }
1651
1652 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
1653 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1654 }
1655
1656 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
1657 &self->apply_prog);
1658 ASSERT_EQ(0, ret) {
1659 TH_LOG("Could install filter on all threads!");
1660 }
1661
1662 /* Tell the siblings to test the policy */
1663 pthread_mutex_lock(&self->mutex);
1664 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
1665 TH_LOG("cond broadcast non-zero");
1666 }
1667 pthread_mutex_unlock(&self->mutex);
1668
1669 /* Ensure they are both killed and don't exit cleanly. */
1670 pthread_join(self->sibling[0].tid, &status);
1671 EXPECT_EQ(0x0, (long)status);
1672 pthread_join(self->sibling[1].tid, &status);
1673 EXPECT_EQ(0x0, (long)status);
1674 }
1675
1676 TEST_F(TSYNC, two_siblings_with_one_divergence) {
1677 long ret;
1678 void *status;
1679
1680 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
1681 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1682 }
1683
1684 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
1685 ASSERT_EQ(0, ret) {
1686 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
1687 }
1688 self->sibling[0].diverge = 1;
1689 tsync_start_sibling(&self->sibling[0]);
1690 tsync_start_sibling(&self->sibling[1]);
1691
1692 while (self->sibling_count < TSYNC_SIBLINGS) {
1693 sem_wait(&self->started);
1694 self->sibling_count++;
1695 }
1696
1697 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
1698 &self->apply_prog);
1699 ASSERT_EQ(self->sibling[0].system_tid, ret) {
1700 TH_LOG("Did not fail on diverged sibling.");
1701 }
1702
1703 /* Wake the threads */
1704 pthread_mutex_lock(&self->mutex);
1705 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
1706 TH_LOG("cond broadcast non-zero");
1707 }
1708 pthread_mutex_unlock(&self->mutex);
1709
1710 /* Ensure they are both unkilled. */
1711 pthread_join(self->sibling[0].tid, &status);
1712 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
1713 pthread_join(self->sibling[1].tid, &status);
1714 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
1715 }
1716
1717 TEST_F(TSYNC, two_siblings_not_under_filter) {
1718 long ret, sib;
1719 void *status;
1720
1721 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
1722 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1723 }
1724
1725 /*
1726 * Sibling 0 will have its own seccomp policy
1727 * and Sibling 1 will not be under seccomp at
1728 * all. Sibling 1 will enter seccomp and 0
1729 * will cause failure.
1730 */
1731 self->sibling[0].diverge = 1;
1732 tsync_start_sibling(&self->sibling[0]);
1733 tsync_start_sibling(&self->sibling[1]);
1734
1735 while (self->sibling_count < TSYNC_SIBLINGS) {
1736 sem_wait(&self->started);
1737 self->sibling_count++;
1738 }
1739
1740 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
1741 ASSERT_EQ(0, ret) {
1742 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
1743 }
1744
1745 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
1746 &self->apply_prog);
1747 ASSERT_EQ(ret, self->sibling[0].system_tid) {
1748 TH_LOG("Did not fail on diverged sibling.");
1749 }
1750 sib = 1;
1751 if (ret == self->sibling[0].system_tid)
1752 sib = 0;
1753
1754 pthread_mutex_lock(&self->mutex);
1755
1756 /* Increment the other siblings num_waits so we can clean up
1757 * the one we just saw.
1758 */
1759 self->sibling[!sib].num_waits += 1;
1760
1761 /* Signal the thread to clean up*/
1762 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
1763 TH_LOG("cond broadcast non-zero");
1764 }
1765 pthread_mutex_unlock(&self->mutex);
1766 pthread_join(self->sibling[sib].tid, &status);
1767 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
1768 /* Poll for actual task death. pthread_join doesn't guarantee it. */
1769 while (!kill(self->sibling[sib].system_tid, 0)) sleep(0.1);
1770 /* Switch to the remaining sibling */
1771 sib = !sib;
1772
1773 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
1774 &self->apply_prog);
1775 ASSERT_EQ(0, ret) {
1776 TH_LOG("Expected the remaining sibling to sync");
1777 }
1778
1779 pthread_mutex_lock(&self->mutex);
1780
1781 /* If remaining sibling didn't have a chance to wake up during
1782 * the first broadcast, manually reduce the num_waits now.
1783 */
1784 if (self->sibling[sib].num_waits > 1)
1785 self->sibling[sib].num_waits = 1;
1786 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
1787 TH_LOG("cond broadcast non-zero");
1788 }
1789 pthread_mutex_unlock(&self->mutex);
1790 pthread_join(self->sibling[sib].tid, &status);
1791 EXPECT_EQ(0, (long)status);
1792 /* Poll for actual task death. pthread_join doesn't guarantee it. */
1793 while (!kill(self->sibling[sib].system_tid, 0)) usleep(100000);
1794
1795 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
1796 &self->apply_prog);
1797 ASSERT_EQ(0, ret); /* just us chickens */
1798 }
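/*
 * Note on the liveness poll used above: kill(tid, 0) delivers no signal but
 * still performs the lookup, so it returns 0 while the thread is alive and
 * -1/ESRCH once the kernel has released its TID.  A hedged, generic version
 * of the same idea (wait_for_tid_exit() is hypothetical, not part of this
 * suite; the 100ms interval is arbitrary):
 */
static inline void wait_for_tid_exit(pid_t tid)
{
	while (!kill(tid, 0))
		usleep(100000);
}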
1799
1800 /* Make sure restarted syscalls are seen directly as "restart_syscall". */
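/*
 * Background: when a signal interrupts nanosleep() and execution resumes,
 * the kernel re-enters the syscall through the hidden restart_syscall(2)
 * entry point (ERESTART_RESTARTBLOCK), so the seccomp filter observes
 * __NR_restart_syscall rather than the original __NR_nanosleep, except on
 * native ARM, as noted in the FIXME near the end of this test.
 */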
1801 TEST(syscall_restart)
1802 {
1803 long ret;
1804 unsigned long msg;
1805 pid_t child_pid;
1806 int pipefd[2];
1807 int status;
1808 siginfo_t info = { };
1809 struct sock_filter filter[] = {
1810 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1811 offsetof(struct seccomp_data, nr)),
1812
1813 #ifdef __NR_sigreturn
1814 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 6, 0),
1815 #endif
1816 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 5, 0),
1817 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 4, 0),
1818 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 3, 0),
1819 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 4, 0),
1820 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0),
1821
1822 /* Allow __NR_write for easy logging. */
1823 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1),
1824 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1825 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
1826 /* The nanosleep jump target. */
1827 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100),
1828 /* The restart_syscall jump target. */
1829 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200),
1830 };
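	/*
	 * How the jump offsets above resolve: jt/jf count instructions to
	 * skip after the jump itself.  With __NR_sigreturn defined, the
	 * load and the checks occupy slots 0-7, RET_ALLOW is slot 8,
	 * RET_KILL is slot 9, the nanosleep TRACE|0x100 target is slot 10,
	 * and the restart_syscall TRACE|0x200 target is slot 11; e.g. the
	 * nanosleep check at slot 5 with jt=4 skips slots 6-9 and lands on
	 * slot 10.  Without __NR_sigreturn every slot shifts up by one and
	 * the same relative offsets still reach the intended targets.
	 */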
1831 struct sock_fprog prog = {
1832 .len = (unsigned short) (sizeof(filter)/sizeof(filter[0])),
1833 .filter = filter,
1834 };
1835 #if defined(__arm__)
1836 struct utsname utsbuf;
1837 #endif
1838
1839 ASSERT_EQ(0, pipe(pipefd));
1840
1841 child_pid = fork();
1842 ASSERT_LE(0, child_pid);
1843 if (child_pid == 0) {
1844 /* Child uses EXPECT not ASSERT to deliver status correctly. */
1845 char buf = ' ';
1846 struct timespec timeout = { };
1847
1848 /* Attach parent as tracer and stop. */
1849 EXPECT_EQ(0, ptrace(PTRACE_TRACEME));
1850 EXPECT_EQ(0, raise(SIGSTOP));
1851
1852 EXPECT_EQ(0, close(pipefd[1]));
1853
1854 EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
1855 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1856 }
1857
1858 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
1859 EXPECT_EQ(0, ret) {
1860 TH_LOG("Failed to install filter!");
1861 }
1862
1863 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
1864 TH_LOG("Failed to read() sync from parent");
1865 }
1866 EXPECT_EQ('.', buf) {
1867 TH_LOG("Failed to get sync data from read()");
1868 }
1869
1870 /* Start nanosleep to be interrupted. */
1871 timeout.tv_sec = 1;
1872 errno = 0;
1873 EXPECT_EQ(0, nanosleep(&timeout, NULL)) {
1874 TH_LOG("Call to nanosleep() failed (errno %d)", errno);
1875 }
1876
1877 /* Read final sync from parent. */
1878 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
1879 TH_LOG("Failed final read() from parent");
1880 }
1881 EXPECT_EQ('!', buf) {
1882 TH_LOG("Failed to get final data from read()");
1883 }
1884
1885 /* Directly report the status of our test harness results. */
1886 syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS
1887 : EXIT_FAILURE);
1888 }
1889 EXPECT_EQ(0, close(pipefd[0]));
1890
1891 /* Attach to child, setup options, and release. */
1892 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
1893 ASSERT_EQ(true, WIFSTOPPED(status));
1894 ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL,
1895 PTRACE_O_TRACESECCOMP));
1896 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
1897 ASSERT_EQ(1, write(pipefd[1], ".", 1));
1898
1899 /* Wait for nanosleep() to start. */
1900 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
1901 ASSERT_EQ(true, WIFSTOPPED(status));
1902 ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
1903 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
1904 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
1905 ASSERT_EQ(0x100, msg);
1906 EXPECT_EQ(__NR_nanosleep, get_syscall(_metadata, child_pid));
1907
1908 /* Might as well check siginfo for sanity while we're here. */
1909 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
1910 ASSERT_EQ(SIGTRAP, info.si_signo);
1911 ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code);
1912 EXPECT_EQ(0, info.si_errno);
1913 EXPECT_EQ(getuid(), info.si_uid);
1914 /* Verify signal delivery came from child (seccomp-triggered). */
1915 EXPECT_EQ(child_pid, info.si_pid);
1916
1917 /* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */
1918 ASSERT_EQ(0, kill(child_pid, SIGSTOP));
1919 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
1920 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
1921 ASSERT_EQ(true, WIFSTOPPED(status));
1922 ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
1923 /* Verify signal delivery came from parent now. */
1924 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
1925 EXPECT_EQ(getpid(), info.si_pid);
1926
1927 /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */
1928 ASSERT_EQ(0, kill(child_pid, SIGCONT));
1929 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
1930 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
1931 ASSERT_EQ(true, WIFSTOPPED(status));
1932 ASSERT_EQ(SIGCONT, WSTOPSIG(status));
1933 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
1934
1935 /* Wait for restart_syscall() to start. */
1936 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
1937 ASSERT_EQ(true, WIFSTOPPED(status));
1938 ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
1939 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
1940 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
1941
1942 ASSERT_EQ(0x200, msg);
1943 ret = get_syscall(_metadata, child_pid);
1944 #if defined(__arm__)
1945 /*
1946 * FIXME:
1947 * - native ARM registers do NOT expose true syscall.
1948 * - compat ARM registers on ARM64 DO expose true syscall.
1949 */
1950 ASSERT_EQ(0, uname(&utsbuf));
1951 if (strncmp(utsbuf.machine, "arm", 3) == 0) {
1952 EXPECT_EQ(__NR_nanosleep, ret);
1953 } else
1954 #endif
1955 {
1956 EXPECT_EQ(__NR_restart_syscall, ret);
1957 }
1958
1959 /* Write again to end test. */
1960 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
1961 ASSERT_EQ(1, write(pipefd[1], "!", 1));
1962 EXPECT_EQ(0, close(pipefd[1]));
1963
1964 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
1965 if (WIFSIGNALED(status) || WEXITSTATUS(status))
1966 _metadata->passed = 0;
1967 }
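/*
 * Hedged sketch, not used above: the tracer-side half of SECCOMP_RET_TRACE.
 * The kernel reports only the low 16 bits of the filter's return value (the
 * SECCOMP_RET_DATA field) through PTRACE_GETEVENTMSG, which is how the
 * syscall_restart test distinguishes its 0x100 and 0x200 stops.
 * seccomp_trace_event_data() is illustrative and not part of this suite.
 */
static inline unsigned long seccomp_trace_event_data(pid_t tracee)
{
	unsigned long msg = 0;

	/* The tracee must already be stopped at a PTRACE_EVENT_SECCOMP trap. */
	if (ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg))
		return ~0UL;
	return msg;
}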
1968
1969 /*
1970 * TODO:
1971 * - add microbenchmarks
1972 * - expand NNP testing
1973 * - better arch-specific TRACE and TRAP handlers.
1974 * - endianness checking when appropriate
1975 * - 64-bit arg prodding
1976 * - arch value testing (x86 modes especially)
1977 * - ...
1978 */
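/*
 * A first stab at the "add microbenchmarks" TODO above, as a hedged sketch:
 * time a batch of cheap syscalls and let the caller compare the result from
 * before and after installing a filter to estimate per-call overhead.
 * seccomp_bench_calls() is hypothetical and not wired into the harness; it
 * only relies on sys/times.h and unistd.h, which are already included.
 */
static inline clock_t seccomp_bench_calls(unsigned long count)
{
	struct tms unused;
	clock_t start, end;
	unsigned long i;

	start = times(&unused);
	for (i = 0; i < count; i++)
		syscall(__NR_getppid);
	end = times(&unused);

	/* Elapsed wall-clock ticks; divide by sysconf(_SC_CLK_TCK) for seconds. */
	return end - start;
}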
1979
1980 // ANDROID:begin
1981 struct __test_metadata* get_seccomp_test_list() {
1982 return __test_list;
1983 }
1984 // ANDROID:end
1985
1986 TEST_HARNESS_MAIN
1987