1 /* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
2 * Use of this source code is governed by a BSD-style license that can be
3 * found in the LICENSE file.
4 */
5
6 #define _BSD_SOURCE
7 #define _DEFAULT_SOURCE
8 #define _GNU_SOURCE
9
10 #include <asm/unistd.h>
11 #include <assert.h>
12 #include <dirent.h>
13 #include <errno.h>
14 #include <fcntl.h>
15 #include <grp.h>
16 #include <linux/capability.h>
17 #include <linux/filter.h>
18 #include <sched.h>
19 #include <signal.h>
20 #include <stdbool.h>
21 #include <stddef.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/capability.h>
26 #include <sys/mount.h>
27 #include <sys/param.h>
28 #include <sys/prctl.h>
29 #include <sys/resource.h>
30 #include <sys/select.h>
31 #include <sys/stat.h>
32 #include <sys/sysmacros.h>
33 #include <sys/types.h>
34 #include <sys/user.h>
35 #include <sys/wait.h>
36 #include <syscall.h>
37 #include <unistd.h>
38
39 #include "libminijail-private.h"
40 #include "libminijail.h"
41
42 #include "signal_handler.h"
43 #include "syscall_filter.h"
44 #include "syscall_wrapper.h"
45 #include "system.h"
46 #include "util.h"
47
48 /* Until these are reliably available in linux/prctl.h. */
49 #ifndef PR_ALT_SYSCALL
50 #define PR_ALT_SYSCALL 0x43724f53
51 #endif
52
53 /* New cgroup namespace might not be in linux-headers yet. */
54 #ifndef CLONE_NEWCGROUP
55 #define CLONE_NEWCGROUP 0x02000000
56 #endif
57
58 #define MAX_CGROUPS 10 /* 10 different controllers supported by Linux. */
59
60 #define MAX_RLIMITS 32 /* Currently there are 15 supported by Linux. */
61
62 #define MAX_PRESERVED_FDS 128U
63
64 /* Keyctl commands. */
65 #define KEYCTL_JOIN_SESSION_KEYRING 1
66
67 /*
68 * The userspace equivalent of MNT_USER_SETTABLE_MASK, which is the mask of all
69 * flags that can be modified by MS_REMOUNT.
70 */
71 #define MS_USER_SETTABLE_MASK \
72 (MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_NOATIME | MS_NODIRATIME | \
73 MS_RELATIME | MS_RDONLY)
74
75 struct minijail_rlimit {
76 int type;
77 rlim_t cur;
78 rlim_t max;
79 };
80
81 struct mountpoint {
82 char *src;
83 char *dest;
84 char *type;
85 char *data;
86 int has_data;
87 unsigned long flags;
88 struct mountpoint *next;
89 };
90
91 struct minijail_remount {
92 unsigned long remount_mode;
93 char *mount_name;
94 struct minijail_remount *next;
95 };
96
97 struct hook {
98 minijail_hook_t hook;
99 void *payload;
100 minijail_hook_event_t event;
101 struct hook *next;
102 };
103
104 struct preserved_fd {
105 int parent_fd;
106 int child_fd;
107 };
108
109 struct minijail {
110 /*
111 * WARNING: if you add a flag here you need to make sure it's
112 * accounted for in minijail_pre{enter|exec}() below.
113 */
114 struct {
115 int uid : 1;
116 int gid : 1;
117 int inherit_suppl_gids : 1;
118 int set_suppl_gids : 1;
119 int keep_suppl_gids : 1;
120 int use_caps : 1;
121 int capbset_drop : 1;
122 int set_ambient_caps : 1;
123 int vfs : 1;
124 int enter_vfs : 1;
125 int pids : 1;
126 int ipc : 1;
127 int uts : 1;
128 int net : 1;
129 int enter_net : 1;
130 int ns_cgroups : 1;
131 int userns : 1;
132 int disable_setgroups : 1;
133 int seccomp : 1;
134 int remount_proc_ro : 1;
135 int no_new_privs : 1;
136 int seccomp_filter : 1;
137 int seccomp_filter_tsync : 1;
138 int seccomp_filter_logging : 1;
139 int seccomp_filter_allow_speculation : 1;
140 int chroot : 1;
141 int pivot_root : 1;
142 int mount_dev : 1;
143 int mount_tmp : 1;
144 int do_init : 1;
145 int run_as_init : 1;
146 int pid_file : 1;
147 int cgroups : 1;
148 int alt_syscall : 1;
149 int reset_signal_mask : 1;
150 int reset_signal_handlers : 1;
151 int close_open_fds : 1;
152 int new_session_keyring : 1;
153 int forward_signals : 1;
154 int setsid : 1;
155 } flags;
156 uid_t uid;
157 gid_t gid;
158 gid_t usergid;
159 char *user;
160 size_t suppl_gid_count;
161 gid_t *suppl_gid_list;
162 uint64_t caps;
163 uint64_t cap_bset;
164 pid_t initpid;
165 int mountns_fd;
166 int netns_fd;
167 char *chrootdir;
168 char *pid_file_path;
169 char *uidmap;
170 char *gidmap;
171 char *hostname;
172 char *preload_path;
173 size_t filter_len;
174 struct sock_fprog *filter_prog;
175 char *alt_syscall_table;
176 struct mountpoint *mounts_head;
177 struct mountpoint *mounts_tail;
178 size_t mounts_count;
179 unsigned long remount_mode;
180 struct minijail_remount *remounts_head;
181 struct minijail_remount *remounts_tail;
182 size_t tmpfs_size;
183 char *cgroups[MAX_CGROUPS];
184 size_t cgroup_count;
185 struct minijail_rlimit rlimits[MAX_RLIMITS];
186 size_t rlimit_count;
187 uint64_t securebits_skip_mask;
188 struct hook *hooks_head;
189 struct hook *hooks_tail;
190 struct preserved_fd preserved_fds[MAX_PRESERVED_FDS];
191 size_t preserved_fd_count;
192 char *seccomp_policy_path;
193 };
194
195 static void run_hooks_or_die(const struct minijail *j,
196 minijail_hook_event_t event);
197
198 static bool seccomp_is_logging_allowed(const struct minijail *j)
199 {
200 return seccomp_default_ret_log() || j->flags.seccomp_filter_logging;
201 }
202
203 static void free_mounts_list(struct minijail *j)
204 {
205 while (j->mounts_head) {
206 struct mountpoint *m = j->mounts_head;
207 j->mounts_head = j->mounts_head->next;
208 free(m->data);
209 free(m->type);
210 free(m->dest);
211 free(m->src);
212 free(m);
213 }
214 // No need to clear mounts_head as we know it's NULL after the loop.
215 j->mounts_tail = NULL;
216 }
217
218 static void free_remounts_list(struct minijail *j)
219 {
220 while (j->remounts_head) {
221 struct minijail_remount *m = j->remounts_head;
222 j->remounts_head = j->remounts_head->next;
223 free(m->mount_name);
224 free(m);
225 }
226 // No need to clear remounts_head as we know it's NULL after the loop.
227 j->remounts_tail = NULL;
228 }
229
230 /*
231 * Writes exactly n bytes from buf to file descriptor fd.
232 * Returns 0 on success or a negative error code on error.
233 */
234 static int write_exactly(int fd, const void *buf, size_t n)
235 {
236 const char *p = buf;
237 while (n > 0) {
238 const ssize_t written = write(fd, p, n);
239 if (written < 0) {
240 if (errno == EINTR)
241 continue;
242
243 return -errno;
244 }
245
246 p += written;
247 n -= written;
248 }
249
250 return 0;
251 }
252
253 /* Closes *pfd and sets it to -1. */
254 static void close_and_reset(int *pfd)
255 {
256 if (*pfd != -1)
257 close(*pfd);
258 *pfd = -1;
259 }
260
261 /*
262 * Strip out flags meant for the parent.
263 * We keep things that are not inherited across execve(2) (e.g. capabilities),
264 * or are easier to set after execve(2) (e.g. seccomp filters).
265 */
266 void minijail_preenter(struct minijail *j)
267 {
268 j->flags.vfs = 0;
269 j->flags.enter_vfs = 0;
270 j->flags.ns_cgroups = 0;
271 j->flags.net = 0;
272 j->flags.uts = 0;
273 j->flags.remount_proc_ro = 0;
274 j->flags.pids = 0;
275 j->flags.do_init = 0;
276 j->flags.run_as_init = 0;
277 j->flags.pid_file = 0;
278 j->flags.cgroups = 0;
279 j->flags.forward_signals = 0;
280 j->flags.setsid = 0;
281 j->remount_mode = 0;
282 free_remounts_list(j);
283 }
284
285 /*
286 * Strip out flags meant for the child.
287 * We keep things that are inherited across execve(2).
288 */
289 void minijail_preexec(struct minijail *j)
290 {
291 int vfs = j->flags.vfs;
292 int enter_vfs = j->flags.enter_vfs;
293 int ns_cgroups = j->flags.ns_cgroups;
294 int net = j->flags.net;
295 int uts = j->flags.uts;
296 int remount_proc_ro = j->flags.remount_proc_ro;
297 int userns = j->flags.userns;
298 if (j->user)
299 free(j->user);
300 j->user = NULL;
301 if (j->suppl_gid_list)
302 free(j->suppl_gid_list);
303 j->suppl_gid_list = NULL;
304 if (j->preload_path)
305 free(j->preload_path);
306 j->preload_path = NULL;
307 free_mounts_list(j);
308 memset(&j->flags, 0, sizeof(j->flags));
309 /* Now restore anything we meant to keep. */
310 j->flags.vfs = vfs;
311 j->flags.enter_vfs = enter_vfs;
312 j->flags.ns_cgroups = ns_cgroups;
313 j->flags.net = net;
314 j->flags.uts = uts;
315 j->flags.remount_proc_ro = remount_proc_ro;
316 j->flags.userns = userns;
317 /* Note, |pids| will already have been used before this call. */
318 }
319
320 /* Minijail API. */
321
322 struct minijail API *minijail_new(void)
323 {
324 struct minijail *j = calloc(1, sizeof(struct minijail));
325 if (j) {
326 j->remount_mode = MS_PRIVATE;
327 }
328 return j;
329 }
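/*
 * Illustrative usage sketch (not part of the upstream file): a typical caller
 * builds a jail with the setters below and then launches the target program.
 * The minijail_run() and minijail_destroy() entry points are assumed to be
 * the ones declared in libminijail.h.
 *
 *   struct minijail *j = minijail_new();
 *   if (!j)
 *       die("out of memory");
 *   minijail_change_uid(j, 1000);
 *   minijail_change_gid(j, 1000);
 *   minijail_namespace_pids(j);
 *   minijail_no_new_privs(j);
 *   minijail_run(j, argv[0], argv);
 *   minijail_destroy(j);
 */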
330
331 void API minijail_change_uid(struct minijail *j, uid_t uid)
332 {
333 if (uid == 0)
334 die("useless change to uid 0");
335 j->uid = uid;
336 j->flags.uid = 1;
337 }
338
339 void API minijail_change_gid(struct minijail *j, gid_t gid)
340 {
341 if (gid == 0)
342 die("useless change to gid 0");
343 j->gid = gid;
344 j->flags.gid = 1;
345 }
346
347 void API minijail_set_supplementary_gids(struct minijail *j, size_t size,
348 const gid_t *list)
349 {
350 size_t i;
351
352 if (j->flags.inherit_suppl_gids)
353 die("cannot inherit *and* set supplementary groups");
354 if (j->flags.keep_suppl_gids)
355 die("cannot keep *and* set supplementary groups");
356
357 if (size == 0) {
358 /* Clear supplementary groups. */
359 j->suppl_gid_list = NULL;
360 j->suppl_gid_count = 0;
361 j->flags.set_suppl_gids = 1;
362 return;
363 }
364
365 /* Copy the gid_t array. */
366 j->suppl_gid_list = calloc(size, sizeof(gid_t));
367 if (!j->suppl_gid_list) {
368 die("failed to allocate internal supplementary group array");
369 }
370 for (i = 0; i < size; i++) {
371 j->suppl_gid_list[i] = list[i];
372 }
373 j->suppl_gid_count = size;
374 j->flags.set_suppl_gids = 1;
375 }
376
377 void API minijail_keep_supplementary_gids(struct minijail *j)
378 {
379 j->flags.keep_suppl_gids = 1;
380 }
381
382 int API minijail_change_user(struct minijail *j, const char *user)
383 {
384 uid_t uid;
385 gid_t gid;
386 int rc = lookup_user(user, &uid, &gid);
387 if (rc)
388 return rc;
389 minijail_change_uid(j, uid);
390 j->user = strdup(user);
391 if (!j->user)
392 return -ENOMEM;
393 j->usergid = gid;
394 return 0;
395 }
396
397 int API minijail_change_group(struct minijail *j, const char *group)
398 {
399 gid_t gid;
400 int rc = lookup_group(group, &gid);
401 if (rc)
402 return rc;
403 minijail_change_gid(j, gid);
404 return 0;
405 }
406
407 void API minijail_use_seccomp(struct minijail *j)
408 {
409 j->flags.seccomp = 1;
410 }
411
412 void API minijail_no_new_privs(struct minijail *j)
413 {
414 j->flags.no_new_privs = 1;
415 }
416
417 void API minijail_use_seccomp_filter(struct minijail *j)
418 {
419 j->flags.seccomp_filter = 1;
420 }
421
422 void API minijail_set_seccomp_filter_tsync(struct minijail *j)
423 {
424 if (j->filter_len > 0 && j->filter_prog != NULL) {
425 die("minijail_set_seccomp_filter_tsync() must be called "
426 "before minijail_parse_seccomp_filters()");
427 }
428
429 if (seccomp_is_logging_allowed(j) && !seccomp_ret_log_available()) {
430 /*
431 * If SECCOMP_RET_LOG is not available, we don't want to use
432 * SECCOMP_RET_TRAP to both kill the entire process and report
433 * failing syscalls, since it will be brittle. Just bail.
434 */
435 die("SECCOMP_RET_LOG not available, cannot use logging with "
436 "thread sync at the same time");
437 }
438
439 j->flags.seccomp_filter_tsync = 1;
440 }
441
442 void API minijail_set_seccomp_filter_allow_speculation(struct minijail *j)
443 {
444 if (j->filter_len > 0 && j->filter_prog != NULL) {
445 die("minijail_set_seccomp_filter_allow_speculation() must be "
446 "called before minijail_parse_seccomp_filters()");
447 }
448
449 j->flags.seccomp_filter_allow_speculation = 1;
450 }
451
452 void API minijail_log_seccomp_filter_failures(struct minijail *j)
453 {
454 if (j->filter_len > 0 && j->filter_prog != NULL) {
455 die("minijail_log_seccomp_filter_failures() must be called "
456 "before minijail_parse_seccomp_filters()");
457 }
458
459 if (j->flags.seccomp_filter_tsync && !seccomp_ret_log_available()) {
460 /*
461 * If SECCOMP_RET_LOG is not available, we don't want to use
462 * SECCOMP_RET_TRAP to both kill the entire process and report
463 * failing syscalls, since it will be brittle. Just bail.
464 */
465 die("SECCOMP_RET_LOG not available, cannot use thread sync "
466 "with logging at the same time");
467 }
468
469 if (debug_logging_allowed()) {
470 j->flags.seccomp_filter_logging = 1;
471 } else {
472 warn("non-debug build: ignoring request to enable seccomp "
473 "logging");
474 }
475 }
476
477 void API minijail_use_caps(struct minijail *j, uint64_t capmask)
478 {
479 /*
480 * 'minijail_use_caps' configures a runtime-capabilities-only
481 * environment, including a bounding set matching the thread's runtime
482 * (permitted|inheritable|effective) sets.
483 * Therefore, it will override any existing bounding set configurations
484 * since the latter would allow gaining extra runtime capabilities from
485 * file capabilities.
486 */
487 if (j->flags.capbset_drop) {
488 warn("overriding bounding set configuration");
489 j->cap_bset = 0;
490 j->flags.capbset_drop = 0;
491 }
492 j->caps = capmask;
493 j->flags.use_caps = 1;
494 }
495
496 void API minijail_capbset_drop(struct minijail *j, uint64_t capmask)
497 {
498 if (j->flags.use_caps) {
499 /*
500 * 'minijail_use_caps' will have already configured a capability
501 * bounding set matching the (permitted|inheritable|effective)
502 * sets. Abort if the user tries to configure a separate
503 * bounding set. 'minijail_capbset_drop' and 'minijail_use_caps'
504 * are mutually exclusive.
505 */
506 die("runtime capabilities already configured, can't drop "
507 "bounding set separately");
508 }
509 j->cap_bset = capmask;
510 j->flags.capbset_drop = 1;
511 }
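/*
 * Example (illustrative sketch): restricting a jailed process to a couple of
 * runtime capabilities. minijail_use_caps() and minijail_capbset_drop() are
 * mutually exclusive, so pick one; CAP_TO_MASK() and the CAP_* constants come
 * from linux/capability.h.
 *
 *   minijail_use_caps(j, CAP_TO_MASK(CAP_NET_ADMIN) |
 *                        CAP_TO_MASK(CAP_NET_RAW));
 *   minijail_set_ambient_caps(j);   so the caps survive execve(2)
 */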
512
513 void API minijail_set_ambient_caps(struct minijail *j)
514 {
515 j->flags.set_ambient_caps = 1;
516 }
517
518 void API minijail_reset_signal_mask(struct minijail *j)
519 {
520 j->flags.reset_signal_mask = 1;
521 }
522
523 void API minijail_reset_signal_handlers(struct minijail *j)
524 {
525 j->flags.reset_signal_handlers = 1;
526 }
527
528 void API minijail_namespace_vfs(struct minijail *j)
529 {
530 j->flags.vfs = 1;
531 }
532
533 void API minijail_namespace_enter_vfs(struct minijail *j, const char *ns_path)
534 {
535 /* Note: Do not use O_CLOEXEC here. We'll close it after we use it. */
536 int ns_fd = open(ns_path, O_RDONLY);
537 if (ns_fd < 0) {
538 pdie("failed to open namespace '%s'", ns_path);
539 }
540 j->mountns_fd = ns_fd;
541 j->flags.enter_vfs = 1;
542 }
543
544 void API minijail_new_session_keyring(struct minijail *j)
545 {
546 j->flags.new_session_keyring = 1;
547 }
548
549 void API minijail_skip_setting_securebits(struct minijail *j,
550 uint64_t securebits_skip_mask)
551 {
552 j->securebits_skip_mask = securebits_skip_mask;
553 }
554
555 void API minijail_remount_mode(struct minijail *j, unsigned long mode)
556 {
557 j->remount_mode = mode;
558 }
559
560 void API minijail_skip_remount_private(struct minijail *j)
561 {
562 j->remount_mode = 0;
563 }
564
565 void API minijail_namespace_pids(struct minijail *j)
566 {
567 j->flags.vfs = 1;
568 j->flags.remount_proc_ro = 1;
569 j->flags.pids = 1;
570 j->flags.do_init = 1;
571 }
572
573 void API minijail_namespace_pids_rw_proc(struct minijail *j)
574 {
575 j->flags.vfs = 1;
576 j->flags.pids = 1;
577 j->flags.do_init = 1;
578 }
579
580 void API minijail_namespace_ipc(struct minijail *j)
581 {
582 j->flags.ipc = 1;
583 }
584
585 void API minijail_namespace_uts(struct minijail *j)
586 {
587 j->flags.uts = 1;
588 }
589
590 int API minijail_namespace_set_hostname(struct minijail *j, const char *name)
591 {
592 if (j->hostname)
593 return -EINVAL;
594 minijail_namespace_uts(j);
595 j->hostname = strdup(name);
596 if (!j->hostname)
597 return -ENOMEM;
598 return 0;
599 }
600
601 void API minijail_namespace_net(struct minijail *j)
602 {
603 j->flags.net = 1;
604 }
605
606 void API minijail_namespace_enter_net(struct minijail *j, const char *ns_path)
607 {
608 /* Note: Do not use O_CLOEXEC here. We'll close it after we use it. */
609 int ns_fd = open(ns_path, O_RDONLY);
610 if (ns_fd < 0) {
611 pdie("failed to open namespace '%s'", ns_path);
612 }
613 j->netns_fd = ns_fd;
614 j->flags.enter_net = 1;
615 }
616
617 void API minijail_namespace_cgroups(struct minijail *j)
618 {
619 j->flags.ns_cgroups = 1;
620 }
621
622 void API minijail_close_open_fds(struct minijail *j)
623 {
624 j->flags.close_open_fds = 1;
625 }
626
627 void API minijail_remount_proc_readonly(struct minijail *j)
628 {
629 j->flags.vfs = 1;
630 j->flags.remount_proc_ro = 1;
631 }
632
633 void API minijail_namespace_user(struct minijail *j)
634 {
635 j->flags.userns = 1;
636 }
637
638 void API minijail_namespace_user_disable_setgroups(struct minijail *j)
639 {
640 j->flags.disable_setgroups = 1;
641 }
642
643 int API minijail_uidmap(struct minijail *j, const char *uidmap)
644 {
645 j->uidmap = strdup(uidmap);
646 if (!j->uidmap)
647 return -ENOMEM;
648 char *ch;
649 for (ch = j->uidmap; *ch; ch++) {
650 if (*ch == ',')
651 *ch = '\n';
652 }
653 return 0;
654 }
655
656 int API minijail_gidmap(struct minijail *j, const char *gidmap)
657 {
658 j->gidmap = strdup(gidmap);
659 if (!j->gidmap)
660 return -ENOMEM;
661 char *ch;
662 for (ch = j->gidmap; *ch; ch++) {
663 if (*ch == ',')
664 *ch = '\n';
665 }
666 return 0;
667 }
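/*
 * Example (illustrative): the map strings use the same "inside-id outside-id
 * count" triples the kernel expects in /proc/<pid>/uid_map and gid_map, with
 * ',' separating entries; the setters above rewrite the commas to newlines
 * before the maps are written by write_ugid_maps_or_die().
 *
 *   minijail_namespace_user(j);
 *   minijail_uidmap(j, "0 1000 1");
 *   minijail_gidmap(j, "0 1000 1,65534 65534 1");
 */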
668
669 void API minijail_inherit_usergroups(struct minijail *j)
670 {
671 j->flags.inherit_suppl_gids = 1;
672 }
673
674 void API minijail_run_as_init(struct minijail *j)
675 {
676 /*
677 * Since the jailed program will become 'init' in the new PID namespace,
678 * Minijail does not need to fork an 'init' process.
679 */
680 j->flags.run_as_init = 1;
681 }
682
683 int API minijail_enter_chroot(struct minijail *j, const char *dir)
684 {
685 if (j->chrootdir)
686 return -EINVAL;
687 j->chrootdir = strdup(dir);
688 if (!j->chrootdir)
689 return -ENOMEM;
690 j->flags.chroot = 1;
691 return 0;
692 }
693
694 int API minijail_enter_pivot_root(struct minijail *j, const char *dir)
695 {
696 if (j->chrootdir)
697 return -EINVAL;
698 j->chrootdir = strdup(dir);
699 if (!j->chrootdir)
700 return -ENOMEM;
701 j->flags.pivot_root = 1;
702 return 0;
703 }
704
705 char API *minijail_get_original_path(struct minijail *j,
706 const char *path_inside_chroot)
707 {
708 struct mountpoint *b;
709
710 b = j->mounts_head;
711 while (b) {
712 /*
713 * If |path_inside_chroot| is the exact destination of a
714 * mount, then the original path is exactly the source of
715 * the mount.
716 * For example, with "-b /some/path/exe,/chroot/path/exe" the
717 * mount source is /some/path/exe and the mount destination is
718 * /chroot/path/exe. When getting the original path of
719 * "/chroot/path/exe", the source of that mount,
720 * "/some/path/exe", is what should be returned.
721 */
722 if (!strcmp(b->dest, path_inside_chroot))
723 return strdup(b->src);
724
725 /*
726 * If |path_inside_chroot| is within the destination path of a
727 * mount, take the suffix of the chroot path relative to the
728 * mount destination path, and append it to the mount source
729 * path.
730 */
731 if (!strncmp(b->dest, path_inside_chroot, strlen(b->dest))) {
732 const char *relative_path =
733 path_inside_chroot + strlen(b->dest);
734 return path_join(b->src, relative_path);
735 }
736 b = b->next;
737 }
738
739 /* If there is a chroot path, append |path_inside_chroot| to that. */
740 if (j->chrootdir)
741 return path_join(j->chrootdir, path_inside_chroot);
742
743 /* No chroot, so the path outside is the same as it is inside. */
744 return strdup(path_inside_chroot);
745 }
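/*
 * Worked example (illustrative, using a hypothetical bind mount): with
 * "-b /some/path,/chroot/path" configured, asking for the original path of
 * "/chroot/path/dir/file" matches the "/chroot/path" destination prefix and
 * returns "/some/path/dir/file"; with no matching mount the result is the
 * input prefixed by j->chrootdir (if any), or the input unchanged.
 */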
746
747 void API minijail_mount_dev(struct minijail *j)
748 {
749 j->flags.mount_dev = 1;
750 }
751
752 void API minijail_mount_tmp(struct minijail *j)
753 {
754 minijail_mount_tmp_size(j, 64 * 1024 * 1024);
755 }
756
757 void API minijail_mount_tmp_size(struct minijail *j, size_t size)
758 {
759 j->tmpfs_size = size;
760 j->flags.mount_tmp = 1;
761 }
762
763 int API minijail_write_pid_file(struct minijail *j, const char *path)
764 {
765 j->pid_file_path = strdup(path);
766 if (!j->pid_file_path)
767 return -ENOMEM;
768 j->flags.pid_file = 1;
769 return 0;
770 }
771
772 int API minijail_add_to_cgroup(struct minijail *j, const char *path)
773 {
774 if (j->cgroup_count >= MAX_CGROUPS)
775 return -ENOMEM;
776 j->cgroups[j->cgroup_count] = strdup(path);
777 if (!j->cgroups[j->cgroup_count])
778 return -ENOMEM;
779 j->cgroup_count++;
780 j->flags.cgroups = 1;
781 return 0;
782 }
783
784 int API minijail_rlimit(struct minijail *j, int type, rlim_t cur, rlim_t max)
785 {
786 size_t i;
787
788 if (j->rlimit_count >= MAX_RLIMITS)
789 return -ENOMEM;
790 /* It's an error if the caller sets the same rlimit multiple times. */
791 for (i = 0; i < j->rlimit_count; i++) {
792 if (j->rlimits[i].type == type)
793 return -EEXIST;
794 }
795
796 j->rlimits[j->rlimit_count].type = type;
797 j->rlimits[j->rlimit_count].cur = cur;
798 j->rlimits[j->rlimit_count].max = max;
799 j->rlimit_count++;
800 return 0;
801 }
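/*
 * Example (illustrative): cap the jailed process at 256 open files and a
 * 100 MB address space. The limits are applied to the child via prlimit(2)
 * in set_rlimits_or_die() after the fork.
 *
 *   minijail_rlimit(j, RLIMIT_NOFILE, 256, 256);
 *   minijail_rlimit(j, RLIMIT_AS, 100 * 1024 * 1024, 100 * 1024 * 1024);
 */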
802
803 int API minijail_forward_signals(struct minijail *j)
804 {
805 j->flags.forward_signals = 1;
806 return 0;
807 }
808
809 int API minijail_create_session(struct minijail *j)
810 {
811 j->flags.setsid = 1;
812 return 0;
813 }
814
815 int API minijail_mount_with_data(struct minijail *j, const char *src,
816 const char *dest, const char *type,
817 unsigned long flags, const char *data)
818 {
819 struct mountpoint *m;
820
821 if (*dest != '/')
822 return -EINVAL;
823 m = calloc(1, sizeof(*m));
824 if (!m)
825 return -ENOMEM;
826 m->dest = strdup(dest);
827 if (!m->dest)
828 goto error;
829 m->src = strdup(src);
830 if (!m->src)
831 goto error;
832 m->type = strdup(type);
833 if (!m->type)
834 goto error;
835
836 if (!data || !data[0]) {
837 /*
838 * Set up secure defaults for certain filesystems. Adding this
839 * fs-specific logic here kind of sucks, but considering how
840 * people use these in practice, it's probably OK. If they want
841 * the kernel defaults, they can pass data="" instead of NULL.
842 */
843 if (!strcmp(type, "tmpfs")) {
844 /* tmpfs defaults to mode=1777 and size=50%. */
845 data = "mode=0755,size=10M";
846 }
847 }
848 if (data) {
849 m->data = strdup(data);
850 if (!m->data)
851 goto error;
852 m->has_data = 1;
853 }
854
855 /* If they don't specify any flags, default to secure ones. */
856 if (flags == 0)
857 flags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
858 m->flags = flags;
859
860 /*
861 * Unless asked to enter an existing namespace, force vfs namespacing
862 * so the mounts don't leak out into the containing vfs namespace.
863 * If Minijail is being asked to enter the root vfs namespace this will
864 * leak mounts, but it's unlikely that the user would ask to do that by
865 * mistake.
866 */
867 if (!j->flags.enter_vfs)
868 minijail_namespace_vfs(j);
869
870 if (j->mounts_tail)
871 j->mounts_tail->next = m;
872 else
873 j->mounts_head = m;
874 j->mounts_tail = m;
875 j->mounts_count++;
876
877 return 0;
878
879 error:
880 free(m->type);
881 free(m->src);
882 free(m->dest);
883 free(m);
884 return -ENOMEM;
885 }
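/*
 * Example (illustrative; /var/lib/foo is a made-up path): a private tmpfs for
 * /run plus a read-only bind mount of a host directory. Passing 0 for flags
 * or NULL for data picks the secure defaults chosen above.
 *
 *   minijail_mount_with_data(j, "none", "/run", "tmpfs",
 *                            MS_NODEV | MS_NOEXEC | MS_NOSUID,
 *                            "size=1M,mode=0755");
 *   minijail_bind(j, "/var/lib/foo", "/var/lib/foo", 0);   0 = read-only
 */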
886
887 int API minijail_mount(struct minijail *j, const char *src, const char *dest,
888 const char *type, unsigned long flags)
889 {
890 return minijail_mount_with_data(j, src, dest, type, flags, NULL);
891 }
892
893 int API minijail_bind(struct minijail *j, const char *src, const char *dest,
894 int writeable)
895 {
896 unsigned long flags = MS_BIND;
897
898 if (!writeable)
899 flags |= MS_RDONLY;
900
901 return minijail_mount(j, src, dest, "", flags);
902 }
903
904 int API minijail_add_remount(struct minijail *j, const char *mount_name,
905 unsigned long remount_mode)
906 {
907 struct minijail_remount *m;
908
909 if (*mount_name != '/')
910 return -EINVAL;
911 m = calloc(1, sizeof(*m));
912 if (!m)
913 return -ENOMEM;
914 m->mount_name = strdup(mount_name);
915 if (!m->mount_name) {
916 free(m);
917 return -ENOMEM;
918 }
919
920 m->remount_mode = remount_mode;
921
922 if (j->remounts_tail)
923 j->remounts_tail->next = m;
924 else
925 j->remounts_head = m;
926 j->remounts_tail = m;
927
928 return 0;
929 }
930
931 int API minijail_add_hook(struct minijail *j, minijail_hook_t hook,
932 void *payload, minijail_hook_event_t event)
933 {
934 struct hook *c;
935
936 if (hook == NULL)
937 return -EINVAL;
938 if (event >= MINIJAIL_HOOK_EVENT_MAX)
939 return -EINVAL;
940 c = calloc(1, sizeof(*c));
941 if (!c)
942 return -ENOMEM;
943
944 c->hook = hook;
945 c->payload = payload;
946 c->event = event;
947
948 if (j->hooks_tail)
949 j->hooks_tail->next = c;
950 else
951 j->hooks_head = c;
952 j->hooks_tail = c;
953
954 return 0;
955 }
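/*
 * Example (illustrative sketch): running a callback just before
 * chroot/pivot_root. This assumes the minijail_hook_t shape declared in
 * libminijail.h, i.e. a function taking the payload pointer and returning 0
 * on success.
 *
 *   static int announce(void *payload)
 *   {
 *           warn("about to chroot: %s", (const char *)payload);
 *           return 0;
 *   }
 *
 *   minijail_add_hook(j, announce, (void *)"my-jail",
 *                     MINIJAIL_HOOK_EVENT_PRE_CHROOT);
 */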
956
957 int API minijail_preserve_fd(struct minijail *j, int parent_fd, int child_fd)
958 {
959 if (parent_fd < 0 || child_fd < 0)
960 return -EINVAL;
961 if (j->preserved_fd_count >= MAX_PRESERVED_FDS)
962 return -ENOMEM;
963 j->preserved_fds[j->preserved_fd_count].parent_fd = parent_fd;
964 j->preserved_fds[j->preserved_fd_count].child_fd = child_fd;
965 j->preserved_fd_count++;
966 return 0;
967 }
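/*
 * Example (illustrative): keep the write end of a pipe open in the child as
 * fd 3 while minijail_close_open_fds() closes the rest.
 *
 *   int pipefd[2];
 *   if (pipe(pipefd))
 *           pdie("pipe");
 *   minijail_close_open_fds(j);
 *   minijail_preserve_fd(j, pipefd[1], 3);
 */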
968
969 int API minijail_set_preload_path(struct minijail *j, const char *preload_path)
970 {
971 if (j->preload_path)
972 return -EINVAL;
973 j->preload_path = strdup(preload_path);
974 if (!j->preload_path)
975 return -ENOMEM;
976 return 0;
977 }
978
979 static void clear_seccomp_options(struct minijail *j)
980 {
981 j->flags.seccomp_filter = 0;
982 j->flags.seccomp_filter_tsync = 0;
983 j->flags.seccomp_filter_logging = 0;
984 j->flags.seccomp_filter_allow_speculation = 0;
985 j->filter_len = 0;
986 j->filter_prog = NULL;
987 j->flags.no_new_privs = 0;
988 if (j->seccomp_policy_path) {
989 free(j->seccomp_policy_path);
990 }
991 j->seccomp_policy_path = NULL;
992 }
993
994 static int seccomp_should_use_filters(struct minijail *j)
995 {
996 if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL) == -1) {
997 /*
998 * |errno| will be set to EINVAL when seccomp has not been
999 * compiled into the kernel. On certain platforms and kernel
1000 * versions this is not a fatal failure. In that case, and only
1001 * in that case, disable seccomp and skip loading the filters.
1002 */
1003 if ((errno == EINVAL) && seccomp_can_softfail()) {
1004 warn("not loading seccomp filters, seccomp filter not "
1005 "supported");
1006 clear_seccomp_options(j);
1007 return 0;
1008 }
1009 /*
1010 * If |errno| != EINVAL or seccomp_can_softfail() is false,
1011 * we can proceed. Worst case scenario minijail_enter() will
1012 * abort() if seccomp fails.
1013 */
1014 }
1015 if (j->flags.seccomp_filter_tsync) {
1016 /* Are the seccomp(2) syscall and the TSYNC option supported? */
1017 if (sys_seccomp(SECCOMP_SET_MODE_FILTER,
1018 SECCOMP_FILTER_FLAG_TSYNC, NULL) == -1) {
1019 int saved_errno = errno;
1020 if (saved_errno == ENOSYS && seccomp_can_softfail()) {
1021 warn("seccomp(2) syscall not supported");
1022 clear_seccomp_options(j);
1023 return 0;
1024 } else if (saved_errno == EINVAL &&
1025 seccomp_can_softfail()) {
1026 warn(
1027 "seccomp filter thread sync not supported");
1028 clear_seccomp_options(j);
1029 return 0;
1030 }
1031 /*
1032 * Similar logic here. If seccomp_can_softfail() is false, or
1033 * |errno| is neither ENOSYS nor EINVAL, we can proceed.
1034 * Worst case scenario minijail_enter() will abort() if seccomp
1035 * or TSYNC fail.
1036 */
1037 }
1038 }
1039 if (j->flags.seccomp_filter_allow_speculation) {
1040 /* Is the SPEC_ALLOW flag supported? */
1041 if (!seccomp_filter_flags_available(
1042 SECCOMP_FILTER_FLAG_SPEC_ALLOW)) {
1043 warn("allowing speculative execution on seccomp "
1044 "processes not supported");
1045 j->flags.seccomp_filter_allow_speculation = 0;
1046 }
1047 }
1048 return 1;
1049 }
1050
1051 static int set_seccomp_filters_internal(struct minijail *j,
1052 const struct sock_fprog *filter,
1053 bool owned)
1054 {
1055 struct sock_fprog *fprog;
1056
1057 if (owned) {
1058 /*
1059 * If |owned| is true, it's OK to cast away the const-ness since
1060 * we'll own the pointer going forward.
1061 */
1062 fprog = (struct sock_fprog *)filter;
1063 } else {
1064 fprog = malloc(sizeof(struct sock_fprog));
1065 if (!fprog)
1066 return -ENOMEM;
1067 fprog->len = filter->len;
1068 fprog->filter = malloc(sizeof(struct sock_filter) * fprog->len);
1069 if (!fprog->filter) {
1070 free(fprog);
1071 return -ENOMEM;
1072 }
1073 memcpy(fprog->filter, filter->filter,
1074 sizeof(struct sock_filter) * fprog->len);
1075 }
1076
1077 if (j->filter_prog) {
1078 free(j->filter_prog->filter);
1079 free(j->filter_prog);
1080 }
1081
1082 j->filter_len = fprog->len;
1083 j->filter_prog = fprog;
1084 return 0;
1085 }
1086
1087 static int parse_seccomp_filters(struct minijail *j, const char *filename,
1088 FILE *policy_file)
1089 {
1090 struct sock_fprog *fprog = malloc(sizeof(struct sock_fprog));
1091 if (!fprog)
1092 return -ENOMEM;
1093
1094 struct filter_options filteropts;
1095
1096 /*
1097 * Figure out filter options.
1098 * Allow logging?
1099 */
1100 filteropts.allow_logging =
1101 debug_logging_allowed() && seccomp_is_logging_allowed(j);
1102
1103 /* What to do on a blocked system call? */
1104 if (filteropts.allow_logging) {
1105 if (seccomp_ret_log_available())
1106 filteropts.action = ACTION_RET_LOG;
1107 else
1108 filteropts.action = ACTION_RET_TRAP;
1109 } else {
1110 if (j->flags.seccomp_filter_tsync) {
1111 if (seccomp_ret_kill_process_available()) {
1112 filteropts.action = ACTION_RET_KILL_PROCESS;
1113 } else {
1114 filteropts.action = ACTION_RET_TRAP;
1115 }
1116 } else {
1117 filteropts.action = ACTION_RET_KILL;
1118 }
1119 }
1120
1121 /*
1122 * If SECCOMP_RET_LOG is not available, need to allow extra syscalls
1123 * for logging.
1124 */
1125 filteropts.allow_syscalls_for_logging =
1126 filteropts.allow_logging && !seccomp_ret_log_available();
1127
1128 /* Whether to fail on duplicate syscalls. */
1129 filteropts.allow_duplicate_syscalls = allow_duplicate_syscalls();
1130
1131 if (compile_filter(filename, policy_file, fprog, &filteropts)) {
1132 free(fprog);
1133 return -1;
1134 }
1135
1136 return set_seccomp_filters_internal(j, fprog, true /* owned */);
1137 }
1138
1139 void API minijail_parse_seccomp_filters(struct minijail *j, const char *path)
1140 {
1141 if (!seccomp_should_use_filters(j))
1142 return;
1143
1144 attribute_cleanup_fp FILE *file = fopen(path, "re");
1145 if (!file) {
1146 pdie("failed to open seccomp filter file '%s'", path);
1147 }
1148
1149 if (parse_seccomp_filters(j, path, file) != 0) {
1150 die("failed to compile seccomp filter BPF program in '%s'",
1151 path);
1152 }
1153 if (j->seccomp_policy_path) {
1154 free(j->seccomp_policy_path);
1155 }
1156 j->seccomp_policy_path = strdup(path);
1157 }
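/*
 * Example (illustrative sketch): a minimal policy file in the Minijail
 * seccomp policy syntax (one "syscall: expression" rule per line), compiled
 * into a BPF program by compile_filter() above. The exact grammar lives with
 * syscall_filter.c; this is only a sketch.
 *
 *   read: 1
 *   write: 1
 *   exit_group: 1
 *
 *   minijail_no_new_privs(j);
 *   minijail_use_seccomp_filter(j);
 *   minijail_parse_seccomp_filters(j, "/path/to/program.policy");
 */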
1158
1159 void API minijail_parse_seccomp_filters_from_fd(struct minijail *j, int fd)
1160 {
1161 char *fd_path, *path;
1162 attribute_cleanup_fp FILE *file = NULL;
1163
1164 if (!seccomp_should_use_filters(j))
1165 return;
1166
1167 file = fdopen(fd, "r");
1168 if (!file) {
1169 pdie("failed to associate stream with fd %d", fd);
1170 }
1171
1172 if (asprintf(&fd_path, "/proc/self/fd/%d", fd) == -1)
1173 pdie("failed to create path for fd %d", fd);
1174 path = realpath(fd_path, NULL);
1175 if (path == NULL)
1176 pwarn("failed to get path of fd %d", fd);
1177 free(fd_path);
1178
1179 if (parse_seccomp_filters(j, path ? path : "<fd>", file) != 0) {
1180 die("failed to compile seccomp filter BPF program from fd %d",
1181 fd);
1182 }
1183 if (j->seccomp_policy_path) {
1184 free(j->seccomp_policy_path);
1185 }
1186 j->seccomp_policy_path = path;
1187 }
1188
1189 void API minijail_set_seccomp_filters(struct minijail *j,
1190 const struct sock_fprog *filter)
1191 {
1192 if (!seccomp_should_use_filters(j))
1193 return;
1194
1195 if (seccomp_is_logging_allowed(j)) {
1196 die("minijail_log_seccomp_filter_failures() is incompatible "
1197 "with minijail_set_seccomp_filters()");
1198 }
1199
1200 /*
1201 * set_seccomp_filters_internal() can only fail with ENOMEM.
1202 * Furthermore, since we won't own the incoming filter, it will not be
1203 * modified.
1204 */
1205 if (set_seccomp_filters_internal(j, filter, false /* owned */) < 0) {
1206 die("failed to set seccomp filter");
1207 }
1208 }
1209
1210 int API minijail_use_alt_syscall(struct minijail *j, const char *table)
1211 {
1212 j->alt_syscall_table = strdup(table);
1213 if (!j->alt_syscall_table)
1214 return -ENOMEM;
1215 j->flags.alt_syscall = 1;
1216 return 0;
1217 }
1218
1219 struct marshal_state {
1220 size_t available;
1221 size_t total;
1222 char *buf;
1223 };
1224
1225 static void marshal_state_init(struct marshal_state *state, char *buf,
1226 size_t available)
1227 {
1228 state->available = available;
1229 state->buf = buf;
1230 state->total = 0;
1231 }
1232
1233 static void marshal_append(struct marshal_state *state, const void *src,
1234 size_t length)
1235 {
1236 size_t copy_len = MIN(state->available, length);
1237
1238 /* Up to |available| will be written. */
1239 if (copy_len) {
1240 memcpy(state->buf, src, copy_len);
1241 state->buf += copy_len;
1242 state->available -= copy_len;
1243 }
1244 /* |total| will contain the expected length. */
1245 state->total += length;
1246 }
1247
1248 static void marshal_append_string(struct marshal_state *state, const char *src)
1249 {
1250 marshal_append(state, src, strlen(src) + 1);
1251 }
1252
1253 static void marshal_mount(struct marshal_state *state,
1254 const struct mountpoint *m)
1255 {
1256 marshal_append(state, m->src, strlen(m->src) + 1);
1257 marshal_append(state, m->dest, strlen(m->dest) + 1);
1258 marshal_append(state, m->type, strlen(m->type) + 1);
1259 marshal_append(state, (char *)&m->has_data, sizeof(m->has_data));
1260 if (m->has_data)
1261 marshal_append(state, m->data, strlen(m->data) + 1);
1262 marshal_append(state, (char *)&m->flags, sizeof(m->flags));
1263 }
1264
1265 static void minijail_marshal_helper(struct marshal_state *state,
1266 const struct minijail *j)
1267 {
1268 struct mountpoint *m = NULL;
1269 size_t i;
1270
1271 marshal_append(state, (char *)j, sizeof(*j));
1272 if (j->user)
1273 marshal_append_string(state, j->user);
1274 if (j->suppl_gid_list) {
1275 marshal_append(state, j->suppl_gid_list,
1276 j->suppl_gid_count * sizeof(gid_t));
1277 }
1278 if (j->chrootdir)
1279 marshal_append_string(state, j->chrootdir);
1280 if (j->hostname)
1281 marshal_append_string(state, j->hostname);
1282 if (j->alt_syscall_table) {
1283 marshal_append(state, j->alt_syscall_table,
1284 strlen(j->alt_syscall_table) + 1);
1285 }
1286 if (j->flags.seccomp_filter && j->filter_prog) {
1287 struct sock_fprog *fp = j->filter_prog;
1288 marshal_append(state, (char *)fp->filter,
1289 fp->len * sizeof(struct sock_filter));
1290 }
1291 for (m = j->mounts_head; m; m = m->next) {
1292 marshal_mount(state, m);
1293 }
1294 for (i = 0; i < j->cgroup_count; ++i)
1295 marshal_append_string(state, j->cgroups[i]);
1296 if (j->seccomp_policy_path)
1297 marshal_append_string(state, j->seccomp_policy_path);
1298 }
1299
1300 size_t API minijail_size(const struct minijail *j)
1301 {
1302 struct marshal_state state;
1303 marshal_state_init(&state, NULL, 0);
1304 minijail_marshal_helper(&state, j);
1305 return state.total;
1306 }
1307
1308 int minijail_marshal(const struct minijail *j, char *buf, size_t available)
1309 {
1310 struct marshal_state state;
1311 marshal_state_init(&state, buf, available);
1312 minijail_marshal_helper(&state, j);
1313 return (state.total > available);
1314 }
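/*
 * Illustrative two-pass use of the marshaling helpers (as when handing the
 * jail to a preloaded child): size the buffer first, serialize into it, then
 * rebuild on the other side with minijail_unmarshal().
 *
 *   size_t sz = minijail_size(j);
 *   char *buf = malloc(sz);
 *   if (!buf || minijail_marshal(j, buf, sz))
 *           die("failed to marshal jail");
 *   ...transfer buf and sz, then in the receiver...
 *   struct minijail *copy = minijail_new();
 *   if (minijail_unmarshal(copy, buf, sz))
 *           die("failed to unmarshal jail");
 */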
1315
1316 int minijail_unmarshal(struct minijail *j, char *serialized, size_t length)
1317 {
1318 size_t i;
1319 size_t count;
1320 int ret = -EINVAL;
1321
1322 if (length < sizeof(*j))
1323 goto out;
1324 memcpy((void *)j, serialized, sizeof(*j));
1325 serialized += sizeof(*j);
1326 length -= sizeof(*j);
1327
1328 /* Potentially stale pointers not used as signals. */
1329 j->preload_path = NULL;
1330 j->pid_file_path = NULL;
1331 j->uidmap = NULL;
1332 j->gidmap = NULL;
1333 j->mounts_head = NULL;
1334 j->mounts_tail = NULL;
1335 j->remounts_head = NULL;
1336 j->remounts_tail = NULL;
1337 j->filter_prog = NULL;
1338 j->hooks_head = NULL;
1339 j->hooks_tail = NULL;
1340
1341 if (j->user) { /* stale pointer */
1342 char *user = consumestr(&serialized, &length);
1343 if (!user)
1344 goto clear_pointers;
1345 j->user = strdup(user);
1346 if (!j->user)
1347 goto clear_pointers;
1348 }
1349
1350 if (j->suppl_gid_list) { /* stale pointer */
1351 if (j->suppl_gid_count > NGROUPS_MAX) {
1352 goto bad_gid_list;
1353 }
1354 size_t gid_list_size = j->suppl_gid_count * sizeof(gid_t);
1355 void *gid_list_bytes =
1356 consumebytes(gid_list_size, &serialized, &length);
1357 if (!gid_list_bytes)
1358 goto bad_gid_list;
1359
1360 j->suppl_gid_list = calloc(j->suppl_gid_count, sizeof(gid_t));
1361 if (!j->suppl_gid_list)
1362 goto bad_gid_list;
1363
1364 memcpy(j->suppl_gid_list, gid_list_bytes, gid_list_size);
1365 }
1366
1367 if (j->chrootdir) { /* stale pointer */
1368 char *chrootdir = consumestr(&serialized, &length);
1369 if (!chrootdir)
1370 goto bad_chrootdir;
1371 j->chrootdir = strdup(chrootdir);
1372 if (!j->chrootdir)
1373 goto bad_chrootdir;
1374 }
1375
1376 if (j->hostname) { /* stale pointer */
1377 char *hostname = consumestr(&serialized, &length);
1378 if (!hostname)
1379 goto bad_hostname;
1380 j->hostname = strdup(hostname);
1381 if (!j->hostname)
1382 goto bad_hostname;
1383 }
1384
1385 if (j->alt_syscall_table) { /* stale pointer */
1386 char *alt_syscall_table = consumestr(&serialized, &length);
1387 if (!alt_syscall_table)
1388 goto bad_syscall_table;
1389 j->alt_syscall_table = strdup(alt_syscall_table);
1390 if (!j->alt_syscall_table)
1391 goto bad_syscall_table;
1392 }
1393
1394 if (j->flags.seccomp_filter && j->filter_len > 0) {
1395 size_t ninstrs = j->filter_len;
1396 if (ninstrs > (SIZE_MAX / sizeof(struct sock_filter)) ||
1397 ninstrs > USHRT_MAX)
1398 goto bad_filters;
1399
1400 size_t program_len = ninstrs * sizeof(struct sock_filter);
1401 void *program = consumebytes(program_len, &serialized, &length);
1402 if (!program)
1403 goto bad_filters;
1404
1405 j->filter_prog = malloc(sizeof(struct sock_fprog));
1406 if (!j->filter_prog)
1407 goto bad_filters;
1408
1409 j->filter_prog->len = ninstrs;
1410 j->filter_prog->filter = malloc(program_len);
1411 if (!j->filter_prog->filter)
1412 goto bad_filter_prog_instrs;
1413
1414 memcpy(j->filter_prog->filter, program, program_len);
1415 }
1416
1417 count = j->mounts_count;
1418 j->mounts_count = 0;
1419 for (i = 0; i < count; ++i) {
1420 unsigned long *flags;
1421 int *has_data;
1422 const char *dest;
1423 const char *type;
1424 const char *data = NULL;
1425 const char *src = consumestr(&serialized, &length);
1426 if (!src)
1427 goto bad_mounts;
1428 dest = consumestr(&serialized, &length);
1429 if (!dest)
1430 goto bad_mounts;
1431 type = consumestr(&serialized, &length);
1432 if (!type)
1433 goto bad_mounts;
1434 has_data =
1435 consumebytes(sizeof(*has_data), &serialized, &length);
1436 if (!has_data)
1437 goto bad_mounts;
1438 if (*has_data) {
1439 data = consumestr(&serialized, &length);
1440 if (!data)
1441 goto bad_mounts;
1442 }
1443 flags = consumebytes(sizeof(*flags), &serialized, &length);
1444 if (!flags)
1445 goto bad_mounts;
1446 if (minijail_mount_with_data(j, src, dest, type, *flags, data))
1447 goto bad_mounts;
1448 }
1449
1450 count = j->cgroup_count;
1451 j->cgroup_count = 0;
1452 for (i = 0; i < count; ++i) {
1453 char *cgroup = consumestr(&serialized, &length);
1454 if (!cgroup)
1455 goto bad_cgroups;
1456 j->cgroups[i] = strdup(cgroup);
1457 if (!j->cgroups[i])
1458 goto bad_cgroups;
1459 ++j->cgroup_count;
1460 }
1461
1462 if (j->seccomp_policy_path) { /* stale pointer */
1463 char *seccomp_policy_path = consumestr(&serialized, &length);
1464 if (!seccomp_policy_path)
1465 goto bad_cgroups;
1466 j->seccomp_policy_path = strdup(seccomp_policy_path);
1467 if (!j->seccomp_policy_path)
1468 goto bad_cgroups;
1469 }
1470
1471 return 0;
1472
1473 /*
1474 * If more is added after j->seccomp_policy_path, then this is needed:
1475 * if (j->seccomp_policy_path)
1476 * free(j->seccomp_policy_path);
1477 */
1478
1479 bad_cgroups:
1480 free_mounts_list(j);
1481 free_remounts_list(j);
1482 for (i = 0; i < j->cgroup_count; ++i)
1483 free(j->cgroups[i]);
1484 bad_mounts:
1485 if (j->filter_prog && j->filter_prog->filter)
1486 free(j->filter_prog->filter);
1487 bad_filter_prog_instrs:
1488 if (j->filter_prog)
1489 free(j->filter_prog);
1490 bad_filters:
1491 if (j->alt_syscall_table)
1492 free(j->alt_syscall_table);
1493 bad_syscall_table:
1494 if (j->hostname)
1495 free(j->hostname);
1496 bad_hostname:
1497 if (j->chrootdir)
1498 free(j->chrootdir);
1499 bad_chrootdir:
1500 if (j->suppl_gid_list)
1501 free(j->suppl_gid_list);
1502 bad_gid_list:
1503 if (j->user)
1504 free(j->user);
1505 clear_pointers:
1506 j->user = NULL;
1507 j->suppl_gid_list = NULL;
1508 j->chrootdir = NULL;
1509 j->hostname = NULL;
1510 j->alt_syscall_table = NULL;
1511 j->cgroup_count = 0;
1512 j->seccomp_policy_path = NULL;
1513 out:
1514 return ret;
1515 }
1516
1517 struct dev_spec {
1518 const char *name;
1519 mode_t mode;
1520 dev_t major, minor;
1521 };
1522
1523 // clang-format off
1524 static const struct dev_spec device_nodes[] = {
1525 {
1526 "null",
1527 S_IFCHR | 0666, 1, 3,
1528 },
1529 {
1530 "zero",
1531 S_IFCHR | 0666, 1, 5,
1532 },
1533 {
1534 "full",
1535 S_IFCHR | 0666, 1, 7,
1536 },
1537 {
1538 "urandom",
1539 S_IFCHR | 0444, 1, 9,
1540 },
1541 {
1542 "tty",
1543 S_IFCHR | 0666, 5, 0,
1544 },
1545 };
1546 // clang-format on
1547
1548 struct dev_sym_spec {
1549 const char *source, *dest;
1550 };
1551
1552 static const struct dev_sym_spec device_symlinks[] = {
1553 {
1554 "ptmx",
1555 "pts/ptmx",
1556 },
1557 {
1558 "fd",
1559 "/proc/self/fd",
1560 },
1561 {
1562 "stdin",
1563 "fd/0",
1564 },
1565 {
1566 "stdout",
1567 "fd/1",
1568 },
1569 {
1570 "stderr",
1571 "fd/2",
1572 },
1573 };
1574
1575 /*
1576 * Clean up the temporary dev path we had setup previously. In case of errors,
1577 * we don't want to go leaking empty tempdirs.
1578 */
1579 static void mount_dev_cleanup(char *dev_path)
1580 {
1581 umount2(dev_path, MNT_DETACH);
1582 rmdir(dev_path);
1583 free(dev_path);
1584 }
1585
1586 /*
1587 * Set up the pseudo /dev path at the temporary location.
1588 * See mount_dev_finalize for more details.
1589 */
1590 static int mount_dev(char **dev_path_ret)
1591 {
1592 int ret;
1593 attribute_cleanup_fd int dev_fd = -1;
1594 size_t i;
1595 mode_t mask;
1596 char *dev_path;
1597
1598 /*
1599 * Create a temp path for the /dev init. We'll relocate this to the
1600 * final location later on in the startup process.
1601 */
1602 dev_path = *dev_path_ret = strdup("/tmp/minijail.dev.XXXXXX");
1603 if (dev_path == NULL || mkdtemp(dev_path) == NULL)
1604 pdie("could not create temp path for /dev");
1605
1606 /* Set up the empty /dev mount point first. */
1607 ret = mount("minijail-devfs", dev_path, "tmpfs", MS_NOEXEC | MS_NOSUID,
1608 "size=5M,mode=755");
1609 if (ret) {
1610 rmdir(dev_path);
1611 return ret;
1612 }
1613
1614 /* We want to set the mode directly from the spec. */
1615 mask = umask(0);
1616
1617 /* Get a handle to the temp dev path for *at funcs below. */
1618 dev_fd = open(dev_path, O_DIRECTORY | O_PATH | O_CLOEXEC);
1619 if (dev_fd < 0) {
1620 ret = 1;
1621 goto done;
1622 }
1623
1624 /* Create all the nodes in /dev. */
1625 for (i = 0; i < ARRAY_SIZE(device_nodes); ++i) {
1626 const struct dev_spec *ds = &device_nodes[i];
1627 ret = mknodat(dev_fd, ds->name, ds->mode,
1628 makedev(ds->major, ds->minor));
1629 if (ret)
1630 goto done;
1631 }
1632
1633 /* Create all the symlinks in /dev. */
1634 for (i = 0; i < ARRAY_SIZE(device_symlinks); ++i) {
1635 const struct dev_sym_spec *ds = &device_symlinks[i];
1636 ret = symlinkat(ds->dest, dev_fd, ds->source);
1637 if (ret)
1638 goto done;
1639 }
1640
1641 /* Create empty dir for glibc shared mem APIs. */
1642 ret = mkdirat(dev_fd, "shm", 01777);
1643 if (ret)
1644 goto done;
1645
1646 /* Restore old mask. */
1647 done:
1648 umask(mask);
1649
1650 if (ret)
1651 mount_dev_cleanup(dev_path);
1652
1653 return ret;
1654 }
1655
1656 /*
1657 * Relocate the temporary /dev mount to its final /dev place.
1658 * We have to do this two step process so people can bind mount extra
1659 * /dev paths like /dev/log.
1660 */
1661 static int mount_dev_finalize(const struct minijail *j, char *dev_path)
1662 {
1663 int ret = -1;
1664 char *dest = NULL;
1665
1666 /* Unmount the /dev mount if possible. */
1667 if (umount2("/dev", MNT_DETACH))
1668 goto done;
1669
1670 if (asprintf(&dest, "%s/dev", j->chrootdir ?: "") < 0)
1671 goto done;
1672
1673 if (mount(dev_path, dest, NULL, MS_MOVE, NULL))
1674 goto done;
1675
1676 ret = 0;
1677 done:
1678 free(dest);
1679 mount_dev_cleanup(dev_path);
1680
1681 return ret;
1682 }
1683
1684 /*
1685 * mount_one: Applies mounts from @m for @j, recursing as needed.
1686 * @j Minijail these mounts are for
1687 * @m Head of list of mounts
1688 *
1689 * Returns 0 for success.
1690 */
1691 static int mount_one(const struct minijail *j, struct mountpoint *m,
1692 const char *dev_path)
1693 {
1694 int ret;
1695 char *dest;
1696 int remount = 0;
1697 unsigned long original_mnt_flags = 0;
1698
1699 /* We assume |dest| has a leading "/". */
1700 if (dev_path && strncmp("/dev/", m->dest, 5) == 0) {
1701 /*
1702 * Since the temp path is rooted at /dev, skip that dest part.
1703 */
1704 if (asprintf(&dest, "%s%s", dev_path, m->dest + 4) < 0)
1705 return -ENOMEM;
1706 } else {
1707 if (asprintf(&dest, "%s%s", j->chrootdir ?: "", m->dest) < 0)
1708 return -ENOMEM;
1709 }
1710
1711 ret =
1712 setup_mount_destination(m->src, dest, j->uid, j->gid,
1713 (m->flags & MS_BIND), &original_mnt_flags);
1714 if (ret) {
1715 warn("cannot create mount target '%s'", dest);
1716 goto error;
1717 }
1718
1719 /*
1720 * Bind mounts that change the 'ro' flag have to be remounted since
1721 * 'bind' and other flags can't both be specified in the same command.
1722 * Remount after the initial mount.
1723 */
1724 if ((m->flags & MS_BIND) &&
1725 ((m->flags & MS_RDONLY) != (original_mnt_flags & MS_RDONLY))) {
1726 remount = 1;
1727 /*
1728 * Restrict the mount flags to those that are user-settable in a
1729 * MS_REMOUNT request, but excluding MS_RDONLY. The
1730 * user-requested mount flags will dictate whether the remount
1731 * will have that flag or not.
1732 */
1733 original_mnt_flags &= (MS_USER_SETTABLE_MASK & ~MS_RDONLY);
1734 }
1735
1736 ret = mount(m->src, dest, m->type, m->flags, m->data);
1737 if (ret) {
1738 pwarn("cannot bind-mount '%s' as '%s' with flags %#lx", m->src,
1739 dest, m->flags);
1740 goto error;
1741 }
1742
1743 if (remount) {
1744 ret =
1745 mount(m->src, dest, NULL,
1746 m->flags | original_mnt_flags | MS_REMOUNT, m->data);
1747 if (ret) {
1748 pwarn(
1749 "cannot bind-remount '%s' as '%s' with flags %#lx",
1750 m->src, dest,
1751 m->flags | original_mnt_flags | MS_REMOUNT);
1752 goto error;
1753 }
1754 }
1755
1756 free(dest);
1757 if (m->next)
1758 return mount_one(j, m->next, dev_path);
1759 return 0;
1760
1761 error:
1762 free(dest);
1763 return ret;
1764 }
1765
1766 static void process_mounts_or_die(const struct minijail *j)
1767 {
1768 /*
1769 * We have to mount /dev first in case there are bind mounts from
1770 * the original /dev into the new unique tmpfs one.
1771 */
1772 char *dev_path = NULL;
1773 if (j->flags.mount_dev && mount_dev(&dev_path))
1774 pdie("mount_dev failed");
1775
1776 if (j->mounts_head && mount_one(j, j->mounts_head, dev_path)) {
1777 if (dev_path)
1778 mount_dev_cleanup(dev_path);
1779
1780 _exit(MINIJAIL_ERR_MOUNT);
1781 }
1782
1783 /*
1784 * Once all bind mounts have been processed, move the temp dev to
1785 * its final /dev home.
1786 */
1787 if (j->flags.mount_dev && mount_dev_finalize(j, dev_path))
1788 pdie("mount_dev_finalize failed");
1789 }
1790
1791 static int enter_chroot(const struct minijail *j)
1792 {
1793 run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_CHROOT);
1794
1795 if (chroot(j->chrootdir))
1796 return -errno;
1797
1798 if (chdir("/"))
1799 return -errno;
1800
1801 return 0;
1802 }
1803
1804 static int enter_pivot_root(const struct minijail *j)
1805 {
1806 attribute_cleanup_fd int oldroot = -1;
1807 attribute_cleanup_fd int newroot = -1;
1808
1809 run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_CHROOT);
1810
1811 /*
1812 * Keep the fd for both old and new root.
1813 * It will be used in fchdir(2) later.
1814 */
1815 oldroot = open("/", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
1816 if (oldroot < 0)
1817 pdie("failed to open / for fchdir");
1818 newroot = open(j->chrootdir, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
1819 if (newroot < 0)
1820 pdie("failed to open %s for fchdir", j->chrootdir);
1821
1822 /*
1823 * To ensure j->chrootdir is the root of a filesystem,
1824 * do a self bind mount.
1825 */
1826 if (mount(j->chrootdir, j->chrootdir, "bind", MS_BIND | MS_REC, ""))
1827 pdie("failed to bind mount '%s'", j->chrootdir);
1828 if (chdir(j->chrootdir))
1829 return -errno;
1830 if (syscall(SYS_pivot_root, ".", "."))
1831 pdie("pivot_root");
1832
1833 /*
1834 * Now the old root is mounted on top of the new root. Use fchdir(2) to
1835 * change to the old root and unmount it.
1836 */
1837 if (fchdir(oldroot))
1838 pdie("failed to fchdir to old /");
1839
1840 /*
1841 * If skip_remount_private was enabled for minijail_enter(),
1842 * there could be a shared mount point under |oldroot|. In that case,
1843 * mounts under this shared mount point will be unmounted below, and
1844 * this unmounting will propagate to the original mount namespace
1845 * (because the mount point is shared). To prevent this unexpected
1846 * unmounting, remove these mounts from their peer groups by recursively
1847 * remounting them as MS_PRIVATE.
1848 */
1849 if (mount(NULL, ".", NULL, MS_REC | MS_PRIVATE, NULL))
1850 pdie("failed to mount(/, private) before umount(/)");
1851 /* The old root might be busy, so use lazy unmount. */
1852 if (umount2(".", MNT_DETACH))
1853 pdie("umount(/)");
1854 /* Change back to the new root. */
1855 if (fchdir(newroot))
1856 return -errno;
1857 if (chroot("/"))
1858 return -errno;
1859 /* Set correct CWD for getcwd(3). */
1860 if (chdir("/"))
1861 return -errno;
1862
1863 return 0;
1864 }
1865
1866 static int mount_tmp(const struct minijail *j)
1867 {
1868 const char fmt[] = "size=%zu,mode=1777";
1869 /* Count for the user storing ULLONG_MAX literally + extra space. */
1870 char data[sizeof(fmt) + sizeof("18446744073709551615ULL")];
1871 int ret;
1872
1873 ret = snprintf(data, sizeof(data), fmt, j->tmpfs_size);
1874
1875 if (ret <= 0)
1876 pdie("tmpfs size spec error");
1877 else if ((size_t)ret >= sizeof(data))
1878 pdie("tmpfs size spec too large");
1879 return mount("none", "/tmp", "tmpfs", MS_NODEV | MS_NOEXEC | MS_NOSUID,
1880 data);
1881 }
1882
1883 static int remount_proc_readonly(const struct minijail *j)
1884 {
1885 const char *kProcPath = "/proc";
1886 const unsigned int kSafeFlags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
1887 /*
1888 * Right now, we're holding a reference to our parent's old mount of
1889 * /proc in our namespace, which means using MS_REMOUNT here would
1890 * mutate our parent's mount as well, even though we're in a VFS
1891 * namespace (!). Instead, remove their mount from our namespace lazily
1892 * (MNT_DETACH) and make our own.
1893 *
1894 * However, we skip this in the user namespace case because it will
1895 * invariably fail. Every mount namespace is "owned" by the
1896 * user namespace of the process that creates it. Mount namespace A is
1897 * "less privileged" than mount namespace B if A is created off of B,
1898 * and B is owned by a different user namespace.
1899 * When a less privileged mount namespace is created, the mounts used to
1900 * initialize it (coming from the more privileged mount namespace) come
1901 * as a unit, and are locked together. This means that code running in
1902 * the new mount (and user) namespace cannot piecemeal unmount
1903 * individual mounts inherited from a more privileged mount namespace.
1904 * See https://man7.org/linux/man-pages/man7/mount_namespaces.7.html,
1905 * "Restrictions on mount namespaces" for details.
1906 *
1907 * This happens in our use case because we first enter a new user
1908 * namespace (on clone(2)) and then we unshare(2) a new mount namespace,
1909 * which means the new mount namespace is less privileged than its
1910 * parent mount namespace. This would also happen if we entered a new
1911 * mount namespace on clone(2), since the user namespace is created
1912 * first.
1913 * In all other non-user-namespace cases the new mount namespace is
1914 * similarly privileged as the parent mount namespace so unmounting a
1915 * single mount is allowed.
1916 *
1917 * We still remount /proc as read-only in the user namespace case
1918 * because while a process with CAP_SYS_ADMIN in the new user namespace
1919 * can unmount the RO mount and get at the RW mount, an attacker with
1920 * access only to a write primitive will not be able to modify /proc.
1921 */
1922 if (!j->flags.userns && umount2(kProcPath, MNT_DETACH))
1923 return -errno;
1924 if (mount("proc", kProcPath, "proc", kSafeFlags | MS_RDONLY, ""))
1925 return -errno;
1926 return 0;
1927 }
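/*
 * For illustration, in the non-user-namespace case the two calls above are
 * roughly the C equivalent of:
 *
 *   umount -l /proc
 *   mount -t proc -o ro,nosuid,nodev,noexec proc /proc
 */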
1928
1929 static void kill_child_and_die(const struct minijail *j, const char *msg)
1930 {
1931 kill(j->initpid, SIGKILL);
1932 die("%s", msg);
1933 }
1934
1935 static void write_pid_file_or_die(const struct minijail *j)
1936 {
1937 if (write_pid_to_path(j->initpid, j->pid_file_path))
1938 kill_child_and_die(j, "failed to write pid file");
1939 }
1940
1941 static void add_to_cgroups_or_die(const struct minijail *j)
1942 {
1943 size_t i;
1944
1945 for (i = 0; i < j->cgroup_count; ++i) {
1946 if (write_pid_to_path(j->initpid, j->cgroups[i]))
1947 kill_child_and_die(j, "failed to add to cgroups");
1948 }
1949 }
1950
1951 static void set_rlimits_or_die(const struct minijail *j)
1952 {
1953 size_t i;
1954
1955 for (i = 0; i < j->rlimit_count; ++i) {
1956 struct rlimit limit;
1957 limit.rlim_cur = j->rlimits[i].cur;
1958 limit.rlim_max = j->rlimits[i].max;
1959 if (prlimit(j->initpid, j->rlimits[i].type, &limit, NULL))
1960 kill_child_and_die(j, "failed to set rlimit");
1961 }
1962 }
1963
1964 static void write_ugid_maps_or_die(const struct minijail *j)
1965 {
1966 if (j->uidmap && write_proc_file(j->initpid, j->uidmap, "uid_map") != 0)
1967 kill_child_and_die(j, "failed to write uid_map");
1968 if (j->gidmap && j->flags.disable_setgroups) {
1969 /*
1970 * Older kernels might not have the /proc/<pid>/setgroups files.
1971 */
1972 int ret = write_proc_file(j->initpid, "deny", "setgroups");
1973 if (ret != 0) {
1974 if (ret == -ENOENT) {
1975 /*
1976 * See
1977 * http://man7.org/linux/man-pages/man7/user_namespaces.7.html.
1978 */
1979 warn("could not disable setgroups(2)");
1980 } else
1981 kill_child_and_die(
1982 j, "failed to disable setgroups(2)");
1983 }
1984 }
1985 if (j->gidmap && write_proc_file(j->initpid, j->gidmap, "gid_map") != 0)
1986 kill_child_and_die(j, "failed to write gid_map");
1987 }
1988
1989 static void enter_user_namespace(const struct minijail *j)
1990 {
1991 int uid = j->flags.uid ? j->uid : 0;
1992 int gid = j->flags.gid ? j->gid : 0;
1993 if (j->gidmap && setresgid(gid, gid, gid)) {
1994 pdie("user_namespaces: setresgid(%d, %d, %d) failed", gid, gid,
1995 gid);
1996 }
1997 if (j->uidmap && setresuid(uid, uid, uid)) {
1998 pdie("user_namespaces: setresuid(%d, %d, %d) failed", uid, uid,
1999 uid);
2000 }
2001 }
2002
2003 static void parent_setup_complete(int *pipe_fds)
2004 {
2005 close_and_reset(&pipe_fds[0]);
2006 close_and_reset(&pipe_fds[1]);
2007 }
2008
2009 /*
2010 * wait_for_parent_setup: Called by the child process to wait for any
2011 * further parent-side setup to complete before continuing.
2012 */
2013 static void wait_for_parent_setup(int *pipe_fds)
2014 {
2015 char buf;
2016
2017 close_and_reset(&pipe_fds[1]);
2018
2019 /* Wait for parent to complete setup and close the pipe. */
2020 if (read(pipe_fds[0], &buf, 1) != 0)
2021 die("failed to sync with parent");
2022 close_and_reset(&pipe_fds[0]);
2023 }
2024
2025 static void drop_ugid(const struct minijail *j)
2026 {
2027 if (j->flags.inherit_suppl_gids + j->flags.keep_suppl_gids +
2028 j->flags.set_suppl_gids >
2029 1) {
2030 die("can only do one of inherit, keep, or set supplementary "
2031 "groups");
2032 }
2033
2034 if (j->flags.inherit_suppl_gids) {
2035 if (initgroups(j->user, j->usergid))
2036 pdie("initgroups(%s, %d) failed", j->user, j->usergid);
2037 } else if (j->flags.set_suppl_gids) {
2038 if (setgroups(j->suppl_gid_count, j->suppl_gid_list))
2039 pdie("setgroups(suppl_gids) failed");
2040 } else if (!j->flags.keep_suppl_gids && !j->flags.disable_setgroups) {
2041 /*
2042 * Only attempt to clear supplementary groups if we are changing
2043 * users or groups, and if the caller did not request to disable
2044 * setgroups (used when entering a user namespace as a
2045 * non-privileged user).
2046 */
2047 if ((j->flags.uid || j->flags.gid) && setgroups(0, NULL))
2048 pdie("setgroups(0, NULL) failed");
2049 }
2050
2051 if (j->flags.gid && setresgid(j->gid, j->gid, j->gid))
2052 pdie("setresgid(%d, %d, %d) failed", j->gid, j->gid, j->gid);
2053
2054 if (j->flags.uid && setresuid(j->uid, j->uid, j->uid))
2055 pdie("setresuid(%d, %d, %d) failed", j->uid, j->uid, j->uid);
2056 }
2057
2058 static void drop_capbset(uint64_t keep_mask, unsigned int last_valid_cap)
2059 {
2060 const uint64_t one = 1;
2061 unsigned int i;
2062 for (i = 0; i < sizeof(keep_mask) * 8 && i <= last_valid_cap; ++i) {
2063 if (keep_mask & (one << i))
2064 continue;
2065 if (prctl(PR_CAPBSET_DROP, i))
2066 pdie("could not drop capability from bounding set");
2067 }
2068 }
2069
2070 static void drop_caps(const struct minijail *j, unsigned int last_valid_cap)
2071 {
2072 if (!j->flags.use_caps)
2073 return;
2074
2075 cap_t caps = cap_get_proc();
2076 cap_value_t flag[1];
2077 const size_t ncaps = sizeof(j->caps) * 8;
2078 const uint64_t one = 1;
2079 unsigned int i;
2080 if (!caps)
2081 die("can't get process caps");
2082 if (cap_clear(caps))
2083 die("can't clear caps");
2084
2085 for (i = 0; i < ncaps && i <= last_valid_cap; ++i) {
2086 /* Keep CAP_SETPCAP for dropping bounding set bits. */
2087 if (i != CAP_SETPCAP && !(j->caps & (one << i)))
2088 continue;
2089 flag[0] = i;
2090 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_SET))
2091 die("can't add effective cap");
2092 if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_SET))
2093 die("can't add permitted cap");
2094 if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_SET))
2095 die("can't add inheritable cap");
2096 }
2097 if (cap_set_proc(caps))
2098 die("can't apply initial cleaned capset");
2099
2100 /*
2101 * Instead of dropping the bounding set first, do it here in case
2102 * the caller had a more permissive bounding set which could
2103 * have been used above to raise a capability that wasn't already
2104 * present. This requires CAP_SETPCAP, so we raised/kept it above.
2105 *
2106 * However, if we're asked to skip setting *and* locking the
2107 * SECURE_NOROOT securebit, also skip dropping the bounding set.
2108 * If the caller wants to regain all capabilities when executing a
2109 * set-user-ID-root program, allow them to do so. The default behavior
2110 * (i.e. the behavior without |securebits_skip_mask| set) will still put
2111 * the jailed process tree in a capabilities-only environment.
2112 *
2113 * We check the negated skip mask for SECURE_NOROOT and
2114 * SECURE_NOROOT_LOCKED. If the bits are set in the negated mask they
2115 * will *not* be skipped in lock_securebits(), and therefore we should
2116 * drop the bounding set.
2117 */
2118 if (secure_noroot_set_and_locked(~j->securebits_skip_mask)) {
2119 drop_capbset(j->caps, last_valid_cap);
2120 } else {
2121 warn("SECURE_NOROOT not set, not dropping bounding set");
2122 }
2123
2124 /* If CAP_SETPCAP wasn't specifically requested, now we remove it. */
2125 if ((j->caps & (one << CAP_SETPCAP)) == 0) {
2126 flag[0] = CAP_SETPCAP;
2127 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_CLEAR))
2128 die("can't clear effective cap");
2129 if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_CLEAR))
2130 die("can't clear permitted cap");
2131 if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_CLEAR))
2132 die("can't clear inheritable cap");
2133 }
2134
2135 if (cap_set_proc(caps))
2136 die("can't apply final cleaned capset");
2137
2138 /*
2139 * If ambient capabilities are supported, clear all capabilities first,
2140 * then raise the requested ones.
2141 */
2142 if (j->flags.set_ambient_caps) {
2143 if (!cap_ambient_supported()) {
2144 pdie("ambient capabilities not supported");
2145 }
2146 if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0, 0, 0) !=
2147 0) {
2148 pdie("can't clear ambient capabilities");
2149 }
2150
2151 for (i = 0; i < ncaps && i <= last_valid_cap; ++i) {
2152 if (!(j->caps & (one << i)))
2153 continue;
2154
2155 if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, i, 0,
2156 0) != 0) {
2157 pdie("prctl(PR_CAP_AMBIENT, "
2158 "PR_CAP_AMBIENT_RAISE, %u) failed",
2159 i);
2160 }
2161 }
2162 }
2163
2164 cap_free(caps);
2165 }
2166
2167 static void set_seccomp_filter(const struct minijail *j)
2168 {
2169 /*
2170 * Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
2171 * in the kernel source tree for an explanation of the parameters.
2172 */
2173 if (j->flags.no_new_privs) {
2174 if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
2175 pdie("prctl(PR_SET_NO_NEW_PRIVS)");
2176 }
2177
2178 /*
2179 * Code running with ASan
2180 * (https://github.com/google/sanitizers/wiki/AddressSanitizer)
2181 * will make system calls not included in the syscall filter policy,
2182 * which will likely crash the program. Skip setting seccomp filter in
2183 * that case.
2184 * 'running_with_asan()' has no inputs and is completely defined at
2185 * build time, so this cannot be used by an attacker to skip setting
2186 * seccomp filter.
2187 */
2188 if (j->flags.seccomp_filter && running_with_asan()) {
2189 warn("running with (HW)ASan, not setting seccomp filter");
2190 return;
2191 }
2192
2193 if (j->flags.seccomp_filter) {
2194 if (seccomp_is_logging_allowed(j)) {
2195 warn("logging seccomp filter failures");
2196 if (!seccomp_ret_log_available()) {
2197 /*
2198 * If SECCOMP_RET_LOG is not available,
2199 * install the SIGSYS handler first.
2200 */
2201 if (install_sigsys_handler())
2202 pdie(
2203 "failed to install SIGSYS handler");
2204 }
2205 } else if (j->flags.seccomp_filter_tsync) {
2206 /*
2207 * If setting thread sync,
2208 * reset the SIGSYS signal handler so that
2209 * the entire thread group is killed.
2210 */
2211 if (signal(SIGSYS, SIG_DFL) == SIG_ERR)
2212 pdie("failed to reset SIGSYS disposition");
2213 }
2214 }
2215
2216 /*
2217 * Install the syscall filter.
2218 */
2219 if (j->flags.seccomp_filter) {
2220 if (j->flags.seccomp_filter_tsync ||
2221 j->flags.seccomp_filter_allow_speculation) {
2222 int filter_flags =
2223 (j->flags.seccomp_filter_tsync
2224 ? SECCOMP_FILTER_FLAG_TSYNC
2225 : 0) |
2226 (j->flags.seccomp_filter_allow_speculation
2227 ? SECCOMP_FILTER_FLAG_SPEC_ALLOW
2228 : 0);
2229 if (sys_seccomp(SECCOMP_SET_MODE_FILTER, filter_flags,
2230 j->filter_prog)) {
2231 pdie("seccomp(tsync) failed");
2232 }
2233 } else {
2234 if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
2235 j->filter_prog)) {
2236 pdie("prctl(seccomp_filter) failed");
2237 }
2238 }
2239 }
2240 }
2241
2242 static pid_t forward_pid = -1;
2243
2244 static void forward_signal(int sig, siginfo_t *siginfo attribute_unused,
2245 void *void_context attribute_unused)
2246 {
2247 if (forward_pid != -1) {
2248 kill(forward_pid, sig);
2249 }
2250 }
2251
2252 static void install_signal_handlers(void)
2253 {
2254 struct sigaction act;
2255
2256 memset(&act, 0, sizeof(act));
2257 act.sa_sigaction = &forward_signal;
2258 act.sa_flags = SA_SIGINFO | SA_RESTART;
2259
2260 /* Handle all signals, except SIGCHLD. */
2261 for (int sig = 1; sig < NSIG; sig++) {
2262 /*
2263 * We don't care if we get EINVAL: that just means that we
2264 * can't handle this signal, so let's skip it and continue.
2265 */
2266 sigaction(sig, &act, NULL);
2267 }
2268 /* Reset SIGCHLD's handler. */
2269 signal(SIGCHLD, SIG_DFL);
2270
2271 /* Handle real-time signals. */
2272 for (int sig = SIGRTMIN; sig <= SIGRTMAX; sig++) {
2273 sigaction(sig, &act, NULL);
2274 }
2275 }
2276
2277 static const char *lookup_hook_name(minijail_hook_event_t event)
2278 {
2279 switch (event) {
2280 case MINIJAIL_HOOK_EVENT_PRE_DROP_CAPS:
2281 return "pre-drop-caps";
2282 case MINIJAIL_HOOK_EVENT_PRE_EXECVE:
2283 return "pre-execve";
2284 case MINIJAIL_HOOK_EVENT_PRE_CHROOT:
2285 return "pre-chroot";
2286 case MINIJAIL_HOOK_EVENT_MAX:
2287 /*
2288 * Adding this in favor of a default case to force the
2289 * compiler to error out if a new enum value is added.
2290 */
2291 break;
2292 }
2293 return "unknown";
2294 }
2295
2296 static void run_hooks_or_die(const struct minijail *j,
2297 minijail_hook_event_t event)
2298 {
2299 int rc;
2300 int hook_index = 0;
2301 for (struct hook *c = j->hooks_head; c; c = c->next) {
2302 if (c->event != event)
2303 continue;
2304 rc = c->hook(c->payload);
2305 if (rc != 0) {
2306 errno = -rc;
2307 pdie("%s hook (index %d) failed",
2308 lookup_hook_name(event), hook_index);
2309 }
2310 /* Only increase the index within the same hook event type. */
2311 ++hook_index;
2312 }
2313 }
2314
2315 void API minijail_enter(const struct minijail *j)
2316 {
2317 /*
2318 * If we're dropping caps, get the last valid cap from /proc now,
2319 * since /proc can be unmounted before drop_caps() is called.
2320 */
2321 unsigned int last_valid_cap = 0;
2322 if (j->flags.capbset_drop || j->flags.use_caps)
2323 last_valid_cap = get_last_valid_cap();
2324
2325 if (j->flags.pids)
2326 die("tried to enter a pid-namespaced jail;"
2327 " try minijail_run()?");
2328
2329 if (j->flags.inherit_suppl_gids && !j->user)
2330 die("cannot inherit supplementary groups without setting a "
2331 "username");
2332
2333 /*
2334 * We can't recover from failures if we've dropped privileges partially,
2335 * so we don't even try. If any of our operations fail, we abort() the
2336 * entire process.
2337 */
2338 if (j->flags.enter_vfs) {
2339 if (setns(j->mountns_fd, CLONE_NEWNS))
2340 pdie("setns(CLONE_NEWNS) failed");
2341 close(j->mountns_fd);
2342 }
2343
2344 if (j->flags.vfs) {
2345 if (unshare(CLONE_NEWNS))
2346 pdie("unshare(CLONE_NEWNS) failed");
2347 /*
2348 * By default, remount all filesystems as private, unless
2349 * - Passed a specific remount mode, in which case remount with
2350 * that,
2351 * - Asked not to remount at all, in which case skip the
2352 * mount(2) call.
2353 * https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
2354 */
2355 if (j->remount_mode) {
2356 if (mount(NULL, "/", NULL, MS_REC | j->remount_mode,
2357 NULL))
2358 pdie("mount(NULL, /, NULL, "
2359 "MS_REC | j->remount_mode, NULL) failed");
2360
2361 struct minijail_remount *temp = j->remounts_head;
2362 while (temp) {
2363 if (temp->remount_mode < j->remount_mode)
2364 die("cannot remount %s as stricter "
2365 "than the root dir",
2366 temp->mount_name);
2367 if (mount(NULL, temp->mount_name, NULL,
2368 MS_REC | temp->remount_mode, NULL))
2369 pdie("mount(NULL, %s, NULL, "
2370 "MS_REC | temp->remount_mode, "
2371 "NULL) failed",
2372 temp->mount_name);
2373 temp = temp->next;
2374 }
2375 }
2376 }
2377
2378 if (j->flags.ipc && unshare(CLONE_NEWIPC)) {
2379 pdie("unshare(CLONE_NEWIPC) failed");
2380 }
2381
2382 if (j->flags.uts) {
2383 if (unshare(CLONE_NEWUTS))
2384 pdie("unshare(CLONE_NEWUTS) failed");
2385
2386 if (j->hostname &&
2387 sethostname(j->hostname, strlen(j->hostname)))
2388 pdie("sethostname(%s) failed", j->hostname);
2389 }
2390
2391 if (j->flags.enter_net) {
2392 if (setns(j->netns_fd, CLONE_NEWNET))
2393 pdie("setns(CLONE_NEWNET) failed");
2394 close(j->netns_fd);
2395 } else if (j->flags.net) {
2396 if (unshare(CLONE_NEWNET))
2397 pdie("unshare(CLONE_NEWNET) failed");
2398 config_net_loopback();
2399 }
2400
2401 if (j->flags.ns_cgroups && unshare(CLONE_NEWCGROUP))
2402 pdie("unshare(CLONE_NEWCGROUP) failed");
2403
2404 if (j->flags.new_session_keyring) {
2405 if (syscall(SYS_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL) < 0)
2406 pdie("keyctl(KEYCTL_JOIN_SESSION_KEYRING) failed");
2407 }
2408
2409 /* We have to process all the mounts before we chroot/pivot_root. */
2410 process_mounts_or_die(j);
2411
2412 if (j->flags.chroot && enter_chroot(j))
2413 pdie("chroot");
2414
2415 if (j->flags.pivot_root && enter_pivot_root(j))
2416 pdie("pivot_root");
2417
2418 if (j->flags.mount_tmp && mount_tmp(j))
2419 pdie("mount_tmp");
2420
2421 if (j->flags.remount_proc_ro && remount_proc_readonly(j))
2422 pdie("remount");
2423
2424 run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_DROP_CAPS);
2425
2426 /*
2427 * If we're only dropping capabilities from the bounding set, but not
2428 * from the thread's (permitted|inheritable|effective) sets, do it now.
2429 */
2430 if (j->flags.capbset_drop) {
2431 drop_capbset(j->cap_bset, last_valid_cap);
2432 }
2433
2434 /*
2435 * POSIX capabilities are a bit tricky. We must set SECBIT_KEEP_CAPS
2436 * before drop_ugid() below as the latter would otherwise drop all
2437 * capabilities.
2438 */
2439 if (j->flags.use_caps) {
2440 /*
2441 * When using ambient capabilities, CAP_SET{GID,UID} can be
2442 * inherited across execve(2), so SECBIT_KEEP_CAPS is not
2443 * strictly needed.
2444 */
2445 bool require_keep_caps = !j->flags.set_ambient_caps;
2446 if (lock_securebits(j->securebits_skip_mask,
2447 require_keep_caps) < 0) {
2448 pdie("locking securebits failed");
2449 }
2450 }
2451
2452 if (j->flags.no_new_privs) {
2453 /*
2454 * If we're setting no_new_privs, we can drop privileges
2455 * before setting seccomp filter. This way filter policies
2456 * don't need to allow privilege-dropping syscalls.
2457 */
2458 drop_ugid(j);
2459 drop_caps(j, last_valid_cap);
2460 set_seccomp_filter(j);
2461 } else {
2462 /*
2463 * If we're not setting no_new_privs,
2464 * we need to set seccomp filter *before* dropping privileges.
2465 * WARNING: this means that filter policies *must* allow
2466 * setgroups()/setresgid()/setresuid() for dropping root and
2467 * capget()/capset()/prctl() for dropping caps.
2468 */
2469 set_seccomp_filter(j);
2470 drop_ugid(j);
2471 drop_caps(j, last_valid_cap);
2472 }
2473
2474 /*
2475 * Select the specified alternate syscall table. The table must not
2476 * block prctl(2) if we're using seccomp as well.
2477 */
2478 if (j->flags.alt_syscall) {
2479 if (prctl(PR_ALT_SYSCALL, 1, j->alt_syscall_table))
2480 pdie("prctl(PR_ALT_SYSCALL) failed");
2481 }
2482
2483 /*
2484 * seccomp has to come last since it cuts off all the other
2485 * privilege-dropping syscalls :)
2486 */
2487 if (j->flags.seccomp && prctl(PR_SET_SECCOMP, 1)) {
2488 if ((errno == EINVAL) && seccomp_can_softfail()) {
2489 warn("seccomp not supported");
2490 return;
2491 }
2492 pdie("prctl(PR_SET_SECCOMP) failed");
2493 }
2494 }
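/*
 * Caller-side sketch for minijail_enter(): a process that jails itself in
 * place (no fork/exec) builds a jail with the setters declared in
 * libminijail.h and then enters it. The setters below are only one
 * plausible combination, shown for illustration; minijail_enter() aborts
 * the process on any failure.
 *
 *   struct minijail *j = minijail_new();
 *   minijail_change_uid(j, 1000);
 *   minijail_change_gid(j, 1000);
 *   minijail_no_new_privs(j);
 *   minijail_namespace_vfs(j);
 *   minijail_enter(j);
 *   minijail_destroy(j);
 */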
2495
2496 /* TODO(wad): will visibility affect this variable? */
2497 static int init_exitstatus = 0;
2498
2499 static void init_term(int sig attribute_unused)
2500 {
2501 _exit(init_exitstatus);
2502 }
2503
2504 static void init(pid_t rootpid)
2505 {
2506 pid_t pid;
2507 int status;
2508 /* So that we exit with the right status. */
2509 signal(SIGTERM, init_term);
2510 /* TODO(wad): self jail with seccomp filters here. */
2511 while ((pid = wait(&status)) > 0) {
2512 /*
2513 * This loop will only end when either there are no processes
2514 * left inside our pid namespace or we get a signal.
2515 */
2516 if (pid == rootpid)
2517 init_exitstatus = status;
2518 }
2519 if (!WIFEXITED(init_exitstatus))
2520 _exit(MINIJAIL_ERR_INIT);
2521 _exit(WEXITSTATUS(init_exitstatus));
2522 }
2523
2524 int API minijail_from_fd(int fd, struct minijail *j)
2525 {
2526 size_t sz = 0;
2527 size_t bytes = read(fd, &sz, sizeof(sz));
2528 attribute_cleanup_str char *buf = NULL;
2529 int r;
2530 if (sizeof(sz) != bytes)
2531 return -EINVAL;
2532 if (sz > USHRT_MAX) /* arbitrary check */
2533 return -E2BIG;
2534 buf = malloc(sz);
2535 if (!buf)
2536 return -ENOMEM;
2537 bytes = read(fd, buf, sz);
2538 if (bytes != sz)
2539 return -EINVAL;
2540 r = minijail_unmarshal(j, buf, sz);
2541 return r;
2542 }
2543
2544 int API minijail_to_fd(struct minijail *j, int fd)
2545 {
2546 size_t sz = minijail_size(j);
2547 if (!sz)
2548 return -EINVAL;
2549
2550 attribute_cleanup_str char *buf = malloc(sz);
2551 if (!buf)
2552 return -ENOMEM;
2553
2554 int err = minijail_marshal(j, buf, sz);
2555 if (err)
2556 return err;
2557
2558 /* Sends [size][minijail]. */
2559 err = write_exactly(fd, &sz, sizeof(sz));
2560 if (err)
2561 return err;
2562
2563 return write_exactly(fd, buf, sz);
2564 }
2565
2566 int API minijail_copy_jail(const struct minijail *from, struct minijail *out)
2567 {
2568 size_t sz = minijail_size(from);
2569 if (!sz)
2570 return -EINVAL;
2571
2572 attribute_cleanup_str char *buf = malloc(sz);
2573 if (!buf)
2574 return -ENOMEM;
2575
2576 int err = minijail_marshal(from, buf, sz);
2577 if (err)
2578 return err;
2579
2580 return minijail_unmarshal(out, buf, sz);
2581 }
2582
2583 static int setup_preload(const struct minijail *j attribute_unused,
2584 char ***child_env attribute_unused)
2585 {
2586 #if defined(__ANDROID__)
2587 /* Don't use LD_PRELOAD on Android. */
2588 return 0;
2589 #else
2590 const char *preload_path = j->preload_path ?: PRELOADPATH;
2591 char *newenv = NULL;
2592 int ret = 0;
2593 const char *oldenv = minijail_getenv(*child_env, kLdPreloadEnvVar);
2594
2595 if (!oldenv)
2596 oldenv = "";
2597
2598 /* Only insert a separating space if we have something to separate... */
2599 if (asprintf(&newenv, "%s%s%s", oldenv, oldenv[0] != '\0' ? " " : "",
2600 preload_path) < 0) {
2601 return -1;
2602 }
2603
2604 ret = minijail_setenv(child_env, kLdPreloadEnvVar, newenv, 1);
2605 free(newenv);
2606 return ret;
2607 #endif
2608 }
2609
2610 /*
2611 * This is for logging purposes and does not change the enforced seccomp
2612 * filter.
2613 */
2614 static int setup_seccomp_policy_path(const struct minijail *j,
2615 char ***child_env)
2616 {
2617 return minijail_setenv(child_env, kSeccompPolicyPathEnvVar,
2618 j->seccomp_policy_path ? j->seccomp_policy_path
2619 : "NO-LABEL",
2620 1 /* overwrite */);
2621 }
2622
2623 static int setup_pipe(char ***child_env, int fds[2])
2624 {
2625 int r = pipe(fds);
2626 char fd_buf[11];
2627 if (r)
2628 return r;
2629 r = snprintf(fd_buf, sizeof(fd_buf), "%d", fds[0]);
2630 if (r <= 0)
2631 return -EINVAL;
2632 return minijail_setenv(child_env, kFdEnvVar, fd_buf, 1);
2633 }
2634
2635 static int close_open_fds(int *inheritable_fds, size_t size)
2636 {
2637 const char *kFdPath = "/proc/self/fd";
2638
2639 DIR *d = opendir(kFdPath);
2640 struct dirent *dir_entry;
2641
2642 if (d == NULL)
2643 return -1;
2644 int dir_fd = dirfd(d);
2645 while ((dir_entry = readdir(d)) != NULL) {
2646 size_t i;
2647 char *end;
2648 bool should_close = true;
2649 const int fd = strtol(dir_entry->d_name, &end, 10);
2650
2651 if ((*end) != '\0') {
2652 continue;
2653 }
2654 /*
2655 * We might have set up some pipes that we want to share with
2656 * the parent process, and should not be closed.
2657 */
2658 for (i = 0; i < size; ++i) {
2659 if (fd == inheritable_fds[i]) {
2660 should_close = false;
2661 break;
2662 }
2663 }
2664 /* Also avoid closing the directory fd. */
2665 if (should_close && fd != dir_fd)
2666 close(fd);
2667 }
2668 closedir(d);
2669 return 0;
2670 }
2671
2672 /* Return true if the specified file descriptor is already open. */
2673 static int fd_is_open(int fd)
2674 {
2675 return fcntl(fd, F_GETFD) != -1 || errno != EBADF;
2676 }
2677
2678 static_assert(FD_SETSIZE >= MAX_PRESERVED_FDS * 2 - 1,
2679 "If true, ensure_no_fd_conflict will always find an unused fd.");
2680
2681 /* If parent_fd will be used by a child fd, move it to an unused fd. */
2682 static int ensure_no_fd_conflict(const fd_set *child_fds, int child_fd,
2683 int *parent_fd)
2684 {
2685 if (!FD_ISSET(*parent_fd, child_fds)) {
2686 return 0;
2687 }
2688
2689 /*
2690 * If no other parent_fd matches the child_fd then use it instead of a
2691 * temporary.
2692 */
2693 int fd = child_fd;
2694 if (fd == -1 || fd_is_open(fd)) {
2695 fd = FD_SETSIZE - 1;
2696 while (FD_ISSET(fd, child_fds) || fd_is_open(fd)) {
2697 --fd;
2698 if (fd < 0) {
2699 die("failed to find an unused fd");
2700 }
2701 }
2702 }
2703
2704 int ret = dup2(*parent_fd, fd);
2705 /*
2706 * warn() opens a file descriptor so it needs to happen after dup2 to
2707 * avoid unintended side effects. This can be avoided by reordering the
2708 * mapping requests so that the source fds with overlap are mapped
2709 * first (unless there are cycles).
2710 */
2711 warn("mapped fd overlap: moving %d to %d", *parent_fd, fd);
2712 if (ret == -1) {
2713 return -1;
2714 }
2715
2716 *parent_fd = fd;
2717 return 0;
2718 }
2719
2720 /*
2721 * Populate child_fds_out with the set of file descriptors that will be replaced
2722 * by redirect_fds().
2723 *
2724 * NOTE: This creates temporaries for parent file descriptors that would
2725 * otherwise be overwritten during redirect_fds().
2726 */
2727 static int get_child_fds(struct minijail *j, fd_set *child_fds_out)
2728 {
2729 /* Relocate parent_fds that would be replaced by a child_fd. */
2730 for (size_t i = 0; i < j->preserved_fd_count; i++) {
2731 int child_fd = j->preserved_fds[i].child_fd;
2732 if (FD_ISSET(child_fd, child_fds_out)) {
2733 die("fd %d is mapped more than once", child_fd);
2734 }
2735
2736 int *parent_fd = &j->preserved_fds[i].parent_fd;
2737 if (ensure_no_fd_conflict(child_fds_out, child_fd, parent_fd) ==
2738 -1) {
2739 return -1;
2740 }
2741
2742 FD_SET(child_fd, child_fds_out);
2743 }
2744 return 0;
2745 }
2746
2747 /*
2748 * Structure holding resources and state created when running a minijail.
2749 */
2750 struct minijail_run_state {
2751 pid_t child_pid;
2752 int pipe_fds[2];
2753 int stdin_fds[2];
2754 int stdout_fds[2];
2755 int stderr_fds[2];
2756 int child_sync_pipe_fds[2];
2757 char **child_env;
2758 };
2759
2760 /*
2761 * Move pipe_fds if they conflict with a child_fd.
2762 */
2763 static int avoid_pipe_conflicts(struct minijail_run_state *state,
2764 fd_set *child_fds_out)
2765 {
2766 int *pipe_fds[] = {
2767 state->pipe_fds, state->child_sync_pipe_fds, state->stdin_fds,
2768 state->stdout_fds, state->stderr_fds,
2769 };
2770 for (size_t i = 0; i < ARRAY_SIZE(pipe_fds); ++i) {
2771 if (pipe_fds[i][0] != -1 &&
2772 ensure_no_fd_conflict(child_fds_out, -1, &pipe_fds[i][0]) ==
2773 -1) {
2774 return -1;
2775 }
2776 if (pipe_fds[i][1] != -1 &&
2777 ensure_no_fd_conflict(child_fds_out, -1, &pipe_fds[i][1]) ==
2778 -1) {
2779 return -1;
2780 }
2781 }
2782 return 0;
2783 }
2784
2785 /*
2786 * Redirect j->preserved_fds from the parent_fd to the child_fd.
2787 *
2788 * NOTE: This will clear FD_CLOEXEC since otherwise the child_fd would not be
2789 * inherited after the exec call.
2790 */
2791 static int redirect_fds(struct minijail *j, fd_set *child_fds)
2792 {
2793 for (size_t i = 0; i < j->preserved_fd_count; i++) {
2794 if (j->preserved_fds[i].parent_fd ==
2795 j->preserved_fds[i].child_fd) {
2796 // Clear CLOEXEC if it is set so the FD will be
2797 // inherited by the child.
2798 int flags =
2799 fcntl(j->preserved_fds[i].child_fd, F_GETFD);
2800 if (flags == -1 || (flags & FD_CLOEXEC) == 0) {
2801 continue;
2802 }
2803
2804 // Currently FD_CLOEXEC is cleared without being
2805 // restored. It may make sense to track when this
2806 // happens and restore FD_CLOEXEC in the child process.
2807 flags &= ~FD_CLOEXEC;
2808 if (fcntl(j->preserved_fds[i].child_fd, F_SETFD,
2809 flags) == -1) {
2810 pwarn("failed to clear CLOEXEC for %d",
2811 j->preserved_fds[i].parent_fd);
2812 }
2813 continue;
2814 }
2815 if (dup2(j->preserved_fds[i].parent_fd,
2816 j->preserved_fds[i].child_fd) == -1) {
2817 return -1;
2818 }
2819 }
2820
2821 /*
2822 * After all fds have been duped, we are now free to close all parent
2823 * fds that are *not* child fds.
2824 */
2825 for (size_t i = 0; i < j->preserved_fd_count; i++) {
2826 int parent_fd = j->preserved_fds[i].parent_fd;
2827 if (!FD_ISSET(parent_fd, child_fds)) {
2828 close(parent_fd);
2829 }
2830 }
2831 return 0;
2832 }
2833
2834 static void minijail_free_run_state(struct minijail_run_state *state)
2835 {
2836 state->child_pid = -1;
2837
2838 int *fd_pairs[] = {state->pipe_fds, state->stdin_fds, state->stdout_fds,
2839 state->stderr_fds, state->child_sync_pipe_fds};
2840 for (size_t i = 0; i < ARRAY_SIZE(fd_pairs); ++i) {
2841 close_and_reset(&fd_pairs[i][0]);
2842 close_and_reset(&fd_pairs[i][1]);
2843 }
2844
2845 minijail_free_env(state->child_env);
2846 state->child_env = NULL;
2847 }
2848
2849 /* Set up stdin/stdout/stderr file descriptors in the child. */
2850 static void setup_child_std_fds(struct minijail *j,
2851 struct minijail_run_state *state)
2852 {
2853 struct {
2854 const char *name;
2855 int from;
2856 int to;
2857 } fd_map[] = {
2858 {"stdin", state->stdin_fds[0], STDIN_FILENO},
2859 {"stdout", state->stdout_fds[1], STDOUT_FILENO},
2860 {"stderr", state->stderr_fds[1], STDERR_FILENO},
2861 };
2862
2863 for (size_t i = 0; i < ARRAY_SIZE(fd_map); ++i) {
2864 if (fd_map[i].from == -1 || fd_map[i].from == fd_map[i].to)
2865 continue;
2866 if (dup2(fd_map[i].from, fd_map[i].to) == -1)
2867 die("failed to set up %s pipe", fd_map[i].name);
2868 }
2869
2870 /* Close temporary pipe file descriptors. */
2871 int *std_pipes[] = {state->stdin_fds, state->stdout_fds,
2872 state->stderr_fds};
2873 for (size_t i = 0; i < ARRAY_SIZE(std_pipes); ++i) {
2874 close_and_reset(&std_pipes[i][0]);
2875 close_and_reset(&std_pipes[i][1]);
2876 }
2877
2878 /*
2879 * If any of stdin, stdout, or stderr are TTYs, or setsid flag is
2880 * set, create a new session. This prevents the jailed process from
2881 * using the TIOCSTI ioctl to push characters into the parent process
2882 * terminal's input buffer, therefore escaping the jail.
2883 *
2884 * Since it has just forked, the child will not be a process group
2885 * leader, and this call to setsid() should always succeed.
2886 */
2887 if (j->flags.setsid || isatty(STDIN_FILENO) || isatty(STDOUT_FILENO) ||
2888 isatty(STDERR_FILENO)) {
2889 if (setsid() < 0) {
2890 pdie("setsid() failed");
2891 }
2892
2893 if (isatty(STDIN_FILENO)) {
2894 if (ioctl(STDIN_FILENO, TIOCSCTTY, 0) != 0) {
2895 pwarn("failed to set controlling terminal");
2896 }
2897 }
2898 }
2899 }
2900
2901 /*
2902 * Structure that specifies how to start a minijail.
2903 *
2904 * filename - The program to exec in the child. Should be NULL if elf_fd is set.
2905 * elf_fd - A fd to be used with fexecve. Should be -1 if filename is set.
2906 * NOTE: either filename or elf_fd is required if |exec_in_child| = 1.
2907 * argv - Arguments for the child program. Required if |exec_in_child| = 1.
2908 * envp - Environment for the child program. Available if |exec_in_child| = 1.
2909 * use_preload - If true use LD_PRELOAD.
2910 * exec_in_child - If true, run |filename|. Otherwise, the child will return to
2911 * the caller.
2912 * pstdin_fd - Filled with stdin pipe if non-NULL.
2913 * pstdout_fd - Filled with stdout pipe if non-NULL.
2914 * pstderr_fd - Filled with stderr pipe if non-NULL.
2915 * pchild_pid - Filled with the pid of the child process if non-NULL.
2916 */
2917 struct minijail_run_config {
2918 const char *filename;
2919 int elf_fd;
2920 char *const *argv;
2921 char *const *envp;
2922 int use_preload;
2923 int exec_in_child;
2924 int *pstdin_fd;
2925 int *pstdout_fd;
2926 int *pstderr_fd;
2927 pid_t *pchild_pid;
2928 };
2929
2930 static int
2931 minijail_run_config_internal(struct minijail *j,
2932 const struct minijail_run_config *config);
2933
2934 int API minijail_run(struct minijail *j, const char *filename,
2935 char *const argv[])
2936 {
2937 struct minijail_run_config config = {
2938 .filename = filename,
2939 .elf_fd = -1,
2940 .argv = argv,
2941 .envp = NULL,
2942 .use_preload = true,
2943 .exec_in_child = true,
2944 };
2945 return minijail_run_config_internal(j, &config);
2946 }
2947
2948 int API minijail_run_env(struct minijail *j, const char *filename,
2949 char *const argv[], char *const envp[])
2950 {
2951 struct minijail_run_config config = {
2952 .filename = filename,
2953 .elf_fd = -1,
2954 .argv = argv,
2955 .envp = envp,
2956 .use_preload = true,
2957 .exec_in_child = true,
2958 };
2959 return minijail_run_config_internal(j, &config);
2960 }
2961
2962 int API minijail_run_pid(struct minijail *j, const char *filename,
2963 char *const argv[], pid_t *pchild_pid)
2964 {
2965 struct minijail_run_config config = {
2966 .filename = filename,
2967 .elf_fd = -1,
2968 .argv = argv,
2969 .envp = NULL,
2970 .use_preload = true,
2971 .exec_in_child = true,
2972 .pchild_pid = pchild_pid,
2973 };
2974 return minijail_run_config_internal(j, &config);
2975 }
2976
2977 int API minijail_run_pipe(struct minijail *j, const char *filename,
2978 char *const argv[], int *pstdin_fd)
2979 {
2980 struct minijail_run_config config = {
2981 .filename = filename,
2982 .elf_fd = -1,
2983 .argv = argv,
2984 .envp = NULL,
2985 .use_preload = true,
2986 .exec_in_child = true,
2987 .pstdin_fd = pstdin_fd,
2988 };
2989 return minijail_run_config_internal(j, &config);
2990 }
2991
2992 int API minijail_run_pid_pipes(struct minijail *j, const char *filename,
2993 char *const argv[], pid_t *pchild_pid,
2994 int *pstdin_fd, int *pstdout_fd, int *pstderr_fd)
2995 {
2996 struct minijail_run_config config = {
2997 .filename = filename,
2998 .elf_fd = -1,
2999 .argv = argv,
3000 .envp = NULL,
3001 .use_preload = true,
3002 .exec_in_child = true,
3003 .pstdin_fd = pstdin_fd,
3004 .pstdout_fd = pstdout_fd,
3005 .pstderr_fd = pstderr_fd,
3006 .pchild_pid = pchild_pid,
3007 };
3008 return minijail_run_config_internal(j, &config);
3009 }
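/*
 * Caller-side sketch for minijail_run_pid_pipes(), for illustration only
 * (error handling omitted): launch a child with stdout captured through a
 * pipe, read until EOF, then reap it.
 *
 *   char *const argv[] = {"/bin/ls", "/", NULL};
 *   pid_t pid;
 *   int stdout_fd;
 *   minijail_run_pid_pipes(j, argv[0], argv, &pid, NULL, &stdout_fd, NULL);
 *   // ... read(stdout_fd, buf, sizeof(buf)) until it returns 0 ...
 *   close(stdout_fd);
 *   minijail_wait(j);
 */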
3010
3011 int API minijail_run_env_pid_pipes(struct minijail *j, const char *filename,
3012 char *const argv[], char *const envp[],
3013 pid_t *pchild_pid, int *pstdin_fd,
3014 int *pstdout_fd, int *pstderr_fd)
3015 {
3016 struct minijail_run_config config = {
3017 .filename = filename,
3018 .elf_fd = -1,
3019 .argv = argv,
3020 .envp = envp,
3021 .use_preload = true,
3022 .exec_in_child = true,
3023 .pstdin_fd = pstdin_fd,
3024 .pstdout_fd = pstdout_fd,
3025 .pstderr_fd = pstderr_fd,
3026 .pchild_pid = pchild_pid,
3027 };
3028 return minijail_run_config_internal(j, &config);
3029 }
3030
3031 int API minijail_run_fd_env_pid_pipes(struct minijail *j, int elf_fd,
3032 char *const argv[], char *const envp[],
3033 pid_t *pchild_pid, int *pstdin_fd,
3034 int *pstdout_fd, int *pstderr_fd)
3035 {
3036 struct minijail_run_config config = {
3037 .filename = NULL,
3038 .elf_fd = elf_fd,
3039 .argv = argv,
3040 .envp = envp,
3041 .use_preload = true,
3042 .exec_in_child = true,
3043 .pstdin_fd = pstdin_fd,
3044 .pstdout_fd = pstdout_fd,
3045 .pstderr_fd = pstderr_fd,
3046 .pchild_pid = pchild_pid,
3047 };
3048 return minijail_run_config_internal(j, &config);
3049 }
3050
3051 int API minijail_run_no_preload(struct minijail *j, const char *filename,
3052 char *const argv[])
3053 {
3054 struct minijail_run_config config = {
3055 .filename = filename,
3056 .elf_fd = -1,
3057 .argv = argv,
3058 .envp = NULL,
3059 .use_preload = false,
3060 .exec_in_child = true,
3061 };
3062 return minijail_run_config_internal(j, &config);
3063 }
3064
3065 int API minijail_run_pid_pipes_no_preload(struct minijail *j,
3066 const char *filename,
3067 char *const argv[], pid_t *pchild_pid,
3068 int *pstdin_fd, int *pstdout_fd,
3069 int *pstderr_fd)
3070 {
3071 struct minijail_run_config config = {
3072 .filename = filename,
3073 .elf_fd = -1,
3074 .argv = argv,
3075 .envp = NULL,
3076 .use_preload = false,
3077 .exec_in_child = true,
3078 .pstdin_fd = pstdin_fd,
3079 .pstdout_fd = pstdout_fd,
3080 .pstderr_fd = pstderr_fd,
3081 .pchild_pid = pchild_pid,
3082 };
3083 return minijail_run_config_internal(j, &config);
3084 }
3085
3086 int API minijail_run_env_pid_pipes_no_preload(struct minijail *j,
3087 const char *filename,
3088 char *const argv[],
3089 char *const envp[],
3090 pid_t *pchild_pid, int *pstdin_fd,
3091 int *pstdout_fd, int *pstderr_fd)
3092 {
3093 struct minijail_run_config config = {
3094 .filename = filename,
3095 .elf_fd = -1,
3096 .argv = argv,
3097 .envp = envp,
3098 .use_preload = false,
3099 .exec_in_child = true,
3100 .pstdin_fd = pstdin_fd,
3101 .pstdout_fd = pstdout_fd,
3102 .pstderr_fd = pstderr_fd,
3103 .pchild_pid = pchild_pid,
3104 };
3105 return minijail_run_config_internal(j, &config);
3106 }
3107
3108 pid_t API minijail_fork(struct minijail *j)
3109 {
3110 struct minijail_run_config config = {
3111 .elf_fd = -1,
3112 };
3113 return minijail_run_config_internal(j, &config);
3114 }
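/*
 * Caller-side sketch for minijail_fork(), for illustration only: the child
 * returns 0 from this call already inside the configured jail, while the
 * parent receives the child's pid (or a negative value on error). The
 * LD_PRELOAD and pid-namespace caveats described elsewhere in this file
 * still apply.
 *
 *   pid_t pid = minijail_fork(j);
 *   if (pid < 0) {
 *           // handle error
 *   } else if (pid == 0) {
 *           // Child: already jailed; do work, then exit.
 *           _exit(0);
 *   }
 *   minijail_wait(j);  // Parent reaps the child.
 */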
3115
3116 static int minijail_run_internal(struct minijail *j,
3117 const struct minijail_run_config *config,
3118 struct minijail_run_state *state_out)
3119 {
3120 int sync_child = 0;
3121 int ret;
3122 /* We need to remember this across the minijail_preexec() call. */
3123 int pid_namespace = j->flags.pids;
3124 /*
3125 * Create an init process if we are entering a pid namespace, unless the
3126 * user has explicitly opted out by calling minijail_run_as_init().
3127 */
3128 int do_init = j->flags.do_init && !j->flags.run_as_init;
3129 int use_preload = config->use_preload;
3130
3131 if (config->filename != NULL && config->elf_fd != -1) {
3132 die("filename and elf_fd cannot be set at the same time");
3133 }
3134
3135 /*
3136 * Only copy the environment if we need to modify it. If this is done
3137 * unconditionally, it triggers odd behavior in the ARC container.
3138 */
3139 if (use_preload || j->seccomp_policy_path) {
3140 state_out->child_env =
3141 minijail_copy_env(config->envp ? config->envp : environ);
3142 if (!state_out->child_env)
3143 return ENOMEM;
3144 }
3145
3146 if (j->seccomp_policy_path &&
3147 setup_seccomp_policy_path(j, &state_out->child_env))
3148 return -EFAULT;
3149
3150 if (use_preload) {
3151 if (j->hooks_head != NULL)
3152 die("Minijail hooks are not supported with LD_PRELOAD");
3153 if (!config->exec_in_child)
3154 die("minijail_fork is not supported with LD_PRELOAD");
3155
3156 /*
3157 * Before we fork(2) and execve(2) the child process, we need
3158 * to open a pipe(2) to send the minijail configuration over.
3159 */
3160 if (setup_preload(j, &state_out->child_env) ||
3161 setup_pipe(&state_out->child_env, state_out->pipe_fds))
3162 return -EFAULT;
3163 }
3164
3165 if (!use_preload) {
3166 if (j->flags.use_caps && j->caps != 0 &&
3167 !j->flags.set_ambient_caps) {
3168 die("non-empty, non-ambient capabilities are not "
3169 "supported without LD_PRELOAD");
3170 }
3171 }
3172
3173 /* Create pipes for stdin/stdout/stderr as requested by caller. */
3174 struct {
3175 bool requested;
3176 int *pipe_fds;
3177 } pipe_fd_req[] = {
3178 {config->pstdin_fd != NULL, state_out->stdin_fds},
3179 {config->pstdout_fd != NULL, state_out->stdout_fds},
3180 {config->pstderr_fd != NULL, state_out->stderr_fds},
3181 };
3182
3183 for (size_t i = 0; i < ARRAY_SIZE(pipe_fd_req); ++i) {
3184 if (pipe_fd_req[i].requested &&
3185 pipe(pipe_fd_req[i].pipe_fds) == -1)
3186 return EFAULT;
3187 }
3188
3189 /*
3190 * If the parent process needs to configure the child's runtime
3191 * environment after forking, create a pipe(2) to block the child until
3192 * configuration is done.
3193 */
3194 if (j->flags.forward_signals || j->flags.pid_file || j->flags.cgroups ||
3195 j->rlimit_count || j->flags.userns) {
3196 sync_child = 1;
3197 if (pipe(state_out->child_sync_pipe_fds))
3198 return -EFAULT;
3199 }
3200
3201 /*
3202 * Use sys_clone() if and only if we're creating a pid namespace.
3203 *
3204 * tl;dr: WARNING: do not mix pid namespaces and multithreading.
3205 *
3206 * In multithreaded programs, there are a bunch of locks inside libc,
3207 * some of which may be held by other threads at the time that we call
3208 * minijail_run_pid(). If we call fork(), glibc does its level best to
3209 * ensure that we hold all of these locks before it calls clone()
3210 * internally and drop them after clone() returns, but when we call
3211 * sys_clone(2) directly, all that gets bypassed and we end up with a
3212 * child address space where some of libc's important locks are held by
3213 * other threads (which did not get cloned, and hence will never release
3214 * those locks). This is okay so long as we call exec() immediately
3215 * after, but a bunch of seemingly-innocent libc functions like setenv()
3216 * take locks.
3217 *
3218 * Hence, only call sys_clone() if we need to, in order to get at pid
3219 * namespacing. If we follow this path, the child's address space might
3220 * have broken locks; you may only call functions that do not acquire
3221 * any locks.
3222 *
3223 * Unfortunately, fork() acquires every lock it can get its hands on, as
3224 * previously detailed, so this function is highly likely to deadlock
3225 * later on (see "deadlock here") if we're multithreaded.
3226 *
3227 * We might hack around this by having the clone()d child (init of the
3228 * pid namespace) return directly, rather than leaving the clone()d
3229 * process hanging around to be init for the new namespace (and having
3230 * its fork()ed child return in turn), but that process would be
3231 * crippled with its libc locks potentially broken. We might try
3232 * fork()ing in the parent before we clone() to ensure that we own all
3233 * the locks, but then we have to have the forked child hanging around
3234 * consuming resources (and possibly having file descriptors / shared
3235 * memory regions / etc attached). We'd need to keep the child around to
3236 * avoid having its children get reparented to init.
3237 *
3238 * TODO(ellyjones): figure out if the "forked child hanging around"
3239 * problem is fixable or not. It would be nice if we worked in this
3240 * case.
3241 */
3242 pid_t child_pid;
3243 if (pid_namespace) {
3244 unsigned long clone_flags = CLONE_NEWPID | SIGCHLD;
3245 if (j->flags.userns)
3246 clone_flags |= CLONE_NEWUSER;
3247
3248 child_pid = syscall(SYS_clone, clone_flags, NULL, 0L, 0L, 0L);
3249
3250 if (child_pid < 0) {
3251 if (errno == EPERM)
3252 pdie("clone(CLONE_NEWPID | ...) failed with "
3253 "EPERM; is this process missing "
3254 "CAP_SYS_ADMIN?");
3255 pdie("clone(CLONE_NEWPID | ...) failed");
3256 }
3257 } else {
3258 child_pid = fork();
3259
3260 if (child_pid < 0)
3261 pdie("fork failed");
3262 }
3263
3264 state_out->child_pid = child_pid;
3265 if (child_pid) {
3266 j->initpid = child_pid;
3267
3268 if (j->flags.forward_signals) {
3269 forward_pid = child_pid;
3270 install_signal_handlers();
3271 }
3272
3273 if (j->flags.pid_file)
3274 write_pid_file_or_die(j);
3275
3276 if (j->flags.cgroups)
3277 add_to_cgroups_or_die(j);
3278
3279 if (j->rlimit_count)
3280 set_rlimits_or_die(j);
3281
3282 if (j->flags.userns)
3283 write_ugid_maps_or_die(j);
3284
3285 if (j->flags.enter_vfs)
3286 close(j->mountns_fd);
3287
3288 if (j->flags.enter_net)
3289 close(j->netns_fd);
3290
3291 if (sync_child)
3292 parent_setup_complete(state_out->child_sync_pipe_fds);
3293
3294 if (use_preload) {
3295 /*
3296 * Add SIGPIPE to the signal mask to avoid getting
3297 * killed if the child process finishes or closes its
3298 * end of the pipe prematurely.
3299 *
3300 * TODO(crbug.com/1022170): Use pthread_sigmask instead
3301 * of sigprocmask if Minijail is used in multithreaded
3302 * programs.
3303 */
3304 sigset_t to_block, to_restore;
3305 if (sigemptyset(&to_block) < 0)
3306 pdie("sigemptyset failed");
3307 if (sigaddset(&to_block, SIGPIPE) < 0)
3308 pdie("sigaddset failed");
3309 if (sigprocmask(SIG_BLOCK, &to_block, &to_restore) < 0)
3310 pdie("sigprocmask failed");
3311
3312 /* Send marshalled minijail. */
3313 close_and_reset(&state_out->pipe_fds[0]);
3314 ret = minijail_to_fd(j, state_out->pipe_fds[1]);
3315 close_and_reset(&state_out->pipe_fds[1]);
3316
3317 /* Accept any pending SIGPIPE. */
3318 while (true) {
3319 const struct timespec zero_time = {0, 0};
3320 const int sig =
3321 sigtimedwait(&to_block, NULL, &zero_time);
3322 if (sig < 0) {
3323 if (errno != EINTR)
3324 break;
3325 } else {
3326 if (sig != SIGPIPE)
3327 die("unexpected signal %d",
3328 sig);
3329 }
3330 }
3331
3332 /* Restore the signal mask to its original state. */
3333 if (sigprocmask(SIG_SETMASK, &to_restore, NULL) < 0)
3334 pdie("sigprocmask failed");
3335
3336 if (ret) {
3337 warn("failed to send marshalled minijail: %s",
3338 strerror(-ret));
3339 kill(j->initpid, SIGKILL);
3340 }
3341 }
3342
3343 return 0;
3344 }
3345
3346 /* Child process. */
3347 if (j->flags.reset_signal_mask) {
3348 sigset_t signal_mask;
3349 if (sigemptyset(&signal_mask) != 0)
3350 pdie("sigemptyset failed");
3351 if (sigprocmask(SIG_SETMASK, &signal_mask, NULL) != 0)
3352 pdie("sigprocmask failed");
3353 }
3354
3355 if (j->flags.reset_signal_handlers) {
3356 int signum;
3357 for (signum = 0; signum <= SIGRTMAX; signum++) {
3358 /*
3359 * Ignore EINVAL since some signal numbers in the range
3360 * might not be valid.
3361 */
3362 if (signal(signum, SIG_DFL) == SIG_ERR &&
3363 errno != EINVAL) {
3364 pdie("failed to reset signal %d disposition",
3365 signum);
3366 }
3367 }
3368 }
3369
3370 if (j->flags.close_open_fds) {
3371 const size_t kMaxInheritableFdsSize = 11 + MAX_PRESERVED_FDS;
3372 int inheritable_fds[kMaxInheritableFdsSize];
3373 size_t size = 0;
3374
3375 int *pipe_fds[] = {
3376 state_out->pipe_fds, state_out->child_sync_pipe_fds,
3377 state_out->stdin_fds, state_out->stdout_fds,
3378 state_out->stderr_fds,
3379 };
3380
3381 for (size_t i = 0; i < ARRAY_SIZE(pipe_fds); ++i) {
3382 if (pipe_fds[i][0] != -1) {
3383 inheritable_fds[size++] = pipe_fds[i][0];
3384 }
3385 if (pipe_fds[i][1] != -1) {
3386 inheritable_fds[size++] = pipe_fds[i][1];
3387 }
3388 }
3389
3390 /*
3391 * Preserve namespace file descriptors over the close_open_fds()
3392 * call. These are closed in minijail_enter() so they won't leak
3393 * into the child process.
3394 */
3395 if (j->flags.enter_vfs)
3396 minijail_preserve_fd(j, j->mountns_fd, j->mountns_fd);
3397 if (j->flags.enter_net)
3398 minijail_preserve_fd(j, j->netns_fd, j->netns_fd);
3399
3400 for (size_t i = 0; i < j->preserved_fd_count; i++) {
3401 /*
3402 * Preserve all parent_fds. They will be dup2(2)-ed in
3403 * the child later.
3404 */
3405 inheritable_fds[size++] = j->preserved_fds[i].parent_fd;
3406 }
3407
3408 if (config->elf_fd > -1) {
3409 inheritable_fds[size++] = config->elf_fd;
3410 }
3411
3412 if (close_open_fds(inheritable_fds, size) < 0)
3413 die("failed to close open file descriptors");
3414 }
3415
3416 /* The set of fds will be replaced. */
3417 fd_set child_fds;
3418 FD_ZERO(&child_fds);
3419 if (get_child_fds(j, &child_fds))
3420 die("failed to set up fd redirections");
3421
3422 if (avoid_pipe_conflicts(state_out, &child_fds))
3423 die("failed to redirect conflicting pipes");
3424
3425 /* The elf_fd needs to be mutable so use a stack copy from now on. */
3426 int elf_fd = config->elf_fd;
3427 if (elf_fd != -1 && ensure_no_fd_conflict(&child_fds, -1, &elf_fd))
3428 die("failed to redirect elf_fd");
3429
3430 if (redirect_fds(j, &child_fds))
3431 die("failed to set up fd redirections");
3432
3433 if (sync_child)
3434 wait_for_parent_setup(state_out->child_sync_pipe_fds);
3435
3436 if (j->flags.userns)
3437 enter_user_namespace(j);
3438
3439 setup_child_std_fds(j, state_out);
3440
3441 /* If running an init program, let it decide when/how to mount /proc. */
3442 if (pid_namespace && !do_init)
3443 j->flags.remount_proc_ro = 0;
3444
3445 if (use_preload) {
3446 /* Strip out flags that cannot be inherited across execve(2). */
3447 minijail_preexec(j);
3448 } else {
3449 /*
3450 * If not using LD_PRELOAD, do all jailing before execve(2).
3451 * Note that PID namespaces can only be entered on fork(2),
3452 * so that flag is still cleared.
3453 */
3454 j->flags.pids = 0;
3455 }
3456
3457 /*
3458 * Jail this process.
3459 * If forking, return.
3460 * If not, execve(2) the target.
3461 */
3462 minijail_enter(j);
3463
3464 if (config->exec_in_child && pid_namespace && do_init) {
3465 /*
3466 * pid namespace: this process will become init inside the new
3467 * namespace. We don't want all programs we might exec to have
3468 * to know how to be init. Normally (do_init == 1) we fork off
3469 * a child to actually run the program. If |do_init == 0|, we
3470 * let the program keep pid 1 and be init.
3471 *
3472 * If we're multithreaded, we'll probably deadlock here. See
3473 * WARNING above.
3474 */
3475 child_pid = fork();
3476 if (child_pid < 0) {
3477 _exit(child_pid);
3478 } else if (child_pid > 0) {
3479 minijail_free_run_state(state_out);
3480
3481 /*
3482 * Best effort. Don't bother checking the return value.
3483 */
3484 prctl(PR_SET_NAME, "minijail-init");
3485 init(child_pid); /* Never returns. */
3486 }
3487 state_out->child_pid = child_pid;
3488 }
3489
3490 run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_EXECVE);
3491
3492 if (!config->exec_in_child)
3493 return 0;
3494
3495 /*
3496 * We're going to execve(), so make sure any remaining resources are
3497 * freed. Exceptions are:
3498 * 1. The child environment. No need to worry about freeing it since
3499 * execve reinitializes the heap anyways.
3500 * 2. The read side of the LD_PRELOAD pipe, which we need to hand down
3501 * into the target in which the preloaded code will read from it and
3502 * then close it.
3503 */
3504 state_out->pipe_fds[0] = -1;
3505 char *const *child_env = state_out->child_env;
3506 state_out->child_env = NULL;
3507 minijail_free_run_state(state_out);
3508
3509 /*
3510 * If we aren't pid-namespaced, or the jailed program asked to be init:
3511 * calling process
3512 * -> execve()-ing process
3513 * If we are:
3514 * calling process
3515 * -> init()-ing process
3516 * -> execve()-ing process
3517 */
3518 if (!child_env)
3519 child_env = config->envp ? config->envp : environ;
3520 if (elf_fd > -1) {
3521 fexecve(elf_fd, config->argv, child_env);
3522 pwarn("fexecve(%d) failed", config->elf_fd);
3523 } else {
3524 execve(config->filename, config->argv, child_env);
3525 pwarn("execve(%s) failed", config->filename);
3526 }
3527
3528 ret = (errno == ENOENT ? MINIJAIL_ERR_NO_COMMAND
3529 : MINIJAIL_ERR_NO_ACCESS);
3530 _exit(ret);
3531 }
3532
3533 static int
3534 minijail_run_config_internal(struct minijail *j,
3535 const struct minijail_run_config *config)
3536 {
3537 struct minijail_run_state state = {
3538 .child_pid = -1,
3539 .pipe_fds = {-1, -1},
3540 .stdin_fds = {-1, -1},
3541 .stdout_fds = {-1, -1},
3542 .stderr_fds = {-1, -1},
3543 .child_sync_pipe_fds = {-1, -1},
3544 .child_env = NULL,
3545 };
3546 int ret = minijail_run_internal(j, config, &state);
3547
3548 if (ret == 0) {
3549 if (config->pchild_pid)
3550 *config->pchild_pid = state.child_pid;
3551
3552 /* Grab stdin/stdout/stderr descriptors requested by caller. */
3553 struct {
3554 int *pfd;
3555 int *psrc;
3556 } fd_map[] = {
3557 {config->pstdin_fd, &state.stdin_fds[1]},
3558 {config->pstdout_fd, &state.stdout_fds[0]},
3559 {config->pstderr_fd, &state.stderr_fds[0]},
3560 };
3561
3562 for (size_t i = 0; i < ARRAY_SIZE(fd_map); ++i) {
3563 if (fd_map[i].pfd) {
3564 *fd_map[i].pfd = *fd_map[i].psrc;
3565 *fd_map[i].psrc = -1;
3566 }
3567 }
3568
3569 if (!config->exec_in_child)
3570 ret = state.child_pid;
3571 }
3572
3573 minijail_free_run_state(&state);
3574
3575 return ret;
3576 }
3577
3578 static int minijail_wait_internal(struct minijail *j, int expected_signal)
3579 {
3580 if (j->initpid <= 0)
3581 return -ECHILD;
3582
3583 int st;
3584 while (true) {
3585 const int ret = waitpid(j->initpid, &st, 0);
3586 if (ret >= 0)
3587 break;
3588 if (errno != EINTR)
3589 return -errno;
3590 }
3591
3592 if (!WIFEXITED(st)) {
3593 int error_status = st;
3594 if (!WIFSIGNALED(st)) {
3595 return error_status;
3596 }
3597
3598 int signum = WTERMSIG(st);
3599 /*
3600 * We return MINIJAIL_ERR_JAIL if the process received
3601 * SIGSYS, which happens when a syscall is blocked by
3602 * seccomp filters.
3603 * If not, we do what bash(1) does:
3604 * $? = 128 + signum
3605 */
3606 if (signum == SIGSYS) {
3607 warn("child process %d had a policy violation (%s)",
3608 j->initpid,
3609 j->seccomp_policy_path ? j->seccomp_policy_path
3610 : "NO-LABEL");
3611 error_status = MINIJAIL_ERR_JAIL;
3612 } else {
3613 if (signum != expected_signal) {
3614 warn("child process %d received signal %d",
3615 j->initpid, signum);
3616 }
3617 error_status = MINIJAIL_ERR_SIG_BASE + signum;
3618 }
3619 return error_status;
3620 }
3621
3622 int exit_status = WEXITSTATUS(st);
3623 if (exit_status != 0)
3624 info("child process %d exited with status %d", j->initpid,
3625 exit_status);
3626
3627 return exit_status;
3628 }
3629
3630 int API minijail_kill(struct minijail *j)
3631 {
3632 if (j->initpid <= 0)
3633 return -ECHILD;
3634
3635 if (kill(j->initpid, SIGTERM))
3636 return -errno;
3637
3638 return minijail_wait_internal(j, SIGTERM);
3639 }
3640
3641 int API minijail_wait(struct minijail *j)
3642 {
3643 return minijail_wait_internal(j, 0);
3644 }
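/*
 * Sketch of how a caller might interpret the value returned by
 * minijail_wait(), based on minijail_wait_internal() above. Shown for
 * illustration; other values are possible (e.g. a raw wait status when the
 * child neither exited nor was signaled).
 *
 *   int status = minijail_wait(j);
 *   if (status < 0) {
 *           // Negative errno, e.g. -ECHILD if no child was launched.
 *   } else if (status == MINIJAIL_ERR_JAIL) {
 *           // Child was killed by SIGSYS (seccomp policy violation).
 *   } else if (status > MINIJAIL_ERR_SIG_BASE) {
 *           // Likely killed by signal (status - MINIJAIL_ERR_SIG_BASE),
 *           // though a child exiting with a large status overlaps this
 *           // range.
 *   } else {
 *           // Child exited normally with this exit status.
 *   }
 */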
3645
3646 void API minijail_destroy(struct minijail *j)
3647 {
3648 size_t i;
3649
3650 if (j->filter_prog) {
3651 free(j->filter_prog->filter);
3652 free(j->filter_prog);
3653 }
3654 free_mounts_list(j);
3655 free_remounts_list(j);
3656 while (j->hooks_head) {
3657 struct hook *c = j->hooks_head;
3658 j->hooks_head = c->next;
3659 free(c);
3660 }
3661 j->hooks_tail = NULL;
3662 if (j->user)
3663 free(j->user);
3664 if (j->suppl_gid_list)
3665 free(j->suppl_gid_list);
3666 if (j->chrootdir)
3667 free(j->chrootdir);
3668 if (j->pid_file_path)
3669 free(j->pid_file_path);
3670 if (j->uidmap)
3671 free(j->uidmap);
3672 if (j->gidmap)
3673 free(j->gidmap);
3674 if (j->hostname)
3675 free(j->hostname);
3676 if (j->preload_path)
3677 free(j->preload_path);
3678 if (j->alt_syscall_table)
3679 free(j->alt_syscall_table);
3680 for (i = 0; i < j->cgroup_count; ++i)
3681 free(j->cgroups[i]);
3682 if (j->seccomp_policy_path)
3683 free(j->seccomp_policy_path);
3684 free(j);
3685 }
3686
3687 void API minijail_log_to_fd(int fd, int min_priority)
3688 {
3689 init_logging(LOG_TO_FD, fd, min_priority);
3690 }
3691