1 /* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
2  * Use of this source code is governed by a BSD-style license that can be
3  * found in the LICENSE file.
4  */
5 
6 #define _BSD_SOURCE
7 #define _DEFAULT_SOURCE
8 #define _GNU_SOURCE
9 
10 #include <asm/unistd.h>
11 #include <assert.h>
12 #include <dirent.h>
13 #include <errno.h>
14 #include <fcntl.h>
15 #include <grp.h>
16 #include <linux/capability.h>
17 #include <linux/filter.h>
18 #include <sched.h>
19 #include <signal.h>
20 #include <stdbool.h>
21 #include <stddef.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/capability.h>
26 #include <sys/mount.h>
27 #include <sys/param.h>
28 #include <sys/prctl.h>
29 #include <sys/resource.h>
30 #include <sys/select.h>
31 #include <sys/stat.h>
32 #include <sys/sysmacros.h>
33 #include <sys/types.h>
34 #include <sys/user.h>
35 #include <sys/wait.h>
36 #include <syscall.h>
37 #include <unistd.h>
38 
39 #include "libminijail.h"
40 #include "libminijail-private.h"
41 
42 #include "signal_handler.h"
43 #include "syscall_filter.h"
44 #include "syscall_wrapper.h"
45 #include "system.h"
46 #include "util.h"
47 
48 /* Until these are reliably available in linux/prctl.h. */
49 #ifndef PR_ALT_SYSCALL
50 # define PR_ALT_SYSCALL 0x43724f53
51 #endif
52 
53 /* New cgroup namespace might not be in linux-headers yet. */
54 #ifndef CLONE_NEWCGROUP
55 # define CLONE_NEWCGROUP 0x02000000
56 #endif
57 
58 #define MAX_CGROUPS 10 /* 10 different controllers supported by Linux. */
59 
60 #define MAX_RLIMITS 32 /* Currently there are 15 supported by Linux. */
61 
62 #define MAX_PRESERVED_FDS 32U
63 
64 /* Keyctl commands. */
65 #define KEYCTL_JOIN_SESSION_KEYRING 1
66 
67 /*
68  * The userspace equivalent of MNT_USER_SETTABLE_MASK, which is the mask of all
69  * flags that can be modified by MS_REMOUNT.
70  */
71 #define MS_USER_SETTABLE_MASK                                                  \
72 	(MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_NOATIME | MS_NODIRATIME |       \
73 	 MS_RELATIME | MS_RDONLY)
74 
75 struct minijail_rlimit {
76 	int type;
77 	rlim_t cur;
78 	rlim_t max;
79 };
80 
81 struct mountpoint {
82 	char *src;
83 	char *dest;
84 	char *type;
85 	char *data;
86 	int has_data;
87 	unsigned long flags;
88 	struct mountpoint *next;
89 };
90 
91 struct minijail_remount {
92 	unsigned long remount_mode;
93 	char *mount_name;
94 	struct minijail_remount *next;
95 };
96 
97 struct hook {
98 	minijail_hook_t hook;
99 	void *payload;
100 	minijail_hook_event_t event;
101 	struct hook *next;
102 };
103 
104 struct preserved_fd {
105 	int parent_fd;
106 	int child_fd;
107 };
108 
109 struct minijail {
110 	/*
111 	 * WARNING: if you add a flag here you need to make sure it's
112 	 * accounted for in minijail_pre{enter|exec}() below.
113 	 */
114 	struct {
115 		int uid : 1;
116 		int gid : 1;
117 		int inherit_suppl_gids : 1;
118 		int set_suppl_gids : 1;
119 		int keep_suppl_gids : 1;
120 		int use_caps : 1;
121 		int capbset_drop : 1;
122 		int set_ambient_caps : 1;
123 		int vfs : 1;
124 		int enter_vfs : 1;
125 		int pids : 1;
126 		int ipc : 1;
127 		int uts : 1;
128 		int net : 1;
129 		int enter_net : 1;
130 		int ns_cgroups : 1;
131 		int userns : 1;
132 		int disable_setgroups : 1;
133 		int seccomp : 1;
134 		int remount_proc_ro : 1;
135 		int no_new_privs : 1;
136 		int seccomp_filter : 1;
137 		int seccomp_filter_tsync : 1;
138 		int seccomp_filter_logging : 1;
139 		int seccomp_filter_allow_speculation : 1;
140 		int chroot : 1;
141 		int pivot_root : 1;
142 		int mount_dev : 1;
143 		int mount_tmp : 1;
144 		int do_init : 1;
145 		int run_as_init : 1;
146 		int pid_file : 1;
147 		int cgroups : 1;
148 		int alt_syscall : 1;
149 		int reset_signal_mask : 1;
150 		int reset_signal_handlers : 1;
151 		int close_open_fds : 1;
152 		int new_session_keyring : 1;
153 		int forward_signals : 1;
154 		int setsid : 1;
155 	} flags;
156 	uid_t uid;
157 	gid_t gid;
158 	gid_t usergid;
159 	char *user;
160 	size_t suppl_gid_count;
161 	gid_t *suppl_gid_list;
162 	uint64_t caps;
163 	uint64_t cap_bset;
164 	pid_t initpid;
165 	int mountns_fd;
166 	int netns_fd;
167 	char *chrootdir;
168 	char *pid_file_path;
169 	char *uidmap;
170 	char *gidmap;
171 	char *hostname;
172 	char *preload_path;
173 	size_t filter_len;
174 	struct sock_fprog *filter_prog;
175 	char *alt_syscall_table;
176 	struct mountpoint *mounts_head;
177 	struct mountpoint *mounts_tail;
178 	size_t mounts_count;
179 	unsigned long remount_mode;
180 	struct minijail_remount *remounts_head;
181 	struct minijail_remount *remounts_tail;
182 	size_t tmpfs_size;
183 	char *cgroups[MAX_CGROUPS];
184 	size_t cgroup_count;
185 	struct minijail_rlimit rlimits[MAX_RLIMITS];
186 	size_t rlimit_count;
187 	uint64_t securebits_skip_mask;
188 	struct hook *hooks_head;
189 	struct hook *hooks_tail;
190 	struct preserved_fd preserved_fds[MAX_PRESERVED_FDS];
191 	size_t preserved_fd_count;
192 };
193 
194 static void run_hooks_or_die(const struct minijail *j,
195 			     minijail_hook_event_t event);
196 
197 static void free_mounts_list(struct minijail *j)
198 {
199 	while (j->mounts_head) {
200 		struct mountpoint *m = j->mounts_head;
201 		j->mounts_head = j->mounts_head->next;
202 		free(m->data);
203 		free(m->type);
204 		free(m->dest);
205 		free(m->src);
206 		free(m);
207 	}
208 	// No need to clear mounts_head as we know it's NULL after the loop.
209 	j->mounts_tail = NULL;
210 }
211 
212 static void free_remounts_list(struct minijail *j)
213 {
214 	while (j->remounts_head) {
215 		struct minijail_remount *m = j->remounts_head;
216 		j->remounts_head = j->remounts_head->next;
217 		free(m->mount_name);
218 		free(m);
219 	}
220 	// No need to clear remounts_head as we know it's NULL after the loop.
221 	j->remounts_tail = NULL;
222 }
223 
224 /*
225  * Writes exactly n bytes from buf to file descriptor fd.
226  * Returns 0 on success or a negative error code on error.
227  */
228 static int write_exactly(int fd, const void *buf, size_t n)
229 {
230 	const char *p = buf;
231 	while (n > 0) {
232 		const ssize_t written = write(fd, p, n);
233 		if (written < 0) {
234 			if (errno == EINTR)
235 				continue;
236 
237 			return -errno;
238 		}
239 
240 		p += written;
241 		n -= written;
242 	}
243 
244 	return 0;
245 }
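/*
 * Usage sketch (illustrative only; the target file and map string below are
 * hypothetical, not taken from this file): write_exactly() is the helper to
 * reach for when a short write or EINTR must not be treated as a failure,
 * e.g. when writing an id-map style string to a procfs file.
 *
 *   int fd = open("/proc/self/uid_map", O_WRONLY | O_CLOEXEC);
 *   if (fd < 0)
 *           pdie("open(uid_map)");
 *   const char map[] = "0 1000 1\n";
 *   if (write_exactly(fd, map, sizeof(map) - 1) < 0)
 *           warn("failed to write uid_map");
 *   close(fd);
 */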
246 
247 /* Closes *pfd and sets it to -1. */
248 static void close_and_reset(int *pfd)
249 {
250 	if (*pfd != -1)
251 		close(*pfd);
252 	*pfd = -1;
253 }
254 
255 /*
256  * Strip out flags meant for the parent.
257  * We keep things that are not inherited across execve(2) (e.g. capabilities),
258  * or are easier to set after execve(2) (e.g. seccomp filters).
259  */
260 void minijail_preenter(struct minijail *j)
261 {
262 	j->flags.vfs = 0;
263 	j->flags.enter_vfs = 0;
264 	j->flags.ns_cgroups = 0;
265 	j->flags.net = 0;
266 	j->flags.uts = 0;
267 	j->flags.remount_proc_ro = 0;
268 	j->flags.pids = 0;
269 	j->flags.do_init = 0;
270 	j->flags.run_as_init = 0;
271 	j->flags.pid_file = 0;
272 	j->flags.cgroups = 0;
273 	j->flags.forward_signals = 0;
274 	j->flags.setsid = 0;
275 	j->remount_mode = 0;
276 	free_remounts_list(j);
277 }
278 
279 /*
280  * Strip out flags meant for the child.
281  * We keep things that are inherited across execve(2).
282  */
283 void minijail_preexec(struct minijail *j)
284 {
285 	int vfs = j->flags.vfs;
286 	int enter_vfs = j->flags.enter_vfs;
287 	int ns_cgroups = j->flags.ns_cgroups;
288 	int net = j->flags.net;
289 	int uts = j->flags.uts;
290 	int remount_proc_ro = j->flags.remount_proc_ro;
291 	int userns = j->flags.userns;
292 	if (j->user)
293 		free(j->user);
294 	j->user = NULL;
295 	if (j->suppl_gid_list)
296 		free(j->suppl_gid_list);
297 	j->suppl_gid_list = NULL;
298 	if (j->preload_path)
299 		free(j->preload_path);
300 	j->preload_path = NULL;
301 	free_mounts_list(j);
302 	memset(&j->flags, 0, sizeof(j->flags));
303 	/* Now restore anything we meant to keep. */
304 	j->flags.vfs = vfs;
305 	j->flags.enter_vfs = enter_vfs;
306 	j->flags.ns_cgroups = ns_cgroups;
307 	j->flags.net = net;
308 	j->flags.uts = uts;
309 	j->flags.remount_proc_ro = remount_proc_ro;
310 	j->flags.userns = userns;
311 	/* Note, |pids| will already have been used before this call. */
312 }
313 
314 /* Minijail API. */
315 
316 struct minijail API *minijail_new(void)
317 {
318 	struct minijail *j = calloc(1, sizeof(struct minijail));
319 	if (j) {
320 		j->remount_mode = MS_PRIVATE;
321 	}
322 	return j;
323 }
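/*
 * End-to-end sketch of the configuration API in this file (illustrative; the
 * uid/gid and program are hypothetical, and minijail_run()/minijail_wait()/
 * minijail_destroy() come from libminijail.h rather than being defined here):
 *
 *   struct minijail *j = minijail_new();
 *   if (!j)
 *           abort();
 *   minijail_change_uid(j, 1000);
 *   minijail_change_gid(j, 1000);
 *   minijail_namespace_pids(j);
 *   minijail_no_new_privs(j);
 *   char *const argv[] = { "/bin/true", NULL };
 *   minijail_run(j, argv[0], argv);
 *   minijail_wait(j);
 *   minijail_destroy(j);
 */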
324 
325 void API minijail_change_uid(struct minijail *j, uid_t uid)
326 {
327 	if (uid == 0)
328 		die("useless change to uid 0");
329 	j->uid = uid;
330 	j->flags.uid = 1;
331 }
332 
333 void API minijail_change_gid(struct minijail *j, gid_t gid)
334 {
335 	if (gid == 0)
336 		die("useless change to gid 0");
337 	j->gid = gid;
338 	j->flags.gid = 1;
339 }
340 
341 void API minijail_set_supplementary_gids(struct minijail *j, size_t size,
342 					 const gid_t *list)
343 {
344 	size_t i;
345 
346 	if (j->flags.inherit_suppl_gids)
347 		die("cannot inherit *and* set supplementary groups");
348 	if (j->flags.keep_suppl_gids)
349 		die("cannot keep *and* set supplementary groups");
350 
351 	if (size == 0) {
352 		/* Clear supplementary groups. */
353 		j->suppl_gid_list = NULL;
354 		j->suppl_gid_count = 0;
355 		j->flags.set_suppl_gids = 1;
356 		return;
357 	}
358 
359 	/* Copy the gid_t array. */
360 	j->suppl_gid_list = calloc(size, sizeof(gid_t));
361 	if (!j->suppl_gid_list) {
362 		die("failed to allocate internal supplementary group array");
363 	}
364 	for (i = 0; i < size; i++) {
365 		j->suppl_gid_list[i] = list[i];
366 	}
367 	j->suppl_gid_count = size;
368 	j->flags.set_suppl_gids = 1;
369 }
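/*
 * Illustrative call (the group IDs are made up): the |size|/|list| pair is
 * copied into the jail, so the caller's array does not need to outlive it.
 * Passing size == 0 clears the supplementary groups, as handled above.
 *
 *   const gid_t groups[] = { 1001, 1002 };
 *   minijail_set_supplementary_gids(j, ARRAY_SIZE(groups), groups);
 */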
370 
371 void API minijail_keep_supplementary_gids(struct minijail *j) {
372 	j->flags.keep_suppl_gids = 1;
373 }
374 
375 int API minijail_change_user(struct minijail *j, const char *user)
376 {
377 	uid_t uid;
378 	gid_t gid;
379 	int rc = lookup_user(user, &uid, &gid);
380 	if (rc)
381 		return rc;
382 	minijail_change_uid(j, uid);
383 	j->user = strdup(user);
384 	if (!j->user)
385 		return -ENOMEM;
386 	j->usergid = gid;
387 	return 0;
388 }
389 
390 int API minijail_change_group(struct minijail *j, const char *group)
391 {
392 	gid_t gid;
393 	int rc = lookup_group(group, &gid);
394 	if (rc)
395 		return rc;
396 	minijail_change_gid(j, gid);
397 	return 0;
398 }
399 
400 void API minijail_use_seccomp(struct minijail *j)
401 {
402 	j->flags.seccomp = 1;
403 }
404 
405 void API minijail_no_new_privs(struct minijail *j)
406 {
407 	j->flags.no_new_privs = 1;
408 }
409 
410 void API minijail_use_seccomp_filter(struct minijail *j)
411 {
412 	j->flags.seccomp_filter = 1;
413 }
414 
415 void API minijail_set_seccomp_filter_tsync(struct minijail *j)
416 {
417 	if (j->filter_len > 0 && j->filter_prog != NULL) {
418 		die("minijail_set_seccomp_filter_tsync() must be called "
419 		    "before minijail_parse_seccomp_filters()");
420 	}
421 
422 	if (j->flags.seccomp_filter_logging && !seccomp_ret_log_available()) {
423 		/*
424 		 * If SECCOMP_RET_LOG is not available, we don't want to use
425 		 * SECCOMP_RET_TRAP to both kill the entire process and report
426 		 * failing syscalls, since it will be brittle. Just bail.
427 		 */
428 		die("SECCOMP_RET_LOG not available, cannot use logging with "
429 		    "thread sync at the same time");
430 	}
431 
432 	j->flags.seccomp_filter_tsync = 1;
433 }
434 
435 void API minijail_set_seccomp_filter_allow_speculation(struct minijail *j)
436 {
437 	if (j->filter_len > 0 && j->filter_prog != NULL) {
438 		die("minijail_set_seccomp_filter_allow_speculation() must be "
439 		    "called before minijail_parse_seccomp_filters()");
440 	}
441 
442 	j->flags.seccomp_filter_allow_speculation = 1;
443 }
444 
445 void API minijail_log_seccomp_filter_failures(struct minijail *j)
446 {
447 	if (j->filter_len > 0 && j->filter_prog != NULL) {
448 		die("minijail_log_seccomp_filter_failures() must be called "
449 		    "before minijail_parse_seccomp_filters()");
450 	}
451 
452 	if (j->flags.seccomp_filter_tsync && !seccomp_ret_log_available()) {
453 		/*
454 		 * If SECCOMP_RET_LOG is not available, we don't want to use
455 		 * SECCOMP_RET_TRAP to both kill the entire process and report
456 		 * failing syscalls, since it will be brittle. Just bail.
457 		 */
458 		die("SECCOMP_RET_LOG not available, cannot use thread sync with "
459 		    "logging at the same time");
460 	}
461 
462 	if (debug_logging_allowed()) {
463 		j->flags.seccomp_filter_logging = 1;
464 	} else {
465 		warn("non-debug build: ignoring request to enable seccomp "
466 		     "logging");
467 	}
468 }
469 
470 void API minijail_use_caps(struct minijail *j, uint64_t capmask)
471 {
472 	/*
473 	 * 'minijail_use_caps' configures a runtime-capabilities-only
474 	 * environment, including a bounding set matching the thread's runtime
475 	 * (permitted|inheritable|effective) sets.
476 	 * Therefore, it will override any existing bounding set configurations
477 	 * since the latter would allow gaining extra runtime capabilities from
478 	 * file capabilities.
479 	 */
480 	if (j->flags.capbset_drop) {
481 		warn("overriding bounding set configuration");
482 		j->cap_bset = 0;
483 		j->flags.capbset_drop = 0;
484 	}
485 	j->caps = capmask;
486 	j->flags.use_caps = 1;
487 }
488 
489 void API minijail_capbset_drop(struct minijail *j, uint64_t capmask)
490 {
491 	if (j->flags.use_caps) {
492 		/*
493 		 * 'minijail_use_caps' will have already configured a capability
494 		 * bounding set matching the (permitted|inheritable|effective)
495 		 * sets. Abort if the user tries to configure a separate
496 		 * bounding set. 'minijail_capbset_drop' and 'minijail_use_caps'
497 		 * are mutually exclusive.
498 		 */
499 		die("runtime capabilities already configured, can't drop "
500 		    "bounding set separately");
501 	}
502 	j->cap_bset = capmask;
503 	j->flags.capbset_drop = 1;
504 }
505 
506 void API minijail_set_ambient_caps(struct minijail *j)
507 {
508 	j->flags.set_ambient_caps = 1;
509 }
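/*
 * Sketch of a runtime-capabilities-only jail combining the calls above (the
 * chosen capability and the 64-bit mask construction are illustrative
 * assumptions):
 *
 *   minijail_use_caps(j, UINT64_C(1) << CAP_NET_BIND_SERVICE);
 *   minijail_set_ambient_caps(j);
 *   minijail_no_new_privs(j);
 *
 * minijail_use_caps() and minijail_capbset_drop() are mutually exclusive;
 * configuring both aborts, as enforced above.
 */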
510 
511 void API minijail_reset_signal_mask(struct minijail *j)
512 {
513 	j->flags.reset_signal_mask = 1;
514 }
515 
516 void API minijail_reset_signal_handlers(struct minijail *j)
517 {
518 	j->flags.reset_signal_handlers = 1;
519 }
520 
521 void API minijail_namespace_vfs(struct minijail *j)
522 {
523 	j->flags.vfs = 1;
524 }
525 
526 void API minijail_namespace_enter_vfs(struct minijail *j, const char *ns_path)
527 {
528 	/* Note: Do not use O_CLOEXEC here.  We'll close it after we use it. */
529 	int ns_fd = open(ns_path, O_RDONLY);
530 	if (ns_fd < 0) {
531 		pdie("failed to open namespace '%s'", ns_path);
532 	}
533 	j->mountns_fd = ns_fd;
534 	j->flags.enter_vfs = 1;
535 }
536 
537 void API minijail_new_session_keyring(struct minijail *j)
538 {
539 	j->flags.new_session_keyring = 1;
540 }
541 
542 void API minijail_skip_setting_securebits(struct minijail *j,
543 					  uint64_t securebits_skip_mask)
544 {
545 	j->securebits_skip_mask = securebits_skip_mask;
546 }
547 
548 void API minijail_remount_mode(struct minijail *j, unsigned long mode)
549 {
550 	j->remount_mode = mode;
551 }
552 
553 void API minijail_skip_remount_private(struct minijail *j)
554 {
555 	j->remount_mode = 0;
556 }
557 
558 void API minijail_namespace_pids(struct minijail *j)
559 {
560 	j->flags.vfs = 1;
561 	j->flags.remount_proc_ro = 1;
562 	j->flags.pids = 1;
563 	j->flags.do_init = 1;
564 }
565 
566 void API minijail_namespace_pids_rw_proc(struct minijail *j)
567 {
568 	j->flags.vfs = 1;
569 	j->flags.pids = 1;
570 	j->flags.do_init = 1;
571 }
572 
573 void API minijail_namespace_ipc(struct minijail *j)
574 {
575 	j->flags.ipc = 1;
576 }
577 
578 void API minijail_namespace_uts(struct minijail *j)
579 {
580 	j->flags.uts = 1;
581 }
582 
583 int API minijail_namespace_set_hostname(struct minijail *j, const char *name)
584 {
585 	if (j->hostname)
586 		return -EINVAL;
587 	minijail_namespace_uts(j);
588 	j->hostname = strdup(name);
589 	if (!j->hostname)
590 		return -ENOMEM;
591 	return 0;
592 }
593 
594 void API minijail_namespace_net(struct minijail *j)
595 {
596 	j->flags.net = 1;
597 }
598 
599 void API minijail_namespace_enter_net(struct minijail *j, const char *ns_path)
600 {
601 	/* Note: Do not use O_CLOEXEC here.  We'll close it after we use it. */
602 	int ns_fd = open(ns_path, O_RDONLY);
603 	if (ns_fd < 0) {
604 		pdie("failed to open namespace '%s'", ns_path);
605 	}
606 	j->netns_fd = ns_fd;
607 	j->flags.enter_net = 1;
608 }
609 
610 void API minijail_namespace_cgroups(struct minijail *j)
611 {
612 	j->flags.ns_cgroups = 1;
613 }
614 
615 void API minijail_close_open_fds(struct minijail *j)
616 {
617 	j->flags.close_open_fds = 1;
618 }
619 
620 void API minijail_remount_proc_readonly(struct minijail *j)
621 {
622 	j->flags.vfs = 1;
623 	j->flags.remount_proc_ro = 1;
624 }
625 
626 void API minijail_namespace_user(struct minijail *j)
627 {
628 	j->flags.userns = 1;
629 }
630 
631 void API minijail_namespace_user_disable_setgroups(struct minijail *j)
632 {
633 	j->flags.disable_setgroups = 1;
634 }
635 
636 int API minijail_uidmap(struct minijail *j, const char *uidmap)
637 {
638 	j->uidmap = strdup(uidmap);
639 	if (!j->uidmap)
640 		return -ENOMEM;
641 	char *ch;
642 	for (ch = j->uidmap; *ch; ch++) {
643 		if (*ch == ',')
644 			*ch = '\n';
645 	}
646 	return 0;
647 }
648 
649 int API minijail_gidmap(struct minijail *j, const char *gidmap)
650 {
651 	j->gidmap = strdup(gidmap);
652 	if (!j->gidmap)
653 		return -ENOMEM;
654 	char *ch;
655 	for (ch = j->gidmap; *ch; ch++) {
656 		if (*ch == ',')
657 			*ch = '\n';
658 	}
659 	return 0;
660 }
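/*
 * Example map strings (the IDs are hypothetical). Each entry follows the
 * kernel's "<id-inside-ns> <id-outside-ns> <count>" format; commas separate
 * entries and are rewritten to newlines above before the map is written to
 * /proc/<pid>/uid_map or gid_map.
 *
 *   minijail_namespace_user(j);
 *   minijail_uidmap(j, "0 1000 1,65534 65534 1");
 *   minijail_gidmap(j, "0 1000 1");
 */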
661 
662 void API minijail_inherit_usergroups(struct minijail *j)
663 {
664 	j->flags.inherit_suppl_gids = 1;
665 }
666 
667 void API minijail_run_as_init(struct minijail *j)
668 {
669 	/*
670 	 * Since the jailed program will become 'init' in the new PID namespace,
671 	 * Minijail does not need to fork an 'init' process.
672 	 */
673 	j->flags.run_as_init = 1;
674 }
675 
676 int API minijail_enter_chroot(struct minijail *j, const char *dir)
677 {
678 	if (j->chrootdir)
679 		return -EINVAL;
680 	j->chrootdir = strdup(dir);
681 	if (!j->chrootdir)
682 		return -ENOMEM;
683 	j->flags.chroot = 1;
684 	return 0;
685 }
686 
687 int API minijail_enter_pivot_root(struct minijail *j, const char *dir)
688 {
689 	if (j->chrootdir)
690 		return -EINVAL;
691 	j->chrootdir = strdup(dir);
692 	if (!j->chrootdir)
693 		return -ENOMEM;
694 	j->flags.pivot_root = 1;
695 	return 0;
696 }
697 
698 char API *minijail_get_original_path(struct minijail *j,
699 				     const char *path_inside_chroot)
700 {
701 	struct mountpoint *b;
702 
703 	b = j->mounts_head;
704 	while (b) {
705 		/*
706 		 * If |path_inside_chroot| is the exact destination of a
707 		 * mount, then the original path is exactly the source of
708 		 * the mount.
709 		 *  for example: "-b /some/path/exe,/chroot/path/exe"
710 		 *    mount source = /some/path/exe, mount dest =
711 		 *    /chroot/path/exe. Then, when getting the original path of
712 		 *    "/chroot/path/exe", the source of that mount,
713 		 *    "/some/path/exe", is what should be returned.
714 		 */
715 		if (!strcmp(b->dest, path_inside_chroot))
716 			return strdup(b->src);
717 
718 		/*
719 		 * If |path_inside_chroot| is within the destination path of a
720 		 * mount, take the suffix of the chroot path relative to the
721 		 * mount destination path, and append it to the mount source
722 		 * path.
723 		 */
724 		if (!strncmp(b->dest, path_inside_chroot, strlen(b->dest))) {
725 			const char *relative_path =
726 				path_inside_chroot + strlen(b->dest);
727 			return path_join(b->src, relative_path);
728 		}
729 		b = b->next;
730 	}
731 
732 	/* If there is a chroot path, append |path_inside_chroot| to that. */
733 	if (j->chrootdir)
734 		return path_join(j->chrootdir, path_inside_chroot);
735 
736 	/* No chroot, so the path outside is the same as it is inside. */
737 	return strdup(path_inside_chroot);
738 }
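/*
 * Lookup sketch matching the "-b" example in the comment above (paths are
 * hypothetical; the returned string is heap-allocated and must be freed):
 *
 *   minijail_bind(j, "/some/path", "/chroot/path", 0);
 *   char *orig = minijail_get_original_path(j, "/chroot/path/exe");
 *   ... orig now holds "/some/path/exe" ...
 *   free(orig);
 */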
739 
740 void API minijail_mount_dev(struct minijail *j)
741 {
742 	j->flags.mount_dev = 1;
743 }
744 
745 void API minijail_mount_tmp(struct minijail *j)
746 {
747 	minijail_mount_tmp_size(j, 64 * 1024 * 1024);
748 }
749 
750 void API minijail_mount_tmp_size(struct minijail *j, size_t size)
751 {
752 	j->tmpfs_size = size;
753 	j->flags.mount_tmp = 1;
754 }
755 
756 int API minijail_write_pid_file(struct minijail *j, const char *path)
757 {
758 	j->pid_file_path = strdup(path);
759 	if (!j->pid_file_path)
760 		return -ENOMEM;
761 	j->flags.pid_file = 1;
762 	return 0;
763 }
764 
765 int API minijail_add_to_cgroup(struct minijail *j, const char *path)
766 {
767 	if (j->cgroup_count >= MAX_CGROUPS)
768 		return -ENOMEM;
769 	j->cgroups[j->cgroup_count] = strdup(path);
770 	if (!j->cgroups[j->cgroup_count])
771 		return -ENOMEM;
772 	j->cgroup_count++;
773 	j->flags.cgroups = 1;
774 	return 0;
775 }
776 
777 int API minijail_rlimit(struct minijail *j, int type, rlim_t cur, rlim_t max)
778 {
779 	size_t i;
780 
781 	if (j->rlimit_count >= MAX_RLIMITS)
782 		return -ENOMEM;
783 	/* It's an error if the caller sets the same rlimit multiple times. */
784 	for (i = 0; i < j->rlimit_count; i++) {
785 		if (j->rlimits[i].type == type)
786 			return -EEXIST;
787 	}
788 
789 	j->rlimits[j->rlimit_count].type = type;
790 	j->rlimits[j->rlimit_count].cur = cur;
791 	j->rlimits[j->rlimit_count].max = max;
792 	j->rlimit_count++;
793 	return 0;
794 }
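/*
 * Illustrative rlimit configuration (the resource and limits are arbitrary);
 * setting the same |type| twice returns -EEXIST, as checked above.
 *
 *   if (minijail_rlimit(j, RLIMIT_NOFILE, 1024, 4096))
 *           die("failed to set RLIMIT_NOFILE");
 */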
795 
796 int API minijail_forward_signals(struct minijail *j)
797 {
798 	j->flags.forward_signals = 1;
799 	return 0;
800 }
801 
802 int API minijail_create_session(struct minijail *j) {
803 	j->flags.setsid = 1;
804 	return 0;
805 }
806 
807 int API minijail_mount_with_data(struct minijail *j, const char *src,
808 				 const char *dest, const char *type,
809 				 unsigned long flags, const char *data)
810 {
811 	struct mountpoint *m;
812 
813 	if (*dest != '/')
814 		return -EINVAL;
815 	m = calloc(1, sizeof(*m));
816 	if (!m)
817 		return -ENOMEM;
818 	m->dest = strdup(dest);
819 	if (!m->dest)
820 		goto error;
821 	m->src = strdup(src);
822 	if (!m->src)
823 		goto error;
824 	m->type = strdup(type);
825 	if (!m->type)
826 		goto error;
827 
828 	if (!data || !data[0]) {
829 		/*
830 		 * Set up secure defaults for certain filesystems.  Adding this
831 		 * fs-specific logic here kind of sucks, but considering how
832 		 * people use these in practice, it's probably OK.  If they want
833 		 * the kernel defaults, they can pass data="" instead of NULL.
834 		 */
835 		if (!strcmp(type, "tmpfs")) {
836 			/* tmpfs defaults to mode=1777 and size=50%. */
837 			data = "mode=0755,size=10M";
838 		}
839 	}
840 	if (data) {
841 		m->data = strdup(data);
842 		if (!m->data)
843 			goto error;
844 		m->has_data = 1;
845 	}
846 
847 	/* If they don't specify any flags, default to secure ones. */
848 	if (flags == 0)
849 		flags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
850 	m->flags = flags;
851 
852 	/*
853 	 * Unless asked to enter an existing namespace, force vfs namespacing
854 	 * so the mounts don't leak out into the containing vfs namespace.
855 	 * If Minijail is being asked to enter the root vfs namespace this will
856 	 * leak mounts, but it's unlikely that the user would ask to do that by
857 	 * mistake.
858 	 */
859 	if (!j->flags.enter_vfs)
860 		minijail_namespace_vfs(j);
861 
862 	if (j->mounts_tail)
863 		j->mounts_tail->next = m;
864 	else
865 		j->mounts_head = m;
866 	j->mounts_tail = m;
867 	j->mounts_count++;
868 
869 	return 0;
870 
871 error:
872 	free(m->type);
873 	free(m->src);
874 	free(m->dest);
875 	free(m);
876 	return -ENOMEM;
877 }
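/*
 * Illustrative mounts built on the helper above (destinations and options are
 * assumptions, not from a real config). Passing flags == 0 yields
 * MS_NODEV | MS_NOEXEC | MS_NOSUID, and a NULL |data| for tmpfs picks up the
 * "mode=0755,size=10M" default set above.
 *
 *   minijail_mount_with_data(j, "none", "/run", "tmpfs", 0,
 *                            "mode=0755,size=1M");
 *   minijail_mount_with_data(j, "proc", "/proc", "proc", 0, NULL);
 */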
878 
879 int API minijail_mount(struct minijail *j, const char *src, const char *dest,
880 		       const char *type, unsigned long flags)
881 {
882 	return minijail_mount_with_data(j, src, dest, type, flags, NULL);
883 }
884 
885 int API minijail_bind(struct minijail *j, const char *src, const char *dest,
886 		      int writeable)
887 {
888 	unsigned long flags = MS_BIND;
889 
890 	if (!writeable)
891 		flags |= MS_RDONLY;
892 
893 	return minijail_mount(j, src, dest, "", flags);
894 }
895 
896 int API minijail_add_remount(struct minijail *j, const char *mount_name,
897 			     unsigned long remount_mode)
898 {
899 	struct minijail_remount *m;
900 
901 	if (*mount_name != '/')
902 		return -EINVAL;
903 	m = calloc(1, sizeof(*m));
904 	if (!m)
905 		return -ENOMEM;
906 	m->mount_name = strdup(mount_name);
907 	if (!m->mount_name) {
908 		free(m);
909 		return -ENOMEM;
910 	}
911 
912 	m->remount_mode = remount_mode;
913 
914 	if (j->remounts_tail)
915 		j->remounts_tail->next = m;
916 	else
917 		j->remounts_head = m;
918 	j->remounts_tail = m;
919 
920 	return 0;
921 }
922 
923 int API minijail_add_hook(struct minijail *j, minijail_hook_t hook,
924 			  void *payload, minijail_hook_event_t event)
925 {
926 	struct hook *c;
927 
928 	if (hook == NULL)
929 		return -EINVAL;
930 	if (event >= MINIJAIL_HOOK_EVENT_MAX)
931 		return -EINVAL;
932 	c = calloc(1, sizeof(*c));
933 	if (!c)
934 		return -ENOMEM;
935 
936 	c->hook = hook;
937 	c->payload = payload;
938 	c->event = event;
939 
940 	if (j->hooks_tail)
941 		j->hooks_tail->next = c;
942 	else
943 		j->hooks_head = c;
944 	j->hooks_tail = c;
945 
946 	return 0;
947 }
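/*
 * Hook registration sketch. The hook body is hypothetical and assumes the
 * minijail_hook_t signature from libminijail.h, i.e. an int (*)(void *) that
 * returns 0 on success.
 *
 *   static int log_pre_chroot(void *payload)
 *   {
 *           warn("about to chroot (payload=%p)", payload);
 *           return 0;
 *   }
 *
 *   minijail_add_hook(j, log_pre_chroot, NULL,
 *                     MINIJAIL_HOOK_EVENT_PRE_CHROOT);
 */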
948 
949 int API minijail_preserve_fd(struct minijail *j, int parent_fd, int child_fd)
950 {
951 	if (parent_fd < 0 || child_fd < 0)
952 		return -EINVAL;
953 	if (j->preserved_fd_count >= MAX_PRESERVED_FDS)
954 		return -ENOMEM;
955 	j->preserved_fds[j->preserved_fd_count].parent_fd = parent_fd;
956 	j->preserved_fds[j->preserved_fd_count].child_fd = child_fd;
957 	j->preserved_fd_count++;
958 	return 0;
959 }
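/*
 * Illustrative fd preservation: keep a pipe's write end open in the child as
 * fd 3 (the descriptor numbers are arbitrary). This is typically paired with
 * minijail_close_open_fds() so only preserved descriptors survive in the
 * child.
 *
 *   int fds[2];
 *   if (pipe(fds))
 *           pdie("pipe");
 *   minijail_preserve_fd(j, fds[1], 3);
 *   minijail_close_open_fds(j);
 */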
960 
961 int API minijail_set_preload_path(struct minijail *j, const char *preload_path)
962 {
963 	if (j->preload_path)
964 		return -EINVAL;
965 	j->preload_path = strdup(preload_path);
966 	if (!j->preload_path)
967 		return -ENOMEM;
968 	return 0;
969 }
970 
971 static void clear_seccomp_options(struct minijail *j)
972 {
973 	j->flags.seccomp_filter = 0;
974 	j->flags.seccomp_filter_tsync = 0;
975 	j->flags.seccomp_filter_logging = 0;
976 	j->flags.seccomp_filter_allow_speculation = 0;
977 	j->filter_len = 0;
978 	j->filter_prog = NULL;
979 	j->flags.no_new_privs = 0;
980 }
981 
982 static int seccomp_should_use_filters(struct minijail *j)
983 {
984 	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL) == -1) {
985 		/*
986 		 * |errno| will be set to EINVAL when seccomp has not been
987 		 * compiled into the kernel. On certain platforms and kernel
988 		 * versions this is not a fatal failure. In that case, and only
989 		 * in that case, disable seccomp and skip loading the filters.
990 		 */
991 		if ((errno == EINVAL) && seccomp_can_softfail()) {
992 			warn("not loading seccomp filters, seccomp filter not "
993 			     "supported");
994 			clear_seccomp_options(j);
995 			return 0;
996 		}
997 		/*
998 		 * If |errno| != EINVAL or seccomp_can_softfail() is false,
999 		 * we can proceed. Worst case scenario minijail_enter() will
1000 		 * abort() if seccomp fails.
1001 		 */
1002 	}
1003 	if (j->flags.seccomp_filter_tsync) {
1004 		/* Are the seccomp(2) syscall and the TSYNC option supported? */
1005 		if (sys_seccomp(SECCOMP_SET_MODE_FILTER,
1006 				SECCOMP_FILTER_FLAG_TSYNC, NULL) == -1) {
1007 			int saved_errno = errno;
1008 			if (saved_errno == ENOSYS && seccomp_can_softfail()) {
1009 				warn("seccomp(2) syscall not supported");
1010 				clear_seccomp_options(j);
1011 				return 0;
1012 			} else if (saved_errno == EINVAL &&
1013 				   seccomp_can_softfail()) {
1014 				warn(
1015 				    "seccomp filter thread sync not supported");
1016 				clear_seccomp_options(j);
1017 				return 0;
1018 			}
1019 			/*
1020 			 * Similar logic here. If seccomp_can_softfail() is
1021 			 * false, or |errno| != ENOSYS, or |errno| != EINVAL,
1022 			 * we can proceed. Worst case scenario minijail_enter()
1023 			 * will abort() if seccomp or TSYNC fail.
1024 			 */
1025 		}
1026 	}
1027 	if (j->flags.seccomp_filter_allow_speculation) {
1028 		/* Is the SPEC_ALLOW flag supported? */
1029 		if (!seccomp_filter_flags_available(
1030 			SECCOMP_FILTER_FLAG_SPEC_ALLOW)) {
1031 			warn("allowing speculative execution on seccomp "
1032 			     "processes not supported");
1033 			j->flags.seccomp_filter_allow_speculation = 0;
1034 		}
1035 	}
1036 	return 1;
1037 }
1038 
1039 static int set_seccomp_filters_internal(struct minijail *j,
1040 					const struct sock_fprog *filter,
1041 					bool owned)
1042 {
1043 	struct sock_fprog *fprog;
1044 
1045 	if (owned) {
1046 		/*
1047 		 * If |owned| is true, it's OK to cast away the const-ness since
1048 		 * we'll own the pointer going forward.
1049 		 */
1050 		fprog = (struct sock_fprog *)filter;
1051 	} else {
1052 		fprog = malloc(sizeof(struct sock_fprog));
1053 		if (!fprog)
1054 			return -ENOMEM;
1055 		fprog->len = filter->len;
1056 		fprog->filter = malloc(sizeof(struct sock_filter) * fprog->len);
1057 		if (!fprog->filter) {
1058 			free(fprog);
1059 			return -ENOMEM;
1060 		}
1061 		memcpy(fprog->filter, filter->filter,
1062 		       sizeof(struct sock_filter) * fprog->len);
1063 	}
1064 
1065 	if (j->filter_prog) {
1066 		free(j->filter_prog->filter);
1067 		free(j->filter_prog);
1068 	}
1069 
1070 	j->filter_len = fprog->len;
1071 	j->filter_prog = fprog;
1072 	return 0;
1073 }
1074 
1075 static int parse_seccomp_filters(struct minijail *j, const char *filename,
1076 				 FILE *policy_file)
1077 {
1078 	struct sock_fprog *fprog = malloc(sizeof(struct sock_fprog));
1079 	if (!fprog)
1080 		return -ENOMEM;
1081 
1082 	struct filter_options filteropts;
1083 
1084 	/*
1085 	 * Figure out filter options.
1086 	 * Allow logging?
1087 	 */
1088 	filteropts.allow_logging =
1089 	    debug_logging_allowed() && j->flags.seccomp_filter_logging;
1090 
1091 	/* What to do on a blocked system call? */
1092 	if (filteropts.allow_logging) {
1093 		if (seccomp_ret_log_available())
1094 			filteropts.action = ACTION_RET_LOG;
1095 		else
1096 			filteropts.action = ACTION_RET_TRAP;
1097 	} else {
1098 		if (j->flags.seccomp_filter_tsync) {
1099 			if (seccomp_ret_kill_process_available()) {
1100 				filteropts.action = ACTION_RET_KILL_PROCESS;
1101 			} else {
1102 				filteropts.action = ACTION_RET_TRAP;
1103 			}
1104 		} else {
1105 			filteropts.action = ACTION_RET_KILL;
1106 		}
1107 	}
1108 
1109 	/*
1110 	 * If SECCOMP_RET_LOG is not available, need to allow extra syscalls
1111 	 * for logging.
1112 	 */
1113 	filteropts.allow_syscalls_for_logging =
1114 	    filteropts.allow_logging && !seccomp_ret_log_available();
1115 
1116 	/* Whether to fail on duplicate syscalls. */
1117 	filteropts.allow_duplicate_syscalls = allow_duplicate_syscalls();
1118 
1119 	if (compile_filter(filename, policy_file, fprog, &filteropts)) {
1120 		free(fprog);
1121 		return -1;
1122 	}
1123 
1124 	return set_seccomp_filters_internal(j, fprog, true /* owned */);
1125 }
1126 
1127 void API minijail_parse_seccomp_filters(struct minijail *j, const char *path)
1128 {
1129 	if (!seccomp_should_use_filters(j))
1130 		return;
1131 
1132 	FILE *file = fopen(path, "re");
1133 	if (!file) {
1134 		pdie("failed to open seccomp filter file '%s'", path);
1135 	}
1136 
1137 	if (parse_seccomp_filters(j, path, file) != 0) {
1138 		die("failed to compile seccomp filter BPF program in '%s'",
1139 		    path);
1140 	}
1141 	fclose(file);
1142 }
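/*
 * Typical policy-file flow (the policy path is hypothetical). The logging and
 * tsync toggles above must be set before parsing; the compiled filter is only
 * installed later, when the jail is entered.
 *
 *   minijail_no_new_privs(j);
 *   minijail_use_seccomp_filter(j);
 *   minijail_parse_seccomp_filters(j, "/usr/share/policy/myprog.policy");
 */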
1143 
1144 void API minijail_parse_seccomp_filters_from_fd(struct minijail *j, int fd)
1145 {
1146 	char *fd_path, *path;
1147 	FILE *file;
1148 
1149 	if (!seccomp_should_use_filters(j))
1150 		return;
1151 
1152 	file = fdopen(fd, "r");
1153 	if (!file) {
1154 		pdie("failed to associate stream with fd %d", fd);
1155 	}
1156 
1157 	if (asprintf(&fd_path, "/proc/self/fd/%d", fd) == -1)
1158 		pdie("failed to create path for fd %d", fd);
1159 	path = realpath(fd_path, NULL);
1160 	if (path == NULL)
1161 		pwarn("failed to get path of fd %d", fd);
1162 	free(fd_path);
1163 
1164 	if (parse_seccomp_filters(j, path ? path : "<fd>", file) != 0) {
1165 		die("failed to compile seccomp filter BPF program from fd %d",
1166 		    fd);
1167 	}
1168 	free(path);
1169 	fclose(file);
1170 }
1171 
1172 void API minijail_set_seccomp_filters(struct minijail *j,
1173 				      const struct sock_fprog *filter)
1174 {
1175 	if (!seccomp_should_use_filters(j))
1176 		return;
1177 
1178 	if (j->flags.seccomp_filter_logging) {
1179 		die("minijail_log_seccomp_filter_failures() is incompatible "
1180 		    "with minijail_set_seccomp_filters()");
1181 	}
1182 
1183 	/*
1184 	 * set_seccomp_filters_internal() can only fail with ENOMEM.
1185 	 * Furthermore, since we won't own the incoming filter, it will not be
1186 	 * modified.
1187 	 */
1188 	if (set_seccomp_filters_internal(j, filter, false /* owned */) < 0) {
1189 		die("failed to set seccomp filter");
1190 	}
1191 }
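/*
 * Sketch of handing a precompiled BPF program to
 * minijail_set_seccomp_filters(). The allow-all filter below is purely
 * illustrative (it provides no actual sandboxing), and SECCOMP_RET_ALLOW
 * comes from linux/seccomp.h, which this file does not include directly.
 *
 *   struct sock_filter insns[] = {
 *           BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *   };
 *   struct sock_fprog prog = {
 *           .len = ARRAY_SIZE(insns),
 *           .filter = insns,
 *   };
 *   minijail_use_seccomp_filter(j);
 *   minijail_set_seccomp_filters(j, &prog);
 */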
1192 
1193 int API minijail_use_alt_syscall(struct minijail *j, const char *table)
1194 {
1195 	j->alt_syscall_table = strdup(table);
1196 	if (!j->alt_syscall_table)
1197 		return -ENOMEM;
1198 	j->flags.alt_syscall = 1;
1199 	return 0;
1200 }
1201 
1202 struct marshal_state {
1203 	size_t available;
1204 	size_t total;
1205 	char *buf;
1206 };
1207 
1208 static void marshal_state_init(struct marshal_state *state, char *buf,
1209 			       size_t available)
1210 {
1211 	state->available = available;
1212 	state->buf = buf;
1213 	state->total = 0;
1214 }
1215 
1216 static void marshal_append(struct marshal_state *state, const void *src,
1217 			   size_t length)
1218 {
1219 	size_t copy_len = MIN(state->available, length);
1220 
1221 	/* Up to |available| will be written. */
1222 	if (copy_len) {
1223 		memcpy(state->buf, src, copy_len);
1224 		state->buf += copy_len;
1225 		state->available -= copy_len;
1226 	}
1227 	/* |total| will contain the expected length. */
1228 	state->total += length;
1229 }
1230 
1231 static void marshal_append_string(struct marshal_state *state, const char *src)
1232 {
1233 	marshal_append(state, src, strlen(src) + 1);
1234 }
1235 
1236 static void marshal_mount(struct marshal_state *state,
1237 			  const struct mountpoint *m)
1238 {
1239 	marshal_append(state, m->src, strlen(m->src) + 1);
1240 	marshal_append(state, m->dest, strlen(m->dest) + 1);
1241 	marshal_append(state, m->type, strlen(m->type) + 1);
1242 	marshal_append(state, (char *)&m->has_data, sizeof(m->has_data));
1243 	if (m->has_data)
1244 		marshal_append(state, m->data, strlen(m->data) + 1);
1245 	marshal_append(state, (char *)&m->flags, sizeof(m->flags));
1246 }
1247 
1248 static void minijail_marshal_helper(struct marshal_state *state,
1249 				    const struct minijail *j)
1250 {
1251 	struct mountpoint *m = NULL;
1252 	size_t i;
1253 
1254 	marshal_append(state, (char *)j, sizeof(*j));
1255 	if (j->user)
1256 		marshal_append_string(state, j->user);
1257 	if (j->suppl_gid_list) {
1258 		marshal_append(state, j->suppl_gid_list,
1259 			       j->suppl_gid_count * sizeof(gid_t));
1260 	}
1261 	if (j->chrootdir)
1262 		marshal_append_string(state, j->chrootdir);
1263 	if (j->hostname)
1264 		marshal_append_string(state, j->hostname);
1265 	if (j->alt_syscall_table) {
1266 		marshal_append(state, j->alt_syscall_table,
1267 			       strlen(j->alt_syscall_table) + 1);
1268 	}
1269 	if (j->flags.seccomp_filter && j->filter_prog) {
1270 		struct sock_fprog *fp = j->filter_prog;
1271 		marshal_append(state, (char *)fp->filter,
1272 			       fp->len * sizeof(struct sock_filter));
1273 	}
1274 	for (m = j->mounts_head; m; m = m->next) {
1275 		marshal_mount(state, m);
1276 	}
1277 	for (i = 0; i < j->cgroup_count; ++i)
1278 		marshal_append_string(state, j->cgroups[i]);
1279 }
1280 
1281 size_t API minijail_size(const struct minijail *j)
1282 {
1283 	struct marshal_state state;
1284 	marshal_state_init(&state, NULL, 0);
1285 	minijail_marshal_helper(&state, j);
1286 	return state.total;
1287 }
1288 
1289 int minijail_marshal(const struct minijail *j, char *buf, size_t available)
1290 {
1291 	struct marshal_state state;
1292 	marshal_state_init(&state, buf, available);
1293 	minijail_marshal_helper(&state, j);
1294 	return (state.total > available);
1295 }
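/*
 * Intended two-pass use of minijail_size()/minijail_marshal() (the buffer
 * handling is a sketch): size the jail first, then serialize into an
 * exactly-sized buffer. write_exactly() can then push the buffer over a pipe,
 * and minijail_unmarshal() reconstructs the jail on the receiving side.
 *
 *   size_t sz = minijail_size(j);
 *   char *buf = malloc(sz);
 *   if (!buf || minijail_marshal(j, buf, sz))
 *           die("failed to marshal minijail");
 *   ...
 *   free(buf);
 */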
1296 
1297 int minijail_unmarshal(struct minijail *j, char *serialized, size_t length)
1298 {
1299 	size_t i;
1300 	size_t count;
1301 	int ret = -EINVAL;
1302 
1303 	if (length < sizeof(*j))
1304 		goto out;
1305 	memcpy((void *)j, serialized, sizeof(*j));
1306 	serialized += sizeof(*j);
1307 	length -= sizeof(*j);
1308 
1309 	/* Potentially stale pointers not used as signals. */
1310 	j->preload_path = NULL;
1311 	j->pid_file_path = NULL;
1312 	j->uidmap = NULL;
1313 	j->gidmap = NULL;
1314 	j->mounts_head = NULL;
1315 	j->mounts_tail = NULL;
1316 	j->remounts_head = NULL;
1317 	j->remounts_tail = NULL;
1318 	j->filter_prog = NULL;
1319 	j->hooks_head = NULL;
1320 	j->hooks_tail = NULL;
1321 
1322 	if (j->user) {		/* stale pointer */
1323 		char *user = consumestr(&serialized, &length);
1324 		if (!user)
1325 			goto clear_pointers;
1326 		j->user = strdup(user);
1327 		if (!j->user)
1328 			goto clear_pointers;
1329 	}
1330 
1331 	if (j->suppl_gid_list) {	/* stale pointer */
1332 		if (j->suppl_gid_count > NGROUPS_MAX) {
1333 			goto bad_gid_list;
1334 		}
1335 		size_t gid_list_size = j->suppl_gid_count * sizeof(gid_t);
1336 		void *gid_list_bytes =
1337 		    consumebytes(gid_list_size, &serialized, &length);
1338 		if (!gid_list_bytes)
1339 			goto bad_gid_list;
1340 
1341 		j->suppl_gid_list = calloc(j->suppl_gid_count, sizeof(gid_t));
1342 		if (!j->suppl_gid_list)
1343 			goto bad_gid_list;
1344 
1345 		memcpy(j->suppl_gid_list, gid_list_bytes, gid_list_size);
1346 	}
1347 
1348 	if (j->chrootdir) {	/* stale pointer */
1349 		char *chrootdir = consumestr(&serialized, &length);
1350 		if (!chrootdir)
1351 			goto bad_chrootdir;
1352 		j->chrootdir = strdup(chrootdir);
1353 		if (!j->chrootdir)
1354 			goto bad_chrootdir;
1355 	}
1356 
1357 	if (j->hostname) {	/* stale pointer */
1358 		char *hostname = consumestr(&serialized, &length);
1359 		if (!hostname)
1360 			goto bad_hostname;
1361 		j->hostname = strdup(hostname);
1362 		if (!j->hostname)
1363 			goto bad_hostname;
1364 	}
1365 
1366 	if (j->alt_syscall_table) {	/* stale pointer */
1367 		char *alt_syscall_table = consumestr(&serialized, &length);
1368 		if (!alt_syscall_table)
1369 			goto bad_syscall_table;
1370 		j->alt_syscall_table = strdup(alt_syscall_table);
1371 		if (!j->alt_syscall_table)
1372 			goto bad_syscall_table;
1373 	}
1374 
1375 	if (j->flags.seccomp_filter && j->filter_len > 0) {
1376 		size_t ninstrs = j->filter_len;
1377 		if (ninstrs > (SIZE_MAX / sizeof(struct sock_filter)) ||
1378 		    ninstrs > USHRT_MAX)
1379 			goto bad_filters;
1380 
1381 		size_t program_len = ninstrs * sizeof(struct sock_filter);
1382 		void *program = consumebytes(program_len, &serialized, &length);
1383 		if (!program)
1384 			goto bad_filters;
1385 
1386 		j->filter_prog = malloc(sizeof(struct sock_fprog));
1387 		if (!j->filter_prog)
1388 			goto bad_filters;
1389 
1390 		j->filter_prog->len = ninstrs;
1391 		j->filter_prog->filter = malloc(program_len);
1392 		if (!j->filter_prog->filter)
1393 			goto bad_filter_prog_instrs;
1394 
1395 		memcpy(j->filter_prog->filter, program, program_len);
1396 	}
1397 
1398 	count = j->mounts_count;
1399 	j->mounts_count = 0;
1400 	for (i = 0; i < count; ++i) {
1401 		unsigned long *flags;
1402 		int *has_data;
1403 		const char *dest;
1404 		const char *type;
1405 		const char *data = NULL;
1406 		const char *src = consumestr(&serialized, &length);
1407 		if (!src)
1408 			goto bad_mounts;
1409 		dest = consumestr(&serialized, &length);
1410 		if (!dest)
1411 			goto bad_mounts;
1412 		type = consumestr(&serialized, &length);
1413 		if (!type)
1414 			goto bad_mounts;
1415 		has_data = consumebytes(sizeof(*has_data), &serialized,
1416 					&length);
1417 		if (!has_data)
1418 			goto bad_mounts;
1419 		if (*has_data) {
1420 			data = consumestr(&serialized, &length);
1421 			if (!data)
1422 				goto bad_mounts;
1423 		}
1424 		flags = consumebytes(sizeof(*flags), &serialized, &length);
1425 		if (!flags)
1426 			goto bad_mounts;
1427 		if (minijail_mount_with_data(j, src, dest, type, *flags, data))
1428 			goto bad_mounts;
1429 	}
1430 
1431 	count = j->cgroup_count;
1432 	j->cgroup_count = 0;
1433 	for (i = 0; i < count; ++i) {
1434 		char *cgroup = consumestr(&serialized, &length);
1435 		if (!cgroup)
1436 			goto bad_cgroups;
1437 		j->cgroups[i] = strdup(cgroup);
1438 		if (!j->cgroups[i])
1439 			goto bad_cgroups;
1440 		++j->cgroup_count;
1441 	}
1442 
1443 	return 0;
1444 
1445 bad_cgroups:
1446 	free_mounts_list(j);
1447 	free_remounts_list(j);
1448 	for (i = 0; i < j->cgroup_count; ++i)
1449 		free(j->cgroups[i]);
1450 bad_mounts:
1451 	if (j->filter_prog && j->filter_prog->filter)
1452 		free(j->filter_prog->filter);
1453 bad_filter_prog_instrs:
1454 	if (j->filter_prog)
1455 		free(j->filter_prog);
1456 bad_filters:
1457 	if (j->alt_syscall_table)
1458 		free(j->alt_syscall_table);
1459 bad_syscall_table:
1460 	if (j->chrootdir)
1461 		free(j->chrootdir);
1462 bad_chrootdir:
1463 	if (j->hostname)
1464 		free(j->hostname);
1465 bad_hostname:
1466 	if (j->suppl_gid_list)
1467 		free(j->suppl_gid_list);
1468 bad_gid_list:
1469 	if (j->user)
1470 		free(j->user);
1471 clear_pointers:
1472 	j->user = NULL;
1473 	j->suppl_gid_list = NULL;
1474 	j->chrootdir = NULL;
1475 	j->hostname = NULL;
1476 	j->alt_syscall_table = NULL;
1477 	j->cgroup_count = 0;
1478 out:
1479 	return ret;
1480 }
1481 
1482 struct dev_spec {
1483 	const char *name;
1484 	mode_t mode;
1485 	dev_t major, minor;
1486 };
1487 
1488 static const struct dev_spec device_nodes[] = {
1489 	{
1490 		"null",
1491 		S_IFCHR | 0666, 1, 3,
1492 	},
1493 	{
1494 		"zero",
1495 		S_IFCHR | 0666, 1, 5,
1496 	},
1497 	{
1498 		"full",
1499 		S_IFCHR | 0666, 1, 7,
1500 	},
1501 	{
1502 		"urandom",
1503 		S_IFCHR | 0444, 1, 9,
1504 	},
1505 	{
1506 		"tty",
1507 		S_IFCHR | 0666, 5, 0,
1508 	},
1509 };
1510 
1511 struct dev_sym_spec {
1512 	const char *source, *dest;
1513 };
1514 
1515 static const struct dev_sym_spec device_symlinks[] = {
1516 	{ "ptmx", "pts/ptmx", },
1517 	{ "fd", "/proc/self/fd", },
1518 	{ "stdin", "fd/0", },
1519 	{ "stdout", "fd/1", },
1520 	{ "stderr", "fd/2", },
1521 };
1522 
1523 /*
1524  * Clean up the temporary dev path we had set up previously.  In case of errors,
1525  * we don't want to go leaking empty tempdirs.
1526  */
1527 static void mount_dev_cleanup(char *dev_path)
1528 {
1529 	umount2(dev_path, MNT_DETACH);
1530 	rmdir(dev_path);
1531 	free(dev_path);
1532 }
1533 
1534 /*
1535  * Set up the pseudo /dev path at the temporary location.
1536  * See mount_dev_finalize for more details.
1537  */
1538 static int mount_dev(char **dev_path_ret)
1539 {
1540 	int ret;
1541 	int dev_fd;
1542 	size_t i;
1543 	mode_t mask;
1544 	char *dev_path;
1545 
1546 	/*
1547 	 * Create a temp path for the /dev init.  We'll relocate this to the
1548 	 * final location later on in the startup process.
1549 	 */
1550 	dev_path = *dev_path_ret = strdup("/tmp/minijail.dev.XXXXXX");
1551 	if (dev_path == NULL || mkdtemp(dev_path) == NULL)
1552 		pdie("could not create temp path for /dev");
1553 
1554 	/* Set up the empty /dev mount point first. */
1555 	ret = mount("minijail-devfs", dev_path, "tmpfs",
1556 	            MS_NOEXEC | MS_NOSUID, "size=5M,mode=755");
1557 	if (ret) {
1558 		rmdir(dev_path);
1559 		return ret;
1560 	}
1561 
1562 	/* We want to set the mode directly from the spec. */
1563 	mask = umask(0);
1564 
1565 	/* Get a handle to the temp dev path for *at funcs below. */
1566 	dev_fd = open(dev_path, O_DIRECTORY|O_PATH|O_CLOEXEC);
1567 	if (dev_fd < 0) {
1568 		ret = 1;
1569 		goto done;
1570 	}
1571 
1572 	/* Create all the nodes in /dev. */
1573 	for (i = 0; i < ARRAY_SIZE(device_nodes); ++i) {
1574 		const struct dev_spec *ds = &device_nodes[i];
1575 		ret = mknodat(dev_fd, ds->name, ds->mode,
1576 		              makedev(ds->major, ds->minor));
1577 		if (ret)
1578 			goto done;
1579 	}
1580 
1581 	/* Create all the symlinks in /dev. */
1582 	for (i = 0; i < ARRAY_SIZE(device_symlinks); ++i) {
1583 		const struct dev_sym_spec *ds = &device_symlinks[i];
1584 		ret = symlinkat(ds->dest, dev_fd, ds->source);
1585 		if (ret)
1586 			goto done;
1587 	}
1588 
1589 	/* Create empty dir for glibc shared mem APIs. */
1590 	ret = mkdirat(dev_fd, "shm", 01777);
1591 	if (ret)
1592 		goto done;
1593 
1594 	/* Restore old mask. */
1595  done:
1596 	close(dev_fd);
1597 	umask(mask);
1598 
1599 	if (ret)
1600 		mount_dev_cleanup(dev_path);
1601 
1602 	return ret;
1603 }
1604 
1605 /*
1606  * Relocate the temporary /dev mount to its final /dev place.
1607  * We have to do this two step process so people can bind mount extra
1608  * /dev paths like /dev/log.
1609  */
1610 static int mount_dev_finalize(const struct minijail *j, char *dev_path)
1611 {
1612 	int ret = -1;
1613 	char *dest = NULL;
1614 
1615 	/* Unmount the /dev mount if possible. */
1616 	if (umount2("/dev", MNT_DETACH))
1617 		goto done;
1618 
1619 	if (asprintf(&dest, "%s/dev", j->chrootdir ? : "") < 0)
1620 		goto done;
1621 
1622 	if (mount(dev_path, dest, NULL, MS_MOVE, NULL))
1623 		goto done;
1624 
1625 	ret = 0;
1626  done:
1627 	free(dest);
1628 	mount_dev_cleanup(dev_path);
1629 
1630 	return ret;
1631 }
1632 
1633 /*
1634  * mount_one: Applies mounts from @m for @j, recursing as needed.
1635  * @j Minijail these mounts are for
1636  * @m Head of list of mounts
1637  *
1638  * Returns 0 for success.
1639  */
1640 static int mount_one(const struct minijail *j, struct mountpoint *m,
1641 		     const char *dev_path)
1642 {
1643 	int ret;
1644 	char *dest;
1645 	int remount = 0;
1646 	unsigned long original_mnt_flags = 0;
1647 
1648 	/* We assume |dest| has a leading "/". */
1649 	if (dev_path && strncmp("/dev/", m->dest, 5) == 0) {
1650 		/*
1651 		 * Since the temp path is rooted at /dev, skip that dest part.
1652 		 */
1653 		if (asprintf(&dest, "%s%s", dev_path, m->dest + 4) < 0)
1654 			return -ENOMEM;
1655 	} else {
1656 		if (asprintf(&dest, "%s%s", j->chrootdir ?: "", m->dest) < 0)
1657 			return -ENOMEM;
1658 	}
1659 
1660 	ret =
1661 	    setup_mount_destination(m->src, dest, j->uid, j->gid,
1662 				    (m->flags & MS_BIND), &original_mnt_flags);
1663 	if (ret) {
1664 		warn("cannot create mount target '%s'", dest);
1665 		goto error;
1666 	}
1667 
1668 	/*
1669 	 * Bind mounts that change the 'ro' flag have to be remounted since
1670 	 * 'bind' and other flags can't both be specified in the same command.
1671 	 * Remount after the initial mount.
1672 	 */
1673 	if ((m->flags & MS_BIND) &&
1674 	    ((m->flags & MS_RDONLY) != (original_mnt_flags & MS_RDONLY))) {
1675 		remount = 1;
1676 		/*
1677 		 * Restrict the mount flags to those that are user-settable in a
1678 		 * MS_REMOUNT request, but excluding MS_RDONLY. The
1679 		 * user-requested mount flags will dictate whether the remount
1680 		 * will have that flag or not.
1681 		 */
1682 		original_mnt_flags &= (MS_USER_SETTABLE_MASK & ~MS_RDONLY);
1683 	}
1684 
1685 	ret = mount(m->src, dest, m->type, m->flags, m->data);
1686 	if (ret) {
1687 		pwarn("cannot bind-mount '%s' as '%s' with flags %#lx", m->src,
1688 		      dest, m->flags);
1689 		goto error;
1690 	}
1691 
1692 	if (remount) {
1693 		ret =
1694 		    mount(m->src, dest, NULL,
1695 			  m->flags | original_mnt_flags | MS_REMOUNT, m->data);
1696 		if (ret) {
1697 			pwarn(
1698 			    "cannot bind-remount '%s' as '%s' with flags %#lx",
1699 			    m->src, dest,
1700 			    m->flags | original_mnt_flags | MS_REMOUNT);
1701 			goto error;
1702 		}
1703 	}
1704 
1705 	free(dest);
1706 	if (m->next)
1707 		return mount_one(j, m->next, dev_path);
1708 	return 0;
1709 
1710 error:
1711 	free(dest);
1712 	return ret;
1713 }
1714 
1715 static void process_mounts_or_die(const struct minijail *j)
1716 {
1717 	/*
1718 	 * We have to mount /dev first in case there are bind mounts from
1719 	 * the original /dev into the new unique tmpfs one.
1720 	 */
1721 	char *dev_path = NULL;
1722 	if (j->flags.mount_dev && mount_dev(&dev_path))
1723 		pdie("mount_dev failed");
1724 
1725 	if (j->mounts_head && mount_one(j, j->mounts_head, dev_path)) {
1726 		if (dev_path)
1727 			mount_dev_cleanup(dev_path);
1728 
1729 		_exit(MINIJAIL_ERR_MOUNT);
1730 	}
1731 
1732 	/*
1733 	 * Once all bind mounts have been processed, move the temp dev to
1734 	 * its final /dev home.
1735 	 */
1736 	if (j->flags.mount_dev && mount_dev_finalize(j, dev_path))
1737 		pdie("mount_dev_finalize failed");
1738 }
1739 
1740 static int enter_chroot(const struct minijail *j)
1741 {
1742 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_CHROOT);
1743 
1744 	if (chroot(j->chrootdir))
1745 		return -errno;
1746 
1747 	if (chdir("/"))
1748 		return -errno;
1749 
1750 	return 0;
1751 }
1752 
1753 static int enter_pivot_root(const struct minijail *j)
1754 {
1755 	int oldroot, newroot;
1756 
1757 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_CHROOT);
1758 
1759 	/*
1760 	 * Keep the fd for both old and new root.
1761 	 * It will be used in fchdir(2) later.
1762 	 */
1763 	oldroot = open("/", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
1764 	if (oldroot < 0)
1765 		pdie("failed to open / for fchdir");
1766 	newroot = open(j->chrootdir, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
1767 	if (newroot < 0)
1768 		pdie("failed to open %s for fchdir", j->chrootdir);
1769 
1770 	/*
1771 	 * To ensure j->chrootdir is the root of a filesystem,
1772 	 * do a self bind mount.
1773 	 */
1774 	if (mount(j->chrootdir, j->chrootdir, "bind", MS_BIND | MS_REC, ""))
1775 		pdie("failed to bind mount '%s'", j->chrootdir);
1776 	if (chdir(j->chrootdir))
1777 		return -errno;
1778 	if (syscall(SYS_pivot_root, ".", "."))
1779 		pdie("pivot_root");
1780 
1781 	/*
1782 	 * Now the old root is mounted on top of the new root. Use fchdir(2) to
1783 	 * change to the old root and unmount it.
1784 	 */
1785 	if (fchdir(oldroot))
1786 		pdie("failed to fchdir to old /");
1787 
1788 	/*
1789 	 * If skip_remount_private was enabled for minijail_enter(),
1790 	 * there could be a shared mount point under |oldroot|. In that case,
1791 	 * mounts under this shared mount point will be unmounted below, and
1792 	 * this unmounting will propagate to the original mount namespace
1793 	 * (because the mount point is shared). To prevent this unexpected
1794 	 * unmounting, remove these mounts from their peer groups by recursively
1795 	 * remounting them as MS_PRIVATE.
1796 	 */
1797 	if (mount(NULL, ".", NULL, MS_REC | MS_PRIVATE, NULL))
1798 		pdie("failed to mount(/, private) before umount(/)");
1799 	/* The old root might be busy, so use lazy unmount. */
1800 	if (umount2(".", MNT_DETACH))
1801 		pdie("umount(/)");
1802 	/* Change back to the new root. */
1803 	if (fchdir(newroot))
1804 		return -errno;
1805 	if (close(oldroot))
1806 		return -errno;
1807 	if (close(newroot))
1808 		return -errno;
1809 	if (chroot("/"))
1810 		return -errno;
1811 	/* Set correct CWD for getcwd(3). */
1812 	if (chdir("/"))
1813 		return -errno;
1814 
1815 	return 0;
1816 }
1817 
1818 static int mount_tmp(const struct minijail *j)
1819 {
1820 	const char fmt[] = "size=%zu,mode=1777";
1821 	/* Count for the user storing ULLONG_MAX literally + extra space. */
1822 	char data[sizeof(fmt) + sizeof("18446744073709551615ULL")];
1823 	int ret;
1824 
1825 	ret = snprintf(data, sizeof(data), fmt, j->tmpfs_size);
1826 
1827 	if (ret <= 0)
1828 		pdie("tmpfs size spec error");
1829 	else if ((size_t)ret >= sizeof(data))
1830 		pdie("tmpfs size spec too large");
1831 	return mount("none", "/tmp", "tmpfs", MS_NODEV | MS_NOEXEC | MS_NOSUID,
1832 		     data);
1833 }
1834 
1835 static int remount_proc_readonly(const struct minijail *j)
1836 {
1837 	const char *kProcPath = "/proc";
1838 	const unsigned int kSafeFlags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
1839 	/*
1840 	 * Right now, we're holding a reference to our parent's old mount of
1841 	 * /proc in our namespace, which means using MS_REMOUNT here would
1842 	 * mutate our parent's mount as well, even though we're in a VFS
1843 	 * namespace (!). Instead, remove their mount from our namespace lazily
1844 	 * (MNT_DETACH) and make our own.
1845 	 *
1846 	 * However, we skip this in the user namespace case because it will
1847 	 * invariably fail. Every mount namespace is "owned" by the
1848 	 * user namespace of the process that creates it. Mount namespace A is
1849 	 * "less privileged" than mount namespace B if A is created off of B,
1850 	 * and B is owned by a different user namespace.
1851 	 * When a less privileged mount namespace is created, the mounts used to
1852 	 * initialize it (coming from the more privileged mount namespace) come
1853 	 * as a unit, and are locked together. This means that code running in
1854 	 * the new mount (and user) namespace cannot piecemeal unmount
1855 	 * individual mounts inherited from a more privileged mount namespace.
1856 	 * See https://man7.org/linux/man-pages/man7/mount_namespaces.7.html,
1857 	 * "Restrictions on mount namespaces" for details.
1858 	 *
1859 	 * This happens in our use case because we first enter a new user
1860 	 * namespace (on clone(2)) and then we unshare(2) a new mount namespace,
1861 	 * which means the new mount namespace is less privileged than its
1862 	 * parent mount namespace. This would also happen if we entered a new
1863 	 * mount namespace on clone(2), since the user namespace is created
1864 	 * first.
1865 	 * In all other non-user-namespace cases the new mount namespace is
1866 	 * similarly privileged as the parent mount namespace so unmounting a
1867 	 * single mount is allowed.
1868 	 *
1869 	 * We still remount /proc as read-only in the user namespace case
1870 	 * because while a process with CAP_SYS_ADMIN in the new user namespace
1871 	 * can unmount the RO mount and get at the RW mount, an attacker with
1872 	 * access only to a write primitive will not be able to modify /proc.
1873 	 */
1874 	if (!j->flags.userns && umount2(kProcPath, MNT_DETACH))
1875 		return -errno;
1876 	if (mount("proc", kProcPath, "proc", kSafeFlags | MS_RDONLY, ""))
1877 		return -errno;
1878 	return 0;
1879 }
1880 
1881 static void kill_child_and_die(const struct minijail *j, const char *msg)
1882 {
1883 	kill(j->initpid, SIGKILL);
1884 	die("%s", msg);
1885 }
1886 
1887 static void write_pid_file_or_die(const struct minijail *j)
1888 {
1889 	if (write_pid_to_path(j->initpid, j->pid_file_path))
1890 		kill_child_and_die(j, "failed to write pid file");
1891 }
1892 
1893 static void add_to_cgroups_or_die(const struct minijail *j)
1894 {
1895 	size_t i;
1896 
1897 	for (i = 0; i < j->cgroup_count; ++i) {
1898 		if (write_pid_to_path(j->initpid, j->cgroups[i]))
1899 			kill_child_and_die(j, "failed to add to cgroups");
1900 	}
1901 }
1902 
1903 static void set_rlimits_or_die(const struct minijail *j)
1904 {
1905 	size_t i;
1906 
1907 	for (i = 0; i < j->rlimit_count; ++i) {
1908 		struct rlimit limit;
1909 		limit.rlim_cur = j->rlimits[i].cur;
1910 		limit.rlim_max = j->rlimits[i].max;
1911 		if (prlimit(j->initpid, j->rlimits[i].type, &limit, NULL))
1912 			kill_child_and_die(j, "failed to set rlimit");
1913 	}
1914 }
1915 
1916 static void write_ugid_maps_or_die(const struct minijail *j)
1917 {
1918 	if (j->uidmap && write_proc_file(j->initpid, j->uidmap, "uid_map") != 0)
1919 		kill_child_and_die(j, "failed to write uid_map");
1920 	if (j->gidmap && j->flags.disable_setgroups) {
1921 		/*
1922 		 * Older kernels might not have the /proc/<pid>/setgroups files.
1923 		 */
1924 		int ret = write_proc_file(j->initpid, "deny", "setgroups");
1925 		if (ret != 0) {
1926 			if (ret == -ENOENT) {
1927 				/* See http://man7.org/linux/man-pages/man7/user_namespaces.7.html. */
1928 				warn("could not disable setgroups(2)");
1929 			} else
1930 				kill_child_and_die(
1931 				    j, "failed to disable setgroups(2)");
1932 		}
1933 	}
1934 	if (j->gidmap && write_proc_file(j->initpid, j->gidmap, "gid_map") != 0)
1935 		kill_child_and_die(j, "failed to write gid_map");
1936 }
1937 
1938 static void enter_user_namespace(const struct minijail *j)
1939 {
1940 	int uid = j->flags.uid ? j->uid : 0;
1941 	int gid = j->flags.gid ? j->gid : 0;
1942 	if (j->gidmap && setresgid(gid, gid, gid)) {
1943 		pdie("user_namespaces: setresgid(%d, %d, %d) failed", gid, gid,
1944 		     gid);
1945 	}
1946 	if (j->uidmap && setresuid(uid, uid, uid)) {
1947 		pdie("user_namespaces: setresuid(%d, %d, %d) failed", uid, uid,
1948 		     uid);
1949 	}
1950 }
1951 
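/*
 * parent_setup_complete: Called by the parent once its post-fork setup is
 * done. Closing both ends of the sync pipe delivers EOF to the child
 * blocked in wait_for_parent_setup() below.
 */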
1952 static void parent_setup_complete(int *pipe_fds)
1953 {
1954 	close_and_reset(&pipe_fds[0]);
1955 	close_and_reset(&pipe_fds[1]);
1956 }
1957 
1958 /*
1959  * wait_for_parent_setup: Called by the child process to wait for any
1960  * further parent-side setup to complete before continuing.
1961  */
1962 static void wait_for_parent_setup(int *pipe_fds)
1963 {
1964 	char buf;
1965 
1966 	close_and_reset(&pipe_fds[1]);
1967 
1968 	/* Wait for parent to complete setup and close the pipe. */
1969 	if (read(pipe_fds[0], &buf, 1) != 0)
1970 		die("failed to sync with parent");
1971 	close_and_reset(&pipe_fds[0]);
1972 }
1973 
1974 static void drop_ugid(const struct minijail *j)
1975 {
1976 	if (j->flags.inherit_suppl_gids + j->flags.keep_suppl_gids +
1977 	    j->flags.set_suppl_gids > 1) {
1978 		die("can only do one of inherit, keep, or set supplementary "
1979 		    "groups");
1980 	}
1981 
1982 	if (j->flags.inherit_suppl_gids) {
1983 		if (initgroups(j->user, j->usergid))
1984 			pdie("initgroups(%s, %d) failed", j->user, j->usergid);
1985 	} else if (j->flags.set_suppl_gids) {
1986 		if (setgroups(j->suppl_gid_count, j->suppl_gid_list))
1987 			pdie("setgroups(suppl_gids) failed");
1988 	} else if (!j->flags.keep_suppl_gids && !j->flags.disable_setgroups) {
1989 		/*
1990 		 * Only attempt to clear supplementary groups if we are changing
1991 		 * users or groups, and if the caller did not request to disable
1992 		 * setgroups (used when entering a user namespace as a
1993 		 * non-privileged user).
1994 		 */
1995 		if ((j->flags.uid || j->flags.gid) && setgroups(0, NULL))
1996 			pdie("setgroups(0, NULL) failed");
1997 	}
1998 
1999 	if (j->flags.gid && setresgid(j->gid, j->gid, j->gid))
2000 		pdie("setresgid(%d, %d, %d) failed", j->gid, j->gid, j->gid);
2001 
2002 	if (j->flags.uid && setresuid(j->uid, j->uid, j->uid))
2003 		pdie("setresuid(%d, %d, %d) failed", j->uid, j->uid, j->uid);
2004 }
2005 
2006 static void drop_capbset(uint64_t keep_mask, unsigned int last_valid_cap)
2007 {
2008 	const uint64_t one = 1;
2009 	unsigned int i;
2010 	for (i = 0; i < sizeof(keep_mask) * 8 && i <= last_valid_cap; ++i) {
2011 		if (keep_mask & (one << i))
2012 			continue;
2013 		if (prctl(PR_CAPBSET_DROP, i))
2014 			pdie("could not drop capability from bounding set");
2015 	}
2016 }
2017 
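/*
 * Reduce the thread's capability sets to j->caps: build a capability state
 * containing only the requested caps (keeping CAP_SETPCAP temporarily so the
 * bounding set can be dropped), apply it, drop the bounding set unless the
 * caller asked to skip the SECURE_NOROOT securebit, clear CAP_SETPCAP again
 * if it was not requested, and finally raise the requested ambient caps when
 * set_ambient_caps is enabled.
 */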
2018 static void drop_caps(const struct minijail *j, unsigned int last_valid_cap)
2019 {
2020 	if (!j->flags.use_caps)
2021 		return;
2022 
2023 	cap_t caps = cap_get_proc();
2024 	cap_value_t flag[1];
2025 	const size_t ncaps = sizeof(j->caps) * 8;
2026 	const uint64_t one = 1;
2027 	unsigned int i;
2028 	if (!caps)
2029 		die("can't get process caps");
2030 	if (cap_clear(caps))
2031 		die("can't clear caps");
2032 
2033 	for (i = 0; i < ncaps && i <= last_valid_cap; ++i) {
2034 		/* Keep CAP_SETPCAP for dropping bounding set bits. */
2035 		if (i != CAP_SETPCAP && !(j->caps & (one << i)))
2036 			continue;
2037 		flag[0] = i;
2038 		if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_SET))
2039 			die("can't add effective cap");
2040 		if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_SET))
2041 			die("can't add permitted cap");
2042 		if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_SET))
2043 			die("can't add inheritable cap");
2044 	}
2045 	if (cap_set_proc(caps))
2046 		die("can't apply initial cleaned capset");
2047 
2048 	/*
2049 	 * Instead of dropping the bounding set first, do it here in case
2050 	 * the caller had a more permissive bounding set which could
2051 	 * have been used above to raise a capability that wasn't already
2052 	 * present. This requires CAP_SETPCAP, so we raised/kept it above.
2053 	 *
2054 	 * However, if we're asked to skip setting *and* locking the
2055 	 * SECURE_NOROOT securebit, also skip dropping the bounding set.
2056 	 * If the caller wants to regain all capabilities when executing a
2057 	 * set-user-ID-root program, allow them to do so. The default behavior
2058 	 * (i.e. the behavior without |securebits_skip_mask| set) will still put
2059 	 * the jailed process tree in a capabilities-only environment.
2060 	 *
2061 	 * We check the negated skip mask for SECURE_NOROOT and
2062 	 * SECURE_NOROOT_LOCKED. If the bits are set in the negated mask they
2063 	 * will *not* be skipped in lock_securebits(), and therefore we should
2064 	 * drop the bounding set.
2065 	 */
2066 	if (secure_noroot_set_and_locked(~j->securebits_skip_mask)) {
2067 		drop_capbset(j->caps, last_valid_cap);
2068 	} else {
2069 		warn("SECURE_NOROOT not set, not dropping bounding set");
2070 	}
2071 
2072 	/* If CAP_SETPCAP wasn't specifically requested, now we remove it. */
2073 	if ((j->caps & (one << CAP_SETPCAP)) == 0) {
2074 		flag[0] = CAP_SETPCAP;
2075 		if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_CLEAR))
2076 			die("can't clear effective cap");
2077 		if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_CLEAR))
2078 			die("can't clear permitted cap");
2079 		if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_CLEAR))
2080 			die("can't clear inheritable cap");
2081 	}
2082 
2083 	if (cap_set_proc(caps))
2084 		die("can't apply final cleaned capset");
2085 
2086 	/*
2087 	 * If ambient capabilities are supported, clear all capabilities first,
2088 	 * then raise the requested ones.
2089 	 */
2090 	if (j->flags.set_ambient_caps) {
2091 		if (!cap_ambient_supported()) {
2092 			pdie("ambient capabilities not supported");
2093 		}
2094 		if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0, 0, 0) !=
2095 		    0) {
2096 			pdie("can't clear ambient capabilities");
2097 		}
2098 
2099 		for (i = 0; i < ncaps && i <= last_valid_cap; ++i) {
2100 			if (!(j->caps & (one << i)))
2101 				continue;
2102 
2103 			if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, i, 0,
2104 				  0) != 0) {
2105 				pdie("prctl(PR_CAP_AMBIENT, "
2106 				     "PR_CAP_AMBIENT_RAISE, %u) failed",
2107 				     i);
2108 			}
2109 		}
2110 	}
2111 
2112 	cap_free(caps);
2113 }
2114 
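/*
 * Apply the seccomp policy: optionally set no_new_privs, skip filter
 * installation when running under (HW)ASan, arrange SIGSYS logging or reset
 * its disposition as configured, then install j->filter_prog through
 * seccomp(2) (when TSYNC/SPEC_ALLOW flags are needed) or prctl(2).
 */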
2115 static void set_seccomp_filter(const struct minijail *j)
2116 {
2117 	/*
2118 	 * Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
2119 	 * in the kernel source tree for an explanation of the parameters.
2120 	 */
2121 	if (j->flags.no_new_privs) {
2122 		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
2123 			pdie("prctl(PR_SET_NO_NEW_PRIVS)");
2124 	}
2125 
2126 	/*
2127 	 * Code running with ASan
2128 	 * (https://github.com/google/sanitizers/wiki/AddressSanitizer)
2129 	 * will make system calls not included in the syscall filter policy,
2130 	 * which will likely crash the program. Skip setting seccomp filter in
2131 	 * that case.
2132 	 * 'running_with_asan()' has no inputs and is completely defined at
2133 	 * build time, so this cannot be used by an attacker to skip setting
2134 	 * seccomp filter.
2135 	 */
2136 	if (j->flags.seccomp_filter && running_with_asan()) {
2137 		warn("running with (HW)ASan, not setting seccomp filter");
2138 		return;
2139 	}
2140 
2141 	if (j->flags.seccomp_filter) {
2142 		if (j->flags.seccomp_filter_logging) {
2143 			warn("logging seccomp filter failures");
2144 			if (!seccomp_ret_log_available()) {
2145 				/*
2146 				 * If SECCOMP_RET_LOG is not available,
2147 				 * install the SIGSYS handler first.
2148 				 */
2149 				if (install_sigsys_handler())
2150 					pdie(
2151 					    "failed to install SIGSYS handler");
2152 			}
2153 		} else if (j->flags.seccomp_filter_tsync) {
2154 			/*
2155 			 * If setting thread sync,
2156 			 * reset the SIGSYS signal handler so that
2157 			 * the entire thread group is killed.
2158 			 */
2159 			if (signal(SIGSYS, SIG_DFL) == SIG_ERR)
2160 				pdie("failed to reset SIGSYS disposition");
2161 		}
2162 	}
2163 
2164 	/*
2165 	 * Install the syscall filter.
2166 	 */
2167 	if (j->flags.seccomp_filter) {
2168 		if (j->flags.seccomp_filter_tsync ||
2169 		    j->flags.seccomp_filter_allow_speculation) {
2170 			int filter_flags =
2171 			    (j->flags.seccomp_filter_tsync
2172 				 ? SECCOMP_FILTER_FLAG_TSYNC
2173 				 : 0) |
2174 			    (j->flags.seccomp_filter_allow_speculation
2175 				 ? SECCOMP_FILTER_FLAG_SPEC_ALLOW
2176 				 : 0);
2177 			if (sys_seccomp(SECCOMP_SET_MODE_FILTER, filter_flags,
2178 					j->filter_prog)) {
2179 				pdie("seccomp(tsync) failed");
2180 			}
2181 		} else {
2182 			if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
2183 				  j->filter_prog)) {
2184 				pdie("prctl(seccomp_filter) failed");
2185 			}
2186 		}
2187 	}
2188 }
2189 
2190 static pid_t forward_pid = -1;
2191 
2192 static void forward_signal(int sig,
2193 			   siginfo_t *siginfo attribute_unused,
2194 			   void *void_context attribute_unused)
2195 {
2196 	if (forward_pid != -1) {
2197 		kill(forward_pid, sig);
2198 	}
2199 }
2200 
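/*
 * Install forward_signal() for every catchable signal (including the
 * real-time range) so that signals delivered to the parent are relayed to
 * the jailed child, while SIGCHLD keeps its default disposition so the
 * parent can still wait(2) for the child.
 */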
2201 static void install_signal_handlers(void)
2202 {
2203 	struct sigaction act;
2204 
2205 	memset(&act, 0, sizeof(act));
2206 	act.sa_sigaction = &forward_signal;
2207 	act.sa_flags = SA_SIGINFO | SA_RESTART;
2208 
2209 	/* Handle all signals, except SIGCHLD. */
2210 	for (int sig = 1; sig < NSIG; sig++) {
2211 		/*
2212 		 * We don't care if we get EINVAL: that just means that we
2213 		 * can't handle this signal, so let's skip it and continue.
2214 		 */
2215 		sigaction(sig, &act, NULL);
2216 	}
2217 	/* Reset SIGCHLD's handler. */
2218 	signal(SIGCHLD, SIG_DFL);
2219 
2220 	/* Handle real-time signals. */
2221 	for (int sig = SIGRTMIN; sig <= SIGRTMAX; sig++) {
2222 		sigaction(sig, &act, NULL);
2223 	}
2224 }
2225 
2226 static const char *lookup_hook_name(minijail_hook_event_t event)
2227 {
2228 	switch (event) {
2229 	case MINIJAIL_HOOK_EVENT_PRE_DROP_CAPS:
2230 		return "pre-drop-caps";
2231 	case MINIJAIL_HOOK_EVENT_PRE_EXECVE:
2232 		return "pre-execve";
2233 	case MINIJAIL_HOOK_EVENT_PRE_CHROOT:
2234 		return "pre-chroot";
2235 	case MINIJAIL_HOOK_EVENT_MAX:
2236 		/*
2237 		 * Adding this in favor of a default case to force the
2238 		 * compiler to error out if a new enum value is added.
2239 		 */
2240 		break;
2241 	}
2242 	return "unknown";
2243 }
2244 
2245 static void run_hooks_or_die(const struct minijail *j,
2246 			     minijail_hook_event_t event)
2247 {
2248 	int rc;
2249 	int hook_index = 0;
2250 	for (struct hook *c = j->hooks_head; c; c = c->next) {
2251 		if (c->event != event)
2252 			continue;
2253 		rc = c->hook(c->payload);
2254 		if (rc != 0) {
2255 			errno = -rc;
2256 			pdie("%s hook (index %d) failed",
2257 			     lookup_hook_name(event), hook_index);
2258 		}
2259 		/* Only increase the index within the same hook event type. */
2260 		++hook_index;
2261 	}
2262 }
2263 
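/*
 * Apply the configured jail to the calling process, in order: enter or
 * unshare the requested namespaces, process mounts, chroot(2) or
 * pivot_root(2), mount /tmp and remount /proc read-only if asked, run the
 * PRE_DROP_CAPS hooks, drop the capability bounding set, lock securebits,
 * drop uid/gid and capabilities and install the seccomp filter (ordering
 * depends on no_new_privs), and finally select the alternate syscall table
 * or legacy seccomp mode.
 */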
2264 void API minijail_enter(const struct minijail *j)
2265 {
2266 	/*
2267 	 * If we're dropping caps, get the last valid cap from /proc now,
2268 	 * since /proc can be unmounted before drop_caps() is called.
2269 	 */
2270 	unsigned int last_valid_cap = 0;
2271 	if (j->flags.capbset_drop || j->flags.use_caps)
2272 		last_valid_cap = get_last_valid_cap();
2273 
2274 	if (j->flags.pids)
2275 		die("tried to enter a pid-namespaced jail;"
2276 		    " try minijail_run()?");
2277 
2278 	if (j->flags.inherit_suppl_gids && !j->user)
2279 		die("cannot inherit supplementary groups without setting a "
2280 		    "username");
2281 
2282 	/*
2283 	 * We can't recover from failures if we've dropped privileges partially,
2284 	 * so we don't even try. If any of our operations fail, we abort() the
2285 	 * entire process.
2286 	 */
2287 	if (j->flags.enter_vfs) {
2288 		if (setns(j->mountns_fd, CLONE_NEWNS))
2289 			pdie("setns(CLONE_NEWNS) failed");
2290 		close(j->mountns_fd);
2291 	}
2292 
2293 	if (j->flags.vfs) {
2294 		if (unshare(CLONE_NEWNS))
2295 			pdie("unshare(CLONE_NEWNS) failed");
2296 		/*
2297 		 * By default, remount all filesystems as private, unless
2298 		 * - Passed a specific remount mode, in which case remount with
2299 		 *   that,
2300 		 * - Asked not to remount at all, in which case skip the
2301 		 *   mount(2) call.
2302 		 * https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
2303 		 */
2304 		if (j->remount_mode) {
2305 			if (mount(NULL, "/", NULL, MS_REC | j->remount_mode,
2306 				  NULL))
2307 				pdie("mount(NULL, /, NULL, "
2308 				     "MS_REC | j->remount_mode, NULL) failed");
2309 
2310 			struct minijail_remount *temp = j->remounts_head;
2311 			while (temp) {
2312 				if (temp->remount_mode < j->remount_mode)
2313 					die("cannot remount %s as stricter "
2314 					    "than the root dir",
2315 					    temp->mount_name);
2316 				if (mount(NULL, temp->mount_name, NULL,
2317 					  MS_REC | temp->remount_mode, NULL))
2318 					pdie("mount(NULL, %s, NULL, "
2319 					     "MS_REC | temp->remount_mode, NULL) "
2320 					     "failed", temp->mount_name);
2321 				temp = temp->next;
2322 			}
2323 		}
2324 
2325 	}
2326 
2327 	if (j->flags.ipc && unshare(CLONE_NEWIPC)) {
2328 		pdie("unshare(CLONE_NEWIPC) failed");
2329 	}
2330 
2331 	if (j->flags.uts) {
2332 		if (unshare(CLONE_NEWUTS))
2333 			pdie("unshare(CLONE_NEWUTS) failed");
2334 
2335 		if (j->hostname && sethostname(j->hostname, strlen(j->hostname)))
2336 			pdie("sethostname(%s) failed", j->hostname);
2337 	}
2338 
2339 	if (j->flags.enter_net) {
2340 		if (setns(j->netns_fd, CLONE_NEWNET))
2341 			pdie("setns(CLONE_NEWNET) failed");
2342 		close(j->netns_fd);
2343 	} else if (j->flags.net) {
2344 		if (unshare(CLONE_NEWNET))
2345 			pdie("unshare(CLONE_NEWNET) failed");
2346 		config_net_loopback();
2347 	}
2348 
2349 	if (j->flags.ns_cgroups && unshare(CLONE_NEWCGROUP))
2350 		pdie("unshare(CLONE_NEWCGROUP) failed");
2351 
2352 	if (j->flags.new_session_keyring) {
2353 		if (syscall(SYS_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL) < 0)
2354 			pdie("keyctl(KEYCTL_JOIN_SESSION_KEYRING) failed");
2355 	}
2356 
2357 	/* We have to process all the mounts before we chroot/pivot_root. */
2358 	process_mounts_or_die(j);
2359 
2360 	if (j->flags.chroot && enter_chroot(j))
2361 		pdie("chroot");
2362 
2363 	if (j->flags.pivot_root && enter_pivot_root(j))
2364 		pdie("pivot_root");
2365 
2366 	if (j->flags.mount_tmp && mount_tmp(j))
2367 		pdie("mount_tmp");
2368 
2369 	if (j->flags.remount_proc_ro && remount_proc_readonly(j))
2370 		pdie("remount");
2371 
2372 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_DROP_CAPS);
2373 
2374 	/*
2375 	 * If we're only dropping capabilities from the bounding set, but not
2376 	 * from the thread's (permitted|inheritable|effective) sets, do it now.
2377 	 */
2378 	if (j->flags.capbset_drop) {
2379 		drop_capbset(j->cap_bset, last_valid_cap);
2380 	}
2381 
2382 	/*
2383 	 * POSIX capabilities are a bit tricky. We must set SECBIT_KEEP_CAPS
2384 	 * before drop_ugid() below as the latter would otherwise drop all
2385 	 * capabilities.
2386 	 */
2387 	if (j->flags.use_caps) {
2388 		/*
2389 		 * When using ambient capabilities, CAP_SET{GID,UID} can be
2390 		 * inherited across execve(2), so SECBIT_KEEP_CAPS is not
2391 		 * strictly needed.
2392 		 */
2393 		bool require_keep_caps = !j->flags.set_ambient_caps;
2394 		if (lock_securebits(j->securebits_skip_mask,
2395 				    require_keep_caps) < 0) {
2396 			pdie("locking securebits failed");
2397 		}
2398 	}
2399 
2400 	if (j->flags.no_new_privs) {
2401 		/*
2402 		 * If we're setting no_new_privs, we can drop privileges
2403 		 * before setting seccomp filter. This way filter policies
2404 		 * don't need to allow privilege-dropping syscalls.
2405 		 */
2406 		drop_ugid(j);
2407 		drop_caps(j, last_valid_cap);
2408 		set_seccomp_filter(j);
2409 	} else {
2410 		/*
2411 		 * If we're not setting no_new_privs,
2412 		 * we need to set seccomp filter *before* dropping privileges.
2413 		 * WARNING: this means that filter policies *must* allow
2414 		 * setgroups()/setresgid()/setresuid() for dropping root and
2415 		 * capget()/capset()/prctl() for dropping caps.
2416 		 */
2417 		set_seccomp_filter(j);
2418 		drop_ugid(j);
2419 		drop_caps(j, last_valid_cap);
2420 	}
2421 
2422 	/*
2423 	 * Select the specified alternate syscall table.  The table must not
2424 	 * block prctl(2) if we're using seccomp as well.
2425 	 */
2426 	if (j->flags.alt_syscall) {
2427 		if (prctl(PR_ALT_SYSCALL, 1, j->alt_syscall_table))
2428 			pdie("prctl(PR_ALT_SYSCALL) failed");
2429 	}
2430 
2431 	/*
2432 	 * seccomp has to come last since it cuts off all the other
2433 	 * privilege-dropping syscalls :)
2434 	 */
2435 	if (j->flags.seccomp && prctl(PR_SET_SECCOMP, 1)) {
2436 		if ((errno == EINVAL) && seccomp_can_softfail()) {
2437 			warn("seccomp not supported");
2438 			return;
2439 		}
2440 		pdie("prctl(PR_SET_SECCOMP) failed");
2441 	}
2442 }
2443 
2444 /* TODO(wad): will visibility affect this variable? */
2445 static int init_exitstatus = 0;
2446 
2447 static void init_term(int sig attribute_unused)
2448 {
2449 	_exit(init_exitstatus);
2450 }
2451 
2452 static void init(pid_t rootpid)
2453 {
2454 	pid_t pid;
2455 	int status;
2456 	/* So that we exit with the right status. */
2457 	signal(SIGTERM, init_term);
2458 	/* TODO(wad): self jail with seccomp filters here. */
2459 	while ((pid = wait(&status)) > 0) {
2460 		/*
2461 		 * This loop will only end when either there are no processes
2462 		 * left inside our pid namespace or we get a signal.
2463 		 */
2464 		if (pid == rootpid)
2465 			init_exitstatus = status;
2466 	}
2467 	if (!WIFEXITED(init_exitstatus))
2468 		_exit(MINIJAIL_ERR_INIT);
2469 	_exit(WEXITSTATUS(init_exitstatus));
2470 }
2471 
2472 int API minijail_from_fd(int fd, struct minijail *j)
2473 {
2474 	size_t sz = 0;
2475 	size_t bytes = read(fd, &sz, sizeof(sz));
2476 	char *buf;
2477 	int r;
2478 	if (sizeof(sz) != bytes)
2479 		return -EINVAL;
2480 	if (sz > USHRT_MAX)	/* arbitrary sanity check */
2481 		return -E2BIG;
2482 	buf = malloc(sz);
2483 	if (!buf)
2484 		return -ENOMEM;
2485 	bytes = read(fd, buf, sz);
2486 	if (bytes != sz) {
2487 		free(buf);
2488 		return -EINVAL;
2489 	}
2490 	r = minijail_unmarshal(j, buf, sz);
2491 	free(buf);
2492 	return r;
2493 }
2494 
2495 int API minijail_to_fd(struct minijail *j, int fd)
2496 {
2497 	size_t sz = minijail_size(j);
2498 	if (!sz)
2499 		return -EINVAL;
2500 
2501 	char *buf = malloc(sz);
2502 	if (!buf)
2503 		return -ENOMEM;
2504 
2505 	int err = minijail_marshal(j, buf, sz);
2506 	if (err)
2507 		goto error;
2508 
2509 	/* Sends [size][minijail]. */
2510 	err = write_exactly(fd, &sz, sizeof(sz));
2511 	if (err)
2512 		goto error;
2513 
2514 	err = write_exactly(fd, buf, sz);
2515 
2516 error:
2517 	free(buf);
2518 	return err;
2519 }
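
/*
 * minijail_to_fd() and minijail_from_fd() are meant to be used as a pair,
 * e.g. across the pipe created for the LD_PRELOAD child. A minimal sketch
 * (error handling omitted; assumes the usual minijail_new() constructor
 * declared in libminijail.h):
 *
 *   int fds[2];
 *   pipe(fds);
 *   minijail_to_fd(j, fds[1]);       // writes [size][marshalled jail]
 *   struct minijail *copy = minijail_new();
 *   minijail_from_fd(fds[0], copy);  // rebuilds the jail on the other side
 */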
2520 
2521 int API minijail_copy_jail(const struct minijail *from, struct minijail *out)
2522 {
2523 	size_t sz = minijail_size(from);
2524 	if (!sz)
2525 		return -EINVAL;
2526 
2527 	char *buf = malloc(sz);
2528 	if (!buf)
2529 		return -ENOMEM;
2530 
2531 	int err = minijail_marshal(from, buf, sz);
2532 	if (err)
2533 		goto error;
2534 
2535 	err = minijail_unmarshal(out, buf, sz);
2536 error:
2537 	free(buf);
2538 	return err;
2539 }
2540 
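/*
 * Append the preload library (j->preload_path or PRELOADPATH) to any
 * existing LD_PRELOAD value in the child environment; the preloaded code is
 * what applies the marshalled jail inside the target after execve(2). Not
 * used on Android.
 */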
2541 static int setup_preload(const struct minijail *j attribute_unused,
2542 			 char ***child_env attribute_unused)
2543 {
2544 #if defined(__ANDROID__)
2545 	/* Don't use LDPRELOAD on Android. */
2546 	return 0;
2547 #else
2548 	const char *preload_path = j->preload_path ?: PRELOADPATH;
2549 	char *newenv = NULL;
2550 	int ret = 0;
2551 	const char *oldenv = getenv(kLdPreloadEnvVar);
2552 
2553 	if (!oldenv)
2554 		oldenv = "";
2555 
2556 	/* Only insert a separating space if we have something to separate... */
2557 	if (asprintf(&newenv, "%s%s%s", oldenv, oldenv[0] != '\0' ? " " : "",
2558 		     preload_path) < 0) {
2559 		return -1;
2560 	}
2561 
2562 	ret = minijail_setenv(child_env, kLdPreloadEnvVar, newenv, 1);
2563 	free(newenv);
2564 	return ret;
2565 #endif
2566 }
2567 
2568 static int setup_pipe(char ***child_env, int fds[2])
2569 {
2570 	int r = pipe(fds);
2571 	char fd_buf[11];
2572 	if (r)
2573 		return r;
2574 	r = snprintf(fd_buf, sizeof(fd_buf), "%d", fds[0]);
2575 	if (r <= 0)
2576 		return -EINVAL;
2577 	return minijail_setenv(child_env, kFdEnvVar, fd_buf, 1);
2578 }
2579 
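/*
 * Close every open file descriptor except the ones listed in
 * |inheritable_fds|, by walking /proc/self/fd. The directory fd itself is
 * skipped and released by closedir().
 */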
2580 static int close_open_fds(int *inheritable_fds, size_t size)
2581 {
2582 	const char *kFdPath = "/proc/self/fd";
2583 
2584 	DIR *d = opendir(kFdPath);
2585 	struct dirent *dir_entry;
2586 
2587 	if (d == NULL)
2588 		return -1;
2589 	int dir_fd = dirfd(d);
2590 	while ((dir_entry = readdir(d)) != NULL) {
2591 		size_t i;
2592 		char *end;
2593 		bool should_close = true;
2594 		const int fd = strtol(dir_entry->d_name, &end, 10);
2595 
2596 		if ((*end) != '\0') {
2597 			continue;
2598 		}
2599 		/*
2600 		 * We might have set up some pipes that we want to share with
2601 		 * the parent process; those should not be closed here.
2602 		 */
2603 		for (i = 0; i < size; ++i) {
2604 			if (fd == inheritable_fds[i]) {
2605 				should_close = false;
2606 				break;
2607 			}
2608 		}
2609 		/* Also avoid closing the directory fd. */
2610 		if (should_close && fd != dir_fd)
2611 			close(fd);
2612 	}
2613 	closedir(d);
2614 	return 0;
2615 }
2616 
2617 /* Return true if the specified file descriptor is already open. */
2618 static int fd_is_open(int fd)
2619 {
2620 	return fcntl(fd, F_GETFD) != -1 || errno != EBADF;
2621 }
2622 
2623 static_assert(FD_SETSIZE >= MAX_PRESERVED_FDS * 2 - 1,
2624 	      "If true, ensure_no_fd_conflict will always find an unused fd.");
2625 
2626 /* If p->parent_fd will be used by a child_fd, move it to an unused fd. */
2627 static int ensure_no_fd_conflict(const fd_set* child_fds,
2628 				 struct preserved_fd* p)
2629 {
2630 	if (!FD_ISSET(p->parent_fd, child_fds)){
2631 		return 0;
2632 	}
2633 
2634 	/*
2635 	 * If no other parent_fd matches the child_fd then use it instead of a
2636 	 * temporary.
2637 	 */
2638 	int fd = p->child_fd;
2639 	if (fd_is_open(fd)) {
2640 		fd = FD_SETSIZE - 1;
2641 		while (FD_ISSET(fd, child_fds) || fd_is_open(fd)) {
2642 			--fd;
2643 			if (fd < 0) {
2644 				die("failed to find an unused fd");
2645 			}
2646 		}
2647 	}
2648 
2649 	int ret = dup2(p->parent_fd, fd);
2650 	/*
2651 	 * warn() opens a file descriptor so it needs to happen after dup2 to
2652 	 * avoid unintended side effects. This can be avoided by reordering the
2653 	 * mapping requests so that the source fds with overlap are mapped
2654 	 * first (unless there are cycles).
2655 	 */
2656 	warn("mapped fd overlap: moving %d to %d", p->parent_fd, fd);
2657 	if (ret == -1) {
2658 		return -1;
2659 	}
2660 
2661 	p->parent_fd = fd;
2662 	return 0;
2663 }
2664 
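/*
 * Apply the preserved-fd mappings: first move any parent fd that would be
 * clobbered by a requested child fd out of the way, then dup2(2) each parent
 * fd onto its child fd, and finally close parent fds that are not themselves
 * child fds.
 */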
2665 static int redirect_fds(struct minijail *j)
2666 {
2667 	fd_set child_fds;
2668 	FD_ZERO(&child_fds);
2669 
2670 	/* Relocate parent_fds that would be replaced by a child_fd. */
2671 	for (size_t i = 0; i < j->preserved_fd_count; i++) {
2672 		int child_fd = j->preserved_fds[i].child_fd;
2673 		if (FD_ISSET(child_fd, &child_fds)) {
2674 			die("fd %d is mapped more than once", child_fd);
2675 		}
2676 
2677 		if (ensure_no_fd_conflict(&child_fds,
2678 					  &j->preserved_fds[i]) == -1) {
2679 			return -1;
2680 		}
2681 
2682 		FD_SET(child_fd, &child_fds);
2683 	}
2684 
2685 	for (size_t i = 0; i < j->preserved_fd_count; i++) {
2686 		if (j->preserved_fds[i].parent_fd ==
2687 		    j->preserved_fds[i].child_fd) {
2688 			continue;
2689 		}
2690 		if (dup2(j->preserved_fds[i].parent_fd,
2691 			 j->preserved_fds[i].child_fd) == -1) {
2692 			return -1;
2693 		}
2694 	}
2695 	/*
2696 	 * After all fds have been duped, we are now free to close all parent
2697 	 * fds that are *not* child fds.
2698 	 */
2699 	for (size_t i = 0; i < j->preserved_fd_count; i++) {
2700 		int parent_fd = j->preserved_fds[i].parent_fd;
2701 		if (!FD_ISSET(parent_fd, &child_fds)) {
2702 			close(parent_fd);
2703 		}
2704 	}
2705 	return 0;
2706 }
2707 
2708 /*
2709  * Structure holding resources and state created when running a minijail.
2710  */
2711 struct minijail_run_state {
2712 	pid_t child_pid;
2713 	int pipe_fds[2];
2714 	int stdin_fds[2];
2715 	int stdout_fds[2];
2716 	int stderr_fds[2];
2717 	int child_sync_pipe_fds[2];
2718 	char **child_env;
2719 };
2720 
2721 static void minijail_free_run_state(struct minijail_run_state *state)
2722 {
2723 	state->child_pid = -1;
2724 
2725 	int *fd_pairs[] = {state->pipe_fds, state->stdin_fds, state->stdout_fds,
2726 			   state->stderr_fds, state->child_sync_pipe_fds};
2727 	for (size_t i = 0; i < ARRAY_SIZE(fd_pairs); ++i) {
2728 		close_and_reset(&fd_pairs[i][0]);
2729 		close_and_reset(&fd_pairs[i][1]);
2730 	}
2731 
2732 	minijail_free_env(state->child_env);
2733 	state->child_env = NULL;
2734 }
2735 
2736 /* Set up stdin/stdout/stderr file descriptors in the child. */
2737 static void setup_child_std_fds(struct minijail *j,
2738 				struct minijail_run_state *state)
2739 {
2740 	struct {
2741 		const char *name;
2742 		int from;
2743 		int to;
2744 	} fd_map[] = {
2745 	    {"stdin", state->stdin_fds[0], STDIN_FILENO},
2746 	    {"stdout", state->stdout_fds[1], STDOUT_FILENO},
2747 	    {"stderr", state->stderr_fds[1], STDERR_FILENO},
2748 	};
2749 
2750 	for (size_t i = 0; i < ARRAY_SIZE(fd_map); ++i) {
2751 		if (fd_map[i].from == -1 || fd_map[i].from == fd_map[i].to)
2752 			continue;
2753 		if (dup2(fd_map[i].from, fd_map[i].to) == -1)
2754 			die("failed to set up %s pipe", fd_map[i].name);
2755 	}
2756 
2757 	/* Close temporary pipe file descriptors. */
2758 	int *std_pipes[] = {state->stdin_fds, state->stdout_fds,
2759 			    state->stderr_fds};
2760 	for (size_t i = 0; i < ARRAY_SIZE(std_pipes); ++i) {
2761 		close_and_reset(&std_pipes[i][0]);
2762 		close_and_reset(&std_pipes[i][1]);
2763 	}
2764 
2765 	/*
2766 	 * If any of stdin, stdout, or stderr is a TTY, or the setsid flag is
2767 	 * set, create a new session. This prevents the jailed process from
2768 	 * using the TIOCSTI ioctl to push characters into the parent process
2769 	 * terminal's input buffer, therefore escaping the jail.
2770 	 *
2771 	 * Since it has just forked, the child will not be a process group
2772 	 * leader, and this call to setsid() should always succeed.
2773 	 */
2774 	if (j->flags.setsid || isatty(STDIN_FILENO) || isatty(STDOUT_FILENO) ||
2775 	    isatty(STDERR_FILENO)) {
2776 		if (setsid() < 0) {
2777 			pdie("setsid() failed");
2778 		}
2779 	}
2780 }
2781 
2782 /*
2783  * Structure that specifies how to start a minijail.
2784  *
2785  * filename - The program to exec in the child. Required if |exec_in_child| = 1.
2786  * argv - Arguments for the child program. Required if |exec_in_child| = 1.
2787  * envp - Environment for the child program. Available if |exec_in_child| = 1.
2788  * use_preload - If true use LD_PRELOAD.
2789  * exec_in_child - If true, run |filename|. Otherwise, the child will return to
2790  *     the caller.
2791  * pstdin_fd - Filled with stdin pipe if non-NULL.
2792  * pstdout_fd - Filled with stdout pipe if non-NULL.
2793  * pstderr_fd - Filled with stderr pipe if non-NULL.
2794  * pchild_pid - Filled with the pid of the child process if non-NULL.
2795  */
2796 struct minijail_run_config {
2797 	const char *filename;
2798 	char *const *argv;
2799 	char *const *envp;
2800 	int use_preload;
2801 	int exec_in_child;
2802 	int *pstdin_fd;
2803 	int *pstdout_fd;
2804 	int *pstderr_fd;
2805 	pid_t *pchild_pid;
2806 };
2807 
2808 static int
2809 minijail_run_config_internal(struct minijail *j,
2810 			     const struct minijail_run_config *config);
2811 
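/*
 * The minijail_run*() wrappers below all funnel into
 * minijail_run_config_internal(). A typical caller looks roughly like the
 * following sketch (illustrative only; error handling omitted, and the usual
 * minijail_new() constructor from libminijail.h is assumed):
 *
 *   struct minijail *j = minijail_new();
 *   // ... configure |j| with the minijail_*() setters ...
 *   pid_t pid;
 *   int child_stdout = -1;
 *   char *const argv[] = {"/bin/true", NULL};
 *   minijail_run_pid_pipes(j, argv[0], argv, &pid,
 *                          NULL, &child_stdout, NULL);
 *   int status = minijail_wait(j);  // child's exit status or an error code
 *   minijail_destroy(j);
 */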
2812 int API minijail_run(struct minijail *j, const char *filename,
2813 		     char *const argv[])
2814 {
2815 	struct minijail_run_config config = {
2816 	    .filename = filename,
2817 	    .argv = argv,
2818 	    .envp = NULL,
2819 	    .use_preload = true,
2820 	    .exec_in_child = true,
2821 	};
2822 	return minijail_run_config_internal(j, &config);
2823 }
2824 
2825 int API minijail_run_pid(struct minijail *j, const char *filename,
2826 			 char *const argv[], pid_t *pchild_pid)
2827 {
2828 	struct minijail_run_config config = {
2829 	    .filename = filename,
2830 	    .argv = argv,
2831 	    .envp = NULL,
2832 	    .use_preload = true,
2833 	    .exec_in_child = true,
2834 	    .pchild_pid = pchild_pid,
2835 	};
2836 	return minijail_run_config_internal(j, &config);
2837 }
2838 
2839 int API minijail_run_pipe(struct minijail *j, const char *filename,
2840 			  char *const argv[], int *pstdin_fd)
2841 {
2842 	struct minijail_run_config config = {
2843 	    .filename = filename,
2844 	    .argv = argv,
2845 	    .envp = NULL,
2846 	    .use_preload = true,
2847 	    .exec_in_child = true,
2848 	    .pstdin_fd = pstdin_fd,
2849 	};
2850 	return minijail_run_config_internal(j, &config);
2851 }
2852 
2853 int API minijail_run_pid_pipes(struct minijail *j, const char *filename,
2854 			       char *const argv[], pid_t *pchild_pid,
2855 			       int *pstdin_fd, int *pstdout_fd, int *pstderr_fd)
2856 {
2857 	struct minijail_run_config config = {
2858 	    .filename = filename,
2859 	    .argv = argv,
2860 	    .envp = NULL,
2861 	    .use_preload = true,
2862 	    .exec_in_child = true,
2863 	    .pstdin_fd = pstdin_fd,
2864 	    .pstdout_fd = pstdout_fd,
2865 	    .pstderr_fd = pstderr_fd,
2866 	    .pchild_pid = pchild_pid,
2867 	};
2868 	return minijail_run_config_internal(j, &config);
2869 }
2870 
2871 int API minijail_run_env_pid_pipes(struct minijail *j, const char *filename,
2872 				   char *const argv[], char *const envp[],
2873 				   pid_t *pchild_pid, int *pstdin_fd,
2874 				   int *pstdout_fd, int *pstderr_fd)
2875 {
2876 	struct minijail_run_config config = {
2877 	    .filename = filename,
2878 	    .argv = argv,
2879 	    .envp = envp,
2880 	    .use_preload = true,
2881 	    .exec_in_child = true,
2882 	    .pstdin_fd = pstdin_fd,
2883 	    .pstdout_fd = pstdout_fd,
2884 	    .pstderr_fd = pstderr_fd,
2885 	    .pchild_pid = pchild_pid,
2886 	};
2887 	return minijail_run_config_internal(j, &config);
2888 }
2889 
2890 int API minijail_run_no_preload(struct minijail *j, const char *filename,
2891 				char *const argv[])
2892 {
2893 	struct minijail_run_config config = {
2894 	    .filename = filename,
2895 	    .argv = argv,
2896 	    .envp = NULL,
2897 	    .use_preload = false,
2898 	    .exec_in_child = true,
2899 	};
2900 	return minijail_run_config_internal(j, &config);
2901 }
2902 
2903 int API minijail_run_pid_pipes_no_preload(struct minijail *j,
2904 					  const char *filename,
2905 					  char *const argv[],
2906 					  pid_t *pchild_pid,
2907 					  int *pstdin_fd,
2908 					  int *pstdout_fd,
2909 					  int *pstderr_fd)
2910 {
2911 	struct minijail_run_config config = {
2912 	    .filename = filename,
2913 	    .argv = argv,
2914 	    .envp = NULL,
2915 	    .use_preload = false,
2916 	    .exec_in_child = true,
2917 	    .pstdin_fd = pstdin_fd,
2918 	    .pstdout_fd = pstdout_fd,
2919 	    .pstderr_fd = pstderr_fd,
2920 	    .pchild_pid = pchild_pid,
2921 	};
2922 	return minijail_run_config_internal(j, &config);
2923 }
2924 
2925 int API minijail_run_env_pid_pipes_no_preload(struct minijail *j,
2926 					      const char *filename,
2927 					      char *const argv[],
2928 					      char *const envp[],
2929 					      pid_t *pchild_pid, int *pstdin_fd,
2930 					      int *pstdout_fd, int *pstderr_fd)
2931 {
2932 	struct minijail_run_config config = {
2933 	    .filename = filename,
2934 	    .argv = argv,
2935 	    .envp = envp,
2936 	    .use_preload = false,
2937 	    .exec_in_child = true,
2938 	    .pstdin_fd = pstdin_fd,
2939 	    .pstdout_fd = pstdout_fd,
2940 	    .pstderr_fd = pstderr_fd,
2941 	    .pchild_pid = pchild_pid,
2942 	};
2943 	return minijail_run_config_internal(j, &config);
2944 }
2945 
2946 pid_t API minijail_fork(struct minijail *j)
2947 {
2948 	struct minijail_run_config config = {};
2949 	return minijail_run_config_internal(j, &config);
2950 }
2951 
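/*
 * Core implementation behind minijail_run*() and minijail_fork(). The parent
 * forks the child (via clone(CLONE_NEWPID | ...) when a pid namespace was
 * requested), then handles signal forwarding, the pid file, cgroups, rlimits
 * and uid/gid maps, and in the LD_PRELOAD case sends the marshalled jail
 * over a pipe. The child closes and redirects fds, enters the jail with
 * minijail_enter(), and either returns to the caller (minijail_fork) or
 * execve(2)s config->filename, optionally leaving a pid-namespace init
 * process behind.
 */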
2952 static int minijail_run_internal(struct minijail *j,
2953 				 const struct minijail_run_config *config,
2954 				 struct minijail_run_state *state_out)
2955 {
2956 	int sync_child = 0;
2957 	int ret;
2958 	/* We need to remember this across the minijail_preexec() call. */
2959 	int pid_namespace = j->flags.pids;
2960 	/*
2961 	 * Create an init process if we are entering a pid namespace, unless the
2962 	 * user has explicitly opted out by calling minijail_run_as_init().
2963 	 */
2964 	int do_init = j->flags.do_init && !j->flags.run_as_init;
2965 	int use_preload = config->use_preload;
2966 
2967 	if (use_preload) {
2968 		if (j->hooks_head != NULL)
2969 			die("Minijail hooks are not supported with LD_PRELOAD");
2970 		if (!config->exec_in_child)
2971 			die("minijail_fork is not supported with LD_PRELOAD");
2972 
2973 		/*
2974 		 * Before we fork(2) and execve(2) the child process, we need
2975 		 * to open a pipe(2) to send the minijail configuration over.
2976 		 */
2977 		state_out->child_env =
2978 		    minijail_copy_env(config->envp ? config->envp : environ);
2979 		if (!state_out->child_env)
2980 			return ENOMEM;
2981 		if (setup_preload(j, &state_out->child_env) ||
2982 		    setup_pipe(&state_out->child_env, state_out->pipe_fds))
2983 			return -EFAULT;
2984 	}
2985 
2986 	if (!use_preload) {
2987 		if (j->flags.use_caps && j->caps != 0 &&
2988 		    !j->flags.set_ambient_caps) {
2989 			die("non-empty, non-ambient capabilities are not "
2990 			    "supported without LD_PRELOAD");
2991 		}
2992 	}
2993 
2994 	/* Create pipes for stdin/stdout/stderr as requested by caller. */
2995 	struct {
2996 		bool requested;
2997 		int *pipe_fds;
2998 	} pipe_fd_req[] = {
2999 	    {config->pstdin_fd != NULL, state_out->stdin_fds},
3000 	    {config->pstdout_fd != NULL, state_out->stdout_fds},
3001 	    {config->pstderr_fd != NULL, state_out->stderr_fds},
3002 	};
3003 
3004 	for (size_t i = 0; i < ARRAY_SIZE(pipe_fd_req); ++i) {
3005 		if (pipe_fd_req[i].requested &&
3006 		    pipe(pipe_fd_req[i].pipe_fds) == -1)
3007 			return EFAULT;
3008 	}
3009 
3010 	/*
3011 	 * If the parent process needs to configure the child's runtime
3012 	 * environment after forking, create a pipe(2) to block the child until
3013 	 * configuration is done.
3014 	 */
3015 	if (j->flags.forward_signals || j->flags.pid_file || j->flags.cgroups ||
3016 	    j->rlimit_count || j->flags.userns) {
3017 		sync_child = 1;
3018 		if (pipe(state_out->child_sync_pipe_fds))
3019 			return -EFAULT;
3020 	}
3021 
3022 	/*
3023 	 * Use sys_clone() if and only if we're creating a pid namespace.
3024 	 *
3025 	 * tl;dr: WARNING: do not mix pid namespaces and multithreading.
3026 	 *
3027 	 * In multithreaded programs, there are a bunch of locks inside libc,
3028 	 * some of which may be held by other threads at the time that we call
3029 	 * minijail_run_pid(). If we call fork(), glibc does its level best to
3030 	 * ensure that we hold all of these locks before it calls clone()
3031 	 * internally and drop them after clone() returns, but when we call
3032 	 * sys_clone(2) directly, all that gets bypassed and we end up with a
3033 	 * child address space where some of libc's important locks are held by
3034 	 * other threads (which did not get cloned, and hence will never release
3035 	 * those locks). This is okay so long as we call exec() immediately
3036 	 * after, but a bunch of seemingly-innocent libc functions like setenv()
3037 	 * take locks.
3038 	 *
3039 	 * Hence, only call sys_clone() if we need to, in order to get at pid
3040 	 * namespacing. If we follow this path, the child's address space might
3041 	 * have broken locks; you may only call functions that do not acquire
3042 	 * any locks.
3043 	 *
3044 	 * Unfortunately, fork() acquires every lock it can get its hands on, as
3045 	 * previously detailed, so this function is highly likely to deadlock
3046 	 * later on (see "deadlock here") if we're multithreaded.
3047 	 *
3048 	 * We might hack around this by having the clone()d child (init of the
3049 	 * pid namespace) return directly, rather than leaving the clone()d
3050 	 * process hanging around to be init for the new namespace (and having
3051 	 * its fork()ed child return in turn), but that process would be
3052 	 * crippled with its libc locks potentially broken. We might try
3053 	 * fork()ing in the parent before we clone() to ensure that we own all
3054 	 * the locks, but then we have to have the forked child hanging around
3055 	 * consuming resources (and possibly having file descriptors / shared
3056 	 * memory regions / etc attached). We'd need to keep the child around to
3057 	 * avoid having its children get reparented to init.
3058 	 *
3059 	 * TODO(ellyjones): figure out if the "forked child hanging around"
3060 	 * problem is fixable or not. It would be nice if we worked in this
3061 	 * case.
3062 	 */
3063 	pid_t child_pid;
3064 	if (pid_namespace) {
3065 		unsigned long clone_flags = CLONE_NEWPID | SIGCHLD;
3066 		if (j->flags.userns)
3067 			clone_flags |= CLONE_NEWUSER;
3068 
3069 		child_pid = syscall(SYS_clone, clone_flags, NULL, 0L, 0L, 0L);
3070 
3071 		if (child_pid < 0) {
3072 			if (errno == EPERM)
3073 				pdie("clone(CLONE_NEWPID | ...) failed with EPERM; "
3074 				     "is this process missing CAP_SYS_ADMIN?");
3075 			pdie("clone(CLONE_NEWPID | ...) failed");
3076 		}
3077 	} else {
3078 		child_pid = fork();
3079 
3080 		if (child_pid < 0)
3081 			pdie("fork failed");
3082 	}
3083 
3084 	state_out->child_pid = child_pid;
3085 	if (child_pid) {
3086 		j->initpid = child_pid;
3087 
3088 		if (j->flags.forward_signals) {
3089 			forward_pid = child_pid;
3090 			install_signal_handlers();
3091 		}
3092 
3093 		if (j->flags.pid_file)
3094 			write_pid_file_or_die(j);
3095 
3096 		if (j->flags.cgroups)
3097 			add_to_cgroups_or_die(j);
3098 
3099 		if (j->rlimit_count)
3100 			set_rlimits_or_die(j);
3101 
3102 		if (j->flags.userns)
3103 			write_ugid_maps_or_die(j);
3104 
3105 		if (j->flags.enter_vfs)
3106 			close(j->mountns_fd);
3107 
3108 		if (j->flags.enter_net)
3109 			close(j->netns_fd);
3110 
3111 		if (sync_child)
3112 			parent_setup_complete(state_out->child_sync_pipe_fds);
3113 
3114 		if (use_preload) {
3115 			/*
3116 			 * Add SIGPIPE to the signal mask to avoid getting
3117 			 * killed if the child process finishes or closes its
3118 			 * end of the pipe prematurely.
3119 			 *
3120 			 * TODO(crbug.com/1022170): Use pthread_sigmask instead
3121 			 * of sigprocmask if Minijail is used in multithreaded
3122 			 * programs.
3123 			 */
3124 			sigset_t to_block, to_restore;
3125 			if (sigemptyset(&to_block) < 0)
3126 				pdie("sigemptyset failed");
3127 			if (sigaddset(&to_block, SIGPIPE) < 0)
3128 				pdie("sigaddset failed");
3129 			if (sigprocmask(SIG_BLOCK, &to_block, &to_restore) < 0)
3130 				pdie("sigprocmask failed");
3131 
3132 			/* Send marshalled minijail. */
3133 			close_and_reset(&state_out->pipe_fds[0]);
3134 			ret = minijail_to_fd(j, state_out->pipe_fds[1]);
3135 			close_and_reset(&state_out->pipe_fds[1]);
3136 
3137 			/* Accept any pending SIGPIPE. */
3138 			while (true) {
3139 				const struct timespec zero_time = {0, 0};
3140 				const int sig = sigtimedwait(&to_block, NULL, &zero_time);
3141 				if (sig < 0) {
3142 					if (errno != EINTR)
3143 						break;
3144 				} else {
3145 					if (sig != SIGPIPE)
3146 						die("unexpected signal %d", sig);
3147 				}
3148 			}
3149 
3150 			/* Restore the signal mask to its original state. */
3151 			if (sigprocmask(SIG_SETMASK, &to_restore, NULL) < 0)
3152 				pdie("sigprocmask failed");
3153 
3154 			if (ret) {
3155 				warn("failed to send marshalled minijail: %s",
3156 				     strerror(-ret));
3157 				kill(j->initpid, SIGKILL);
3158 			}
3159 		}
3160 
3161 		return 0;
3162 	}
3163 
3164 	/* Child process. */
3165 	if (j->flags.reset_signal_mask) {
3166 		sigset_t signal_mask;
3167 		if (sigemptyset(&signal_mask) != 0)
3168 			pdie("sigemptyset failed");
3169 		if (sigprocmask(SIG_SETMASK, &signal_mask, NULL) != 0)
3170 			pdie("sigprocmask failed");
3171 	}
3172 
3173 	if (j->flags.reset_signal_handlers) {
3174 		int signum;
3175 		for (signum = 0; signum <= SIGRTMAX; signum++) {
3176 			/*
3177 			 * Ignore EINVAL since some signal numbers in the range
3178 			 * might not be valid.
3179 			 */
3180 			if (signal(signum, SIG_DFL) == SIG_ERR &&
3181 			    errno != EINVAL) {
3182 				pdie("failed to reset signal %d disposition",
3183 				     signum);
3184 			}
3185 		}
3186 	}
3187 
3188 	if (j->flags.close_open_fds) {
3189 		const size_t kMaxInheritableFdsSize = 10 + MAX_PRESERVED_FDS;
3190 		int inheritable_fds[kMaxInheritableFdsSize];
3191 		size_t size = 0;
3192 
3193 		int *pipe_fds[] = {
3194 		    state_out->pipe_fds,   state_out->child_sync_pipe_fds,
3195 		    state_out->stdin_fds,  state_out->stdout_fds,
3196 		    state_out->stderr_fds,
3197 		};
3198 
3199 		for (size_t i = 0; i < ARRAY_SIZE(pipe_fds); ++i) {
3200 			if (pipe_fds[i][0] != -1) {
3201 				inheritable_fds[size++] = pipe_fds[i][0];
3202 			}
3203 			if (pipe_fds[i][1] != -1) {
3204 				inheritable_fds[size++] = pipe_fds[i][1];
3205 			}
3206 		}
3207 
3208 		/*
3209 		 * Preserve namespace file descriptors over the close_open_fds()
3210 		 * call. These are closed in minijail_enter() so they won't leak
3211 		 * into the child process.
3212 		 */
3213 		if (j->flags.enter_vfs)
3214 			minijail_preserve_fd(j, j->mountns_fd, j->mountns_fd);
3215 		if (j->flags.enter_net)
3216 			minijail_preserve_fd(j, j->netns_fd, j->netns_fd);
3217 
3218 		for (size_t i = 0; i < j->preserved_fd_count; i++) {
3219 			/*
3220 			 * Preserve all parent_fds. They will be dup2(2)-ed in
3221 			 * the child later.
3222 			 */
3223 			inheritable_fds[size++] = j->preserved_fds[i].parent_fd;
3224 		}
3225 
3226 		if (close_open_fds(inheritable_fds, size) < 0)
3227 			die("failed to close open file descriptors");
3228 	}
3229 
3230 	if (redirect_fds(j))
3231 		die("failed to set up fd redirections");
3232 
3233 	if (sync_child)
3234 		wait_for_parent_setup(state_out->child_sync_pipe_fds);
3235 
3236 	if (j->flags.userns)
3237 		enter_user_namespace(j);
3238 
3239 	setup_child_std_fds(j, state_out);
3240 
3241 	/* If running an init program, let it decide when/how to mount /proc. */
3242 	if (pid_namespace && !do_init)
3243 		j->flags.remount_proc_ro = 0;
3244 
3245 	if (use_preload) {
3246 		/* Strip out flags that cannot be inherited across execve(2). */
3247 		minijail_preexec(j);
3248 	} else {
3249 		/*
3250 		 * If not using LD_PRELOAD, do all jailing before execve(2).
3251 		 * Note that PID namespaces can only be entered on fork(2),
3252 		 * so that flag is still cleared.
3253 		 */
3254 		j->flags.pids = 0;
3255 	}
3256 
3257 	/*
3258 	 * Jail this process.
3259 	 * If forking, return.
3260 	 * If not, execve(2) the target.
3261 	 */
3262 	minijail_enter(j);
3263 
3264 	if (config->exec_in_child && pid_namespace && do_init) {
3265 		/*
3266 		 * pid namespace: this process will become init inside the new
3267 		 * namespace. We don't want all programs we might exec to have
3268 		 * to know how to be init. Normally (do_init == 1) we fork off
3269 		 * a child to actually run the program. If |do_init == 0|, we
3270 		 * let the program keep pid 1 and be init.
3271 		 *
3272 		 * If we're multithreaded, we'll probably deadlock here. See
3273 		 * WARNING above.
3274 		 */
3275 		child_pid = fork();
3276 		if (child_pid < 0) {
3277 			_exit(child_pid);
3278 		} else if (child_pid > 0) {
3279 			minijail_free_run_state(state_out);
3280 
3281 			/*
3282 			 * Best effort. Don't bother checking the return value.
3283 			 */
3284 			prctl(PR_SET_NAME, "minijail-init");
3285 			init(child_pid);	/* Never returns. */
3286 		}
3287 		state_out->child_pid = child_pid;
3288 	}
3289 
3290 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_EXECVE);
3291 
3292 	if (!config->exec_in_child)
3293 		return 0;
3294 
3295 	/*
3296 	 * We're going to execve(), so make sure any remaining resources are
3297 	 * freed. Exceptions are:
3298 	 *  1. The child environment. No need to worry about freeing it since
3299 	 *     execve reinitializes the heap anyway.
3300 	 *  2. The read side of the LD_PRELOAD pipe, which we need to hand down
3301 	 *     to the target, where the preloaded code will read from it and
3302 	 *     then close it.
3303 	 */
3304 	state_out->pipe_fds[0] = -1;
3305 	char *const *child_env = state_out->child_env;
3306 	state_out->child_env = NULL;
3307 	minijail_free_run_state(state_out);
3308 
3309 	/*
3310 	 * If we aren't pid-namespaced, or the jailed program asked to be init:
3311 	 *   calling process
3312 	 *   -> execve()-ing process
3313 	 * If we are:
3314 	 *   calling process
3315 	 *   -> init()-ing process
3316 	 *      -> execve()-ing process
3317 	 */
3318 	if (!child_env)
3319 		child_env = config->envp ? config->envp : environ;
3320 	execve(config->filename, config->argv, child_env);
3321 
3322 	ret = (errno == ENOENT ? MINIJAIL_ERR_NO_COMMAND : MINIJAIL_ERR_NO_ACCESS);
3323 	pwarn("execve(%s) failed", config->filename);
3324 	_exit(ret);
3325 }
3326 
3327 static int
3328 minijail_run_config_internal(struct minijail *j,
3329 			     const struct minijail_run_config *config)
3330 {
3331 	struct minijail_run_state state = {
3332 	    .child_pid = -1,
3333 	    .pipe_fds = {-1, -1},
3334 	    .stdin_fds = {-1, -1},
3335 	    .stdout_fds = {-1, -1},
3336 	    .stderr_fds = {-1, -1},
3337 	    .child_sync_pipe_fds = {-1, -1},
3338 	    .child_env = NULL,
3339 	};
3340 	int ret = minijail_run_internal(j, config, &state);
3341 
3342 	if (ret == 0) {
3343 		if (config->pchild_pid)
3344 			*config->pchild_pid = state.child_pid;
3345 
3346 		/* Grab stdin/stdout/stderr descriptors requested by caller. */
3347 		struct {
3348 			int *pfd;
3349 			int *psrc;
3350 		} fd_map[] = {
3351 		    {config->pstdin_fd, &state.stdin_fds[1]},
3352 		    {config->pstdout_fd, &state.stdout_fds[0]},
3353 		    {config->pstderr_fd, &state.stderr_fds[0]},
3354 		};
3355 
3356 		for (size_t i = 0; i < ARRAY_SIZE(fd_map); ++i) {
3357 			if (fd_map[i].pfd) {
3358 				*fd_map[i].pfd = *fd_map[i].psrc;
3359 				*fd_map[i].psrc = -1;
3360 			}
3361 		}
3362 
3363 		if (!config->exec_in_child)
3364 			ret = state.child_pid;
3365 	}
3366 
3367 	minijail_free_run_state(&state);
3368 
3369 	return ret;
3370 }
3371 
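/*
 * Wait for the jailed init process and translate its wait status: a normal
 * exit yields the child's exit status, death by SIGSYS yields
 * MINIJAIL_ERR_JAIL, and any other fatal signal yields
 * MINIJAIL_ERR_SIG_BASE + signum (the bash(1) convention). |expected_signal|
 * suppresses the warning when that signal was the intended cause of death,
 * e.g. SIGTERM sent by minijail_kill().
 */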
3372 static int minijail_wait_internal(struct minijail *j, int expected_signal)
3373 {
3374 	if (j->initpid <= 0)
3375 		return -ECHILD;
3376 
3377 	int st;
3378 	while (true) {
3379 		const int ret = waitpid(j->initpid, &st, 0);
3380 		if (ret >= 0)
3381 			break;
3382 		if (errno != EINTR)
3383 			return -errno;
3384 	}
3385 
3386 	if (!WIFEXITED(st)) {
3387 		int error_status = st;
3388 		if (WIFSIGNALED(st)) {
3389 			int signum = WTERMSIG(st);
3390 			if (signum != expected_signal) {
3391 				warn("child process %d received signal %d",
3392 				     j->initpid, signum);
3393 			}
3394 			/*
3395 			 * We return MINIJAIL_ERR_JAIL if the process received
3396 			 * SIGSYS, which happens when a syscall is blocked by
3397 			 * seccomp filters.
3398 			 * If not, we do what bash(1) does:
3399 			 * $? = 128 + signum
3400 			 */
3401 			if (signum == SIGSYS) {
3402 				error_status = MINIJAIL_ERR_JAIL;
3403 			} else {
3404 				error_status = MINIJAIL_ERR_SIG_BASE + signum;
3405 			}
3406 		}
3407 		return error_status;
3408 	}
3409 
3410 	int exit_status = WEXITSTATUS(st);
3411 	if (exit_status != 0)
3412 		info("child process %d exited with status %d",
3413 		     j->initpid, exit_status);
3414 
3415 	return exit_status;
3416 }
3417 
3418 int API minijail_kill(struct minijail *j)
3419 {
3420 	if (j->initpid <= 0)
3421 		return -ECHILD;
3422 
3423 	if (kill(j->initpid, SIGTERM))
3424 		return -errno;
3425 
3426 	return minijail_wait_internal(j, SIGTERM);
3427 }
3428 
3429 int API minijail_wait(struct minijail *j)
3430 {
3431 	return minijail_wait_internal(j, 0);
3432 }
3433 
3434 void API minijail_destroy(struct minijail *j)
3435 {
3436 	size_t i;
3437 
3438 	if (j->filter_prog) {
3439 		free(j->filter_prog->filter);
3440 		free(j->filter_prog);
3441 	}
3442 	free_mounts_list(j);
3443 	free_remounts_list(j);
3444 	while (j->hooks_head) {
3445 		struct hook *c = j->hooks_head;
3446 		j->hooks_head = c->next;
3447 		free(c);
3448 	}
3449 	j->hooks_tail = NULL;
3450 	if (j->user)
3451 		free(j->user);
3452 	if (j->suppl_gid_list)
3453 		free(j->suppl_gid_list);
3454 	if (j->chrootdir)
3455 		free(j->chrootdir);
3456 	if (j->pid_file_path)
3457 		free(j->pid_file_path);
3458 	if (j->uidmap)
3459 		free(j->uidmap);
3460 	if (j->gidmap)
3461 		free(j->gidmap);
3462 	if (j->hostname)
3463 		free(j->hostname);
3464 	if (j->preload_path)
3465 		free(j->preload_path);
3466 	if (j->alt_syscall_table)
3467 		free(j->alt_syscall_table);
3468 	for (i = 0; i < j->cgroup_count; ++i)
3469 		free(j->cgroups[i]);
3470 	free(j);
3471 }
3472 
3473 void API minijail_log_to_fd(int fd, int min_priority)
3474 {
3475 	init_logging(LOG_TO_FD, fd, min_priority);
3476 }
3477