1 /* Copyright 2012 The ChromiumOS Authors
2  * Use of this source code is governed by a BSD-style license that can be
3  * found in the LICENSE file.
4  */
5 
6 #define _BSD_SOURCE
7 #define _DEFAULT_SOURCE
8 #define _GNU_SOURCE
9 
10 #include <asm/unistd.h>
11 #include <assert.h>
12 #include <dirent.h>
13 #include <errno.h>
14 #include <fcntl.h>
15 #include <grp.h>
16 #include <linux/capability.h>
17 #include <linux/filter.h>
18 #include <sched.h>
19 #include <signal.h>
20 #include <stddef.h>
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/capability.h>
25 #include <sys/mount.h>
26 #include <sys/param.h>
27 #include <sys/prctl.h>
28 #include <sys/resource.h>
29 #include <sys/stat.h>
30 #include <sys/sysmacros.h>
31 #include <sys/types.h>
32 #include <sys/user.h>
33 #include <sys/wait.h>
34 #include <syscall.h>
35 #include <unistd.h>
36 
37 #include "landlock_util.h"
38 #include "libminijail-private.h"
39 #include "libminijail.h"
40 
41 #include "signal_handler.h"
42 #include "syscall_filter.h"
43 #include "syscall_wrapper.h"
44 #include "system.h"
45 #include "util.h"
46 
47 /* Until these are reliably available in linux/prctl.h. */
48 #ifndef PR_ALT_SYSCALL
49 #define PR_ALT_SYSCALL 0x43724f53
50 #endif
51 
52 /* New cgroup namespace might not be in linux-headers yet. */
53 #ifndef CLONE_NEWCGROUP
54 #define CLONE_NEWCGROUP 0x02000000
55 #endif
56 
57 #define MAX_CGROUPS 10 /* 10 different controllers supported by Linux. */
58 
59 #define MAX_RLIMITS 32 /* Currently there are 15 supported by Linux. */
60 
61 #define MAX_PRESERVED_FDS 128U
62 
63 /* Keyctl commands. */
64 #define KEYCTL_JOIN_SESSION_KEYRING 1
65 
66 /*
67  * The userspace equivalent of MNT_USER_SETTABLE_MASK, which is the mask of all
68  * flags that can be modified by MS_REMOUNT.
69  */
70 #define MS_USER_SETTABLE_MASK                                                  \
71 	(MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_NOATIME | MS_NODIRATIME |       \
72 	 MS_RELATIME | MS_RDONLY)
73 
74 /*
75  * Required for Android host glibc which is permanently stuck on 2.17. Causes
76  * no harm for newer glibc versions.
77  */
78 #ifndef MS_NOSYMFOLLOW
79 /* Added locally in kernels 4.x+. */
80 #define MS_NOSYMFOLLOW 256
81 #endif
82 
83 struct minijail_rlimit {
84 	int type;
85 	rlim_t cur;
86 	rlim_t max;
87 };
88 
89 struct mountpoint {
90 	char *src;
91 	char *dest;
92 	char *type;
93 	char *data;
94 	int has_data;
95 	unsigned long flags;
96 	struct mountpoint *next;
97 };
98 
99 struct minijail_remount {
100 	unsigned long remount_mode;
101 	char *mount_name;
102 	struct minijail_remount *next;
103 };
104 
105 struct hook {
106 	minijail_hook_t hook;
107 	void *payload;
108 	minijail_hook_event_t event;
109 	struct hook *next;
110 };
111 
112 struct fs_rule {
113 	char *path;
114 	uint64_t landlock_flags;
115 	struct fs_rule *next;
116 };
117 
118 struct preserved_fd {
119 	int parent_fd;
120 	int child_fd;
121 };
122 
123 /*
124  * minijail struct: new fields should either be marshaled/unmarshaled or have a
125  * comment explaining why that's unnecessary.
126  */
127 struct minijail {
128 	/*
129 	 * WARNING: new bool flags should always be added to this struct,
130 	 * unless you’re certain they don’t need to remain after marshaling.
131 	 * If you add a flag here you need to make sure it's
132 	 * accounted for in minijail_pre{enter|exec}() below.
133 	 */
134 	struct {
135 		bool uid : 1;
136 		bool gid : 1;
137 		bool inherit_suppl_gids : 1;
138 		bool set_suppl_gids : 1;
139 		bool keep_suppl_gids : 1;
140 		bool use_caps : 1;
141 		bool capbset_drop : 1;
142 		bool set_ambient_caps : 1;
143 		bool vfs : 1;
144 		bool enter_vfs : 1;
145 		bool pids : 1;
146 		bool ipc : 1;
147 		bool uts : 1;
148 		bool net : 1;
149 		bool net_loopback : 1;
150 		bool enter_net : 1;
151 		bool ns_cgroups : 1;
152 		bool userns : 1;
153 		bool disable_setgroups : 1;
154 		bool seccomp : 1;
155 		bool remount_proc_ro : 1;
156 		bool no_new_privs : 1;
157 		bool seccomp_filter : 1;
158 		bool seccomp_filter_tsync : 1;
159 		bool seccomp_filter_logging : 1;
160 		bool seccomp_filter_allow_speculation : 1;
161 		bool chroot : 1;
162 		bool pivot_root : 1;
163 		bool mount_dev : 1;
164 		bool mount_tmp : 1;
165 		bool do_init : 1;
166 		bool run_as_init : 1;
167 		bool pid_file : 1;
168 		bool cgroups : 1;
169 		bool alt_syscall : 1;
170 		bool reset_signal_mask : 1;
171 		bool reset_signal_handlers : 1;
172 		bool close_open_fds : 1;
173 		bool new_session_keyring : 1;
174 		bool forward_signals : 1;
175 		bool setsid : 1;
176 		bool using_minimalistic_mountns : 1;
177 		bool enable_fs_restrictions : 1;
178 		bool enable_profile_fs_restrictions : 1;
179 		bool enable_default_runtime : 1;
180 		bool enable_new_sessions : 1;
181 	} flags;
182 	uid_t uid;
183 	gid_t gid;
184 	gid_t usergid;
185 	char *user;
186 	size_t suppl_gid_count;
187 	gid_t *suppl_gid_list;
188 	uint64_t caps;
189 	uint64_t cap_bset;
190 	pid_t initpid;
191 	int mountns_fd;
192 	int netns_fd;
193 	int fs_rules_fd;
194 	int fs_rules_landlock_abi;
195 	char *chrootdir;
196 	char *pid_file_path;
197 	char *uidmap;
198 	char *gidmap;
199 	char *hostname;
200 	char *preload_path;
201 	/*
202 	 * Filename that will be executed, unless an ELF fd is used instead.
203 	 * This field is only used for logs and isn't included in marshaling.
204 	 */
205 	char *filename;
206 	size_t filter_len;
207 	struct sock_fprog *filter_prog;
208 	char *alt_syscall_table;
209 	struct mountpoint *mounts_head;
210 	struct mountpoint *mounts_tail;
211 	size_t mounts_count;
212 	unsigned long remount_mode;
213 	struct minijail_remount *remounts_head;
214 	struct minijail_remount *remounts_tail;
215 	size_t tmpfs_size;
216 	struct fs_rule *fs_rules_head;
217 	struct fs_rule *fs_rules_tail;
218 	size_t fs_rules_count;
219 	char *cgroups[MAX_CGROUPS];
220 	size_t cgroup_count;
221 	struct minijail_rlimit rlimits[MAX_RLIMITS];
222 	size_t rlimit_count;
223 	uint64_t securebits_skip_mask;
224 	struct hook *hooks_head;
225 	struct hook *hooks_tail;
226 	struct preserved_fd preserved_fds[MAX_PRESERVED_FDS];
227 	size_t preserved_fd_count;
228 	char *seccomp_policy_path;
229 };
230 
231 static void run_hooks_or_die(const struct minijail *j,
232 			     minijail_hook_event_t event);
233 
234 static bool seccomp_is_logging_allowed(const struct minijail *j)
235 {
236 	return seccomp_default_ret_log() || j->flags.seccomp_filter_logging;
237 }
238 
239 static void free_mounts_list(struct minijail *j)
240 {
241 	while (j->mounts_head) {
242 		struct mountpoint *m = j->mounts_head;
243 		j->mounts_head = j->mounts_head->next;
244 		free(m->data);
245 		free(m->type);
246 		free(m->dest);
247 		free(m->src);
248 		free(m);
249 	}
250 	// No need to clear mounts_head as we know it's NULL after the loop.
251 	j->mounts_tail = NULL;
252 }
253 
254 static void free_remounts_list(struct minijail *j)
255 {
256 	while (j->remounts_head) {
257 		struct minijail_remount *m = j->remounts_head;
258 		j->remounts_head = j->remounts_head->next;
259 		free(m->mount_name);
260 		free(m);
261 	}
262 	// No need to clear remounts_head as we know it's NULL after the loop.
263 	j->remounts_tail = NULL;
264 }
265 
266 static void free_fs_rules_list(struct minijail *j)
267 {
268 	while (j->fs_rules_head) {
269 		struct fs_rule *r = j->fs_rules_head;
270 		j->fs_rules_head = j->fs_rules_head->next;
271 		free(r->path);
272 		free(r);
273 	}
274 	j->fs_rules_tail = NULL;
275 }
276 
277 /*
278  * Writes exactly n bytes from buf to file descriptor fd.
279  * Returns 0 on success or a negative error code on error.
280  */
281 static int write_exactly(int fd, const void *buf, size_t n)
282 {
283 	const char *p = buf;
284 	while (n > 0) {
285 		const ssize_t written = write(fd, p, n);
286 		if (written < 0) {
287 			if (errno == EINTR)
288 				continue;
289 
290 			return -errno;
291 		}
292 
293 		p += written;
294 		n -= written;
295 	}
296 
297 	return 0;
298 }
299 
300 /* Closes *pfd and sets it to -1. */
301 static void close_and_reset(int *pfd)
302 {
303 	if (*pfd != -1)
304 		close(*pfd);
305 	*pfd = -1;
306 }
307 
308 /*
309  * Strip out flags meant for the parent.
310  * We keep things that are not inherited across execve(2) (e.g. capabilities),
311  * or are easier to set after execve(2) (e.g. seccomp filters).
312  */
313 void minijail_preenter(struct minijail *j)
314 {
315 	j->flags.vfs = 0;
316 	j->flags.enter_vfs = 0;
317 	j->flags.ns_cgroups = 0;
318 	j->flags.net = 0;
319 	j->flags.net_loopback = 0;
320 	j->flags.uts = 0;
321 	j->flags.remount_proc_ro = 0;
322 	j->flags.pids = 0;
323 	j->flags.do_init = 0;
324 	j->flags.run_as_init = 0;
325 	j->flags.pid_file = 0;
326 	j->flags.cgroups = 0;
327 	j->flags.forward_signals = 0;
328 	j->flags.setsid = 0;
329 	j->remount_mode = 0;
330 	j->flags.using_minimalistic_mountns = 0;
331 	j->flags.enable_profile_fs_restrictions = 0;
332 	j->flags.enable_default_runtime = 0;
333 	j->flags.enable_new_sessions = 0;
334 	free_remounts_list(j);
335 }
336 
337 static bool fs_refer_restriction_supported(struct minijail *j)
338 {
339 	if (j->fs_rules_landlock_abi < 0) {
340 		const int abi = landlock_create_ruleset(
341 		    NULL, 0, LANDLOCK_CREATE_RULESET_VERSION);
342 		/*
343 		 * If we have a valid ABI, save the result. Otherwise, leave
344 		 * the struct field unmodified to make sure it's correctly
345 		 * marshaled and unmarshaled.
346 		 */
347 		if (abi > 0) {
348 			j->fs_rules_landlock_abi = abi;
349 		}
350 	}
351 
352 	return j->fs_rules_landlock_abi >= LANDLOCK_ABI_FS_REFER_SUPPORTED;
353 }
354 
355 /* Sets fs_rules_fd to an empty ruleset, if Landlock is available. */
356 static int setup_fs_rules_fd(struct minijail *j)
357 {
358 	struct minijail_landlock_ruleset_attr ruleset_attr = {
359 	    .handled_access_fs = HANDLED_ACCESS_TYPES};
360 	if (fs_refer_restriction_supported(j)) {
361 		ruleset_attr.handled_access_fs |= LANDLOCK_ACCESS_FS_REFER;
362 	}
363 
364 	j->fs_rules_fd =
365 	    landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
366 	if (j->fs_rules_fd < 0) {
367 		/*
368 		 * As of Landlock ABI=3, the useful errors we expect here are
369 		 * ENOSYS or EOPNOTSUPP. In both cases, Landlock is not
370 		 * supported by the kernel and Minijail can silently ignore it.
371 		 * TODO(b/300142205): log when we no longer have 5.4 kernels in
372 		 * ChromeOS (~EoY 2024).
373 		 */
374 		return errno;
375 	}
376 
377 	return 0;
378 }
379 
380 /* Adds a rule for a given path to apply once minijail is entered. */
381 static int add_fs_restriction_path(struct minijail *j, const char *path,
382 				   uint64_t landlock_flags)
383 {
384 	struct fs_rule *r = calloc(1, sizeof(*r));
385 	if (!r)
386 		return -ENOMEM;
387 	r->path = strdup(path);
388 	r->landlock_flags = landlock_flags;
389 
390 	if (j->fs_rules_tail) {
391 		j->fs_rules_tail->next = r;
392 		j->fs_rules_tail = r;
393 	} else {
394 		j->fs_rules_head = r;
395 		j->fs_rules_tail = r;
396 	}
397 
398 	/*
399 	 * If this is our first rule, set up the rules FD early for API users.
400 	 *
401 	 * This is important for users calling minijail_enter() directly.
402 	 * Otherwise, this is handled later inside minijail_run_internal().
403 	 *
404 	 * The reason for this is because setup_fs_rules_fd() needs to be
405 	 * called from inside the process that applies Landlock rules. For
406 	 * minijail_enter(), that's this process. For minijail_run_internal(),
407 	 * that's the child process.
408 	 */
409 	if (j->fs_rules_count == 0)
410 		setup_fs_rules_fd(j);
411 
412 	j->fs_rules_count++;
413 	return 0;
414 }
415 
416 bool mount_has_bind_flag(struct mountpoint *m)
417 {
418 	return !!(m->flags & MS_BIND);
419 }
420 
421 bool mount_has_readonly_flag(struct mountpoint *m)
422 {
423 	return !!(m->flags & MS_RDONLY);
424 }
425 
426 bool mount_events_allowed(struct mountpoint *m)
427 {
428 	return !!(m->flags & MS_SHARED) || !!(m->flags & MS_SLAVE);
429 }
430 
431 /*
432  * Strip out flags meant for the child.
433  * We keep things that are inherited across execve(2).
434  */
435 void minijail_preexec(struct minijail *j)
436 {
437 	int vfs = j->flags.vfs;
438 	int enter_vfs = j->flags.enter_vfs;
439 	int ns_cgroups = j->flags.ns_cgroups;
440 	int net = j->flags.net;
441 	int net_loopback = j->flags.net_loopback;
442 	int uts = j->flags.uts;
443 	int remount_proc_ro = j->flags.remount_proc_ro;
444 	int userns = j->flags.userns;
445 	int using_minimalistic_mountns = j->flags.using_minimalistic_mountns;
446 	int enable_fs_restrictions = j->flags.enable_fs_restrictions;
447 	int enable_profile_fs_restrictions =
448 	    j->flags.enable_profile_fs_restrictions;
449 	int enable_default_runtime = j->flags.enable_default_runtime;
450 	int enable_new_sessions = j->flags.enable_new_sessions;
451 	if (j->user)
452 		free(j->user);
453 	j->user = NULL;
454 	if (j->suppl_gid_list)
455 		free(j->suppl_gid_list);
456 	j->suppl_gid_list = NULL;
457 	if (j->preload_path)
458 		free(j->preload_path);
459 	j->preload_path = NULL;
460 	free_mounts_list(j);
461 	free_fs_rules_list(j);
462 	memset(&j->flags, 0, sizeof(j->flags));
463 	/* Now restore anything we meant to keep. */
464 	j->flags.vfs = vfs;
465 	j->flags.enter_vfs = enter_vfs;
466 	j->flags.ns_cgroups = ns_cgroups;
467 	j->flags.net = net;
468 	j->flags.net_loopback = net_loopback;
469 	j->flags.uts = uts;
470 	j->flags.remount_proc_ro = remount_proc_ro;
471 	j->flags.userns = userns;
472 	j->flags.using_minimalistic_mountns = using_minimalistic_mountns;
473 	j->flags.enable_fs_restrictions = enable_fs_restrictions;
474 	j->flags.enable_profile_fs_restrictions =
475 	    enable_profile_fs_restrictions;
476 	j->flags.enable_default_runtime = enable_default_runtime;
477 	j->flags.enable_new_sessions = enable_new_sessions;
478 	/* Note, |pids| will already have been used before this call. */
479 }
480 
481 /* Minijail API. */
482 
483 struct minijail API *minijail_new(void)
484 {
485 	struct minijail *j = calloc(1, sizeof(struct minijail));
486 	if (j) {
487 		j->remount_mode = MS_PRIVATE;
488 		j->fs_rules_fd = -1;
489 		j->fs_rules_landlock_abi = -1;
490 		j->flags.using_minimalistic_mountns = false;
491 		j->flags.enable_fs_restrictions = true;
492 		j->flags.enable_profile_fs_restrictions = true;
493 		j->flags.enable_default_runtime = true;
494 		j->flags.enable_new_sessions = true;
495 	}
496 	return j;
497 }
498 
499 void API minijail_change_uid(struct minijail *j, uid_t uid)
500 {
501 	if (uid == 0)
502 		die("useless change to uid 0");
503 	j->uid = uid;
504 	j->flags.uid = 1;
505 }
506 
507 void API minijail_change_gid(struct minijail *j, gid_t gid)
508 {
509 	if (gid == 0)
510 		die("useless change to gid 0");
511 	j->gid = gid;
512 	j->flags.gid = 1;
513 }
514 
515 void API minijail_set_supplementary_gids(struct minijail *j, size_t size,
516 					 const gid_t *list)
517 {
518 	size_t i;
519 
520 	if (j->flags.inherit_suppl_gids)
521 		die("cannot inherit *and* set supplementary groups");
522 	if (j->flags.keep_suppl_gids)
523 		die("cannot keep *and* set supplementary groups");
524 
525 	if (size == 0) {
526 		/* Clear supplementary groups. */
527 		j->suppl_gid_list = NULL;
528 		j->suppl_gid_count = 0;
529 		j->flags.set_suppl_gids = 1;
530 		return;
531 	}
532 
533 	/* Copy the gid_t array. */
534 	j->suppl_gid_list = calloc(size, sizeof(gid_t));
535 	if (!j->suppl_gid_list) {
536 		die("failed to allocate internal supplementary group array");
537 	}
538 	for (i = 0; i < size; i++) {
539 		j->suppl_gid_list[i] = list[i];
540 	}
541 	j->suppl_gid_count = size;
542 	j->flags.set_suppl_gids = 1;
543 }
544 
545 void API minijail_keep_supplementary_gids(struct minijail *j)
546 {
547 	j->flags.keep_suppl_gids = 1;
548 }
549 
550 int API minijail_change_user(struct minijail *j, const char *user)
551 {
552 	uid_t uid;
553 	gid_t gid;
554 	int rc = lookup_user(user, &uid, &gid);
555 	if (rc)
556 		return rc;
557 	minijail_change_uid(j, uid);
558 	j->user = strdup(user);
559 	if (!j->user)
560 		return -ENOMEM;
561 	j->usergid = gid;
562 	return 0;
563 }
564 
565 int API minijail_change_group(struct minijail *j, const char *group)
566 {
567 	gid_t gid;
568 	int rc = lookup_group(group, &gid);
569 	if (rc)
570 		return rc;
571 	minijail_change_gid(j, gid);
572 	return 0;
573 }
574 
575 void API minijail_use_seccomp(struct minijail *j)
576 {
577 	j->flags.seccomp = 1;
578 }
579 
580 void API minijail_no_new_privs(struct minijail *j)
581 {
582 	j->flags.no_new_privs = 1;
583 }
584 
585 void API minijail_use_seccomp_filter(struct minijail *j)
586 {
587 	j->flags.seccomp_filter = 1;
588 }
589 
590 void API minijail_set_seccomp_filter_tsync(struct minijail *j)
591 {
592 	if (j->filter_len > 0 && j->filter_prog != NULL) {
593 		die("minijail_set_seccomp_filter_tsync() must be called "
594 		    "before minijail_parse_seccomp_filters()");
595 	}
596 
597 	if (seccomp_is_logging_allowed(j) && !seccomp_ret_log_available()) {
598 		/*
599 		 * If SECCOMP_RET_LOG is not available, we don't want to use
600 		 * SECCOMP_RET_TRAP to both kill the entire process and report
601 		 * failing syscalls, since it will be brittle. Just bail.
602 		 */
603 		die("SECCOMP_RET_LOG not available, cannot use logging with "
604 		    "thread sync at the same time");
605 	}
606 
607 	j->flags.seccomp_filter_tsync = 1;
608 }
609 
610 void API minijail_set_seccomp_filter_allow_speculation(struct minijail *j)
611 {
612 	if (j->filter_len > 0 && j->filter_prog != NULL) {
613 		die("minijail_set_seccomp_filter_allow_speculation() must be "
614 		    "called before minijail_parse_seccomp_filters()");
615 	}
616 
617 	j->flags.seccomp_filter_allow_speculation = 1;
618 }
619 
620 void API minijail_log_seccomp_filter_failures(struct minijail *j)
621 {
622 	if (j->filter_len > 0 && j->filter_prog != NULL) {
623 		die("minijail_log_seccomp_filter_failures() must be called "
624 		    "before minijail_parse_seccomp_filters()");
625 	}
626 
627 	if (j->flags.seccomp_filter_tsync && !seccomp_ret_log_available()) {
628 		/*
629 		 * If SECCOMP_RET_LOG is not available, we don't want to use
630 		 * SECCOMP_RET_TRAP to both kill the entire process and report
631 		 * failing syscalls, since it will be brittle. Just bail.
632 		 */
633 		die("SECCOMP_RET_LOG not available, cannot use thread sync "
634 		    "with logging at the same time");
635 	}
636 
637 	if (debug_logging_allowed()) {
638 		j->flags.seccomp_filter_logging = 1;
639 	} else {
640 		warn("non-debug build: ignoring request to enable seccomp "
641 		     "logging");
642 	}
643 }
644 
645 void API minijail_set_using_minimalistic_mountns(struct minijail *j)
646 {
647 	j->flags.using_minimalistic_mountns = true;
648 }
649 
650 void API minijail_set_enable_new_sessions(struct minijail *j,
651 					  bool enable_new_sessions)
652 {
653 	j->flags.enable_new_sessions = enable_new_sessions;
654 }
655 
656 void API minijail_set_enable_default_runtime(struct minijail *j,
657 					     bool enable_default_runtime)
658 {
659 	j->flags.enable_default_runtime = enable_default_runtime;
660 }
661 
662 bool API minijail_get_enable_default_runtime(struct minijail *j)
663 {
664 	return j->flags.enable_default_runtime;
665 }
666 
667 bool API minijail_is_fs_restriction_available(void)
668 {
669 	const int abi =
670 	    landlock_create_ruleset(NULL, 0, LANDLOCK_CREATE_RULESET_VERSION);
671 	// ABI > 0 is considered supported.
672 	return abi > 0;
673 }
674 
675 void API minijail_disable_fs_restrictions(struct minijail *j)
676 {
677 	j->flags.enable_fs_restrictions = false;
678 }
679 
680 void API minijail_set_enable_profile_fs_restrictions(struct minijail *j)
681 {
682 	j->flags.enable_profile_fs_restrictions = true;
683 }
684 
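/*
 * Editor's note (summary of the function below): mirrors the minimalistic
 * mountns profile into Landlock rules. Every mount destination gets
 * read-execute access; mounts that are writable or allow mount events also
 * get advanced read-write access; finally a small default set of system
 * paths (/dev, /proc, and /tmp when a VFS namespace is used) is allowed.
 * No-op unless both the minimalistic mountns and profile fs restrictions
 * are enabled.
 */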
685 void API minijail_add_minimalistic_mountns_fs_rules(struct minijail *j)
686 {
687 	struct mountpoint *m = j->mounts_head;
688 	bool landlock_enabled_by_profile = false;
689 	if (!j->flags.using_minimalistic_mountns ||
690 	    !j->flags.enable_profile_fs_restrictions)
691 		return;
692 
693 	/* Apply Landlock rules. */
694 	while (m) {
695 		landlock_enabled_by_profile = true;
696 		minijail_add_fs_restriction_rx(j, m->dest);
697 		/*
698 		 * Allow rw if mounted as writable, or mount flags allow mount
699 		 * events.
700 		 */
701 		if (!mount_has_readonly_flag(m) || mount_events_allowed(m))
702 			minijail_add_fs_restriction_advanced_rw(j, m->dest);
703 		m = m->next;
704 	}
705 	if (landlock_enabled_by_profile) {
706 		minijail_enable_default_fs_restrictions(j);
707 		minijail_add_fs_restriction_edit(j, "/dev");
708 		minijail_add_fs_restriction_ro(j, "/proc");
709 		if (j->flags.vfs)
710 			minijail_add_fs_restriction_rw(j, "/tmp");
711 	}
712 }
713 
714 void API minijail_enable_default_fs_restrictions(struct minijail *j)
715 {
716 	// Common library locations.
717 	minijail_add_fs_restriction_rx(j, "/lib");
718 	minijail_add_fs_restriction_rx(j, "/lib64");
719 	minijail_add_fs_restriction_rx(j, "/usr/lib");
720 	minijail_add_fs_restriction_rx(j, "/usr/lib64");
721 	// Common locations for services invoking Minijail.
722 	minijail_add_fs_restriction_rx(j, "/bin");
723 	minijail_add_fs_restriction_rx(j, "/sbin");
724 	minijail_add_fs_restriction_rx(j, "/usr/sbin");
725 	minijail_add_fs_restriction_rx(j, "/usr/bin");
726 	// Common /etc locations.
727 	minijail_add_fs_restriction_ro(j, "/etc/group");
728 	minijail_add_fs_restriction_ro(j, "/etc/passwd");
729 }
730 
731 void API minijail_use_caps(struct minijail *j, uint64_t capmask)
732 {
733 	/*
734 	 * 'minijail_use_caps' configures a runtime-capabilities-only
735 	 * environment, including a bounding set matching the thread's runtime
736 	 * (permitted|inheritable|effective) sets.
737 	 * Therefore, it will override any existing bounding set configurations
738 	 * since the latter would allow gaining extra runtime capabilities from
739 	 * file capabilities.
740 	 */
741 	if (j->flags.capbset_drop) {
742 		warn("overriding bounding set configuration");
743 		j->cap_bset = 0;
744 		j->flags.capbset_drop = 0;
745 	}
746 	j->caps = capmask;
747 	j->flags.use_caps = 1;
748 }
749 
750 void API minijail_capbset_drop(struct minijail *j, uint64_t capmask)
751 {
752 	if (j->flags.use_caps) {
753 		/*
754 		 * 'minijail_use_caps' will have already configured a capability
755 		 * bounding set matching the (permitted|inheritable|effective)
756 		 * sets. Abort if the user tries to configure a separate
757 		 * bounding set. 'minijail_capbset_drop' and 'minijail_use_caps'
758 		 * are mutually exclusive.
759 		 */
760 		die("runtime capabilities already configured, can't drop "
761 		    "bounding set separately");
762 	}
763 	j->cap_bset = capmask;
764 	j->flags.capbset_drop = 1;
765 }
766 
767 void API minijail_set_ambient_caps(struct minijail *j)
768 {
769 	j->flags.set_ambient_caps = 1;
770 }
771 
772 void API minijail_reset_signal_mask(struct minijail *j)
773 {
774 	j->flags.reset_signal_mask = 1;
775 }
776 
777 void API minijail_reset_signal_handlers(struct minijail *j)
778 {
779 	j->flags.reset_signal_handlers = 1;
780 }
781 
782 void API minijail_namespace_vfs(struct minijail *j)
783 {
784 	j->flags.vfs = 1;
785 }
786 
787 void API minijail_namespace_enter_vfs(struct minijail *j, const char *ns_path)
788 {
789 	/* Note: Do not use O_CLOEXEC here.  We'll close it after we use it. */
790 	int ns_fd = open(ns_path, O_RDONLY);
791 	if (ns_fd < 0) {
792 		pdie("failed to open namespace '%s'", ns_path);
793 	}
794 	j->mountns_fd = ns_fd;
795 	j->flags.enter_vfs = 1;
796 }
797 
798 void API minijail_new_session_keyring(struct minijail *j)
799 {
800 	j->flags.new_session_keyring = 1;
801 }
802 
803 void API minijail_skip_setting_securebits(struct minijail *j,
804 					  uint64_t securebits_skip_mask)
805 {
806 	j->securebits_skip_mask = securebits_skip_mask;
807 }
808 
809 void API minijail_remount_mode(struct minijail *j, unsigned long mode)
810 {
811 	j->remount_mode = mode;
812 }
813 
814 void API minijail_skip_remount_private(struct minijail *j)
815 {
816 	j->remount_mode = 0;
817 }
818 
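/*
 * Editor's note: entering a PID namespace implies a VFS namespace (with
 * /proc remounted read-only) and an 'init' process forked by Minijail; see
 * minijail_namespace_pids_rw_proc() below for the writable-/proc variant.
 */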
819 void API minijail_namespace_pids(struct minijail *j)
820 {
821 	j->flags.vfs = 1;
822 	j->flags.remount_proc_ro = 1;
823 	j->flags.pids = 1;
824 	j->flags.do_init = 1;
825 }
826 
827 void API minijail_namespace_pids_rw_proc(struct minijail *j)
828 {
829 	j->flags.vfs = 1;
830 	j->flags.pids = 1;
831 	j->flags.do_init = 1;
832 }
833 
834 void API minijail_namespace_ipc(struct minijail *j)
835 {
836 	j->flags.ipc = 1;
837 }
838 
839 void API minijail_namespace_uts(struct minijail *j)
840 {
841 	j->flags.uts = 1;
842 }
843 
844 int API minijail_namespace_set_hostname(struct minijail *j, const char *name)
845 {
846 	if (j->hostname)
847 		return -EINVAL;
848 	minijail_namespace_uts(j);
849 	j->hostname = strdup(name);
850 	if (!j->hostname)
851 		return -ENOMEM;
852 	return 0;
853 }
854 
855 void API minijail_namespace_net_loopback(struct minijail *j,
856 					 bool enable_loopback)
857 {
858 	j->flags.net = 1;
859 	j->flags.net_loopback = enable_loopback;
860 }
861 
862 void API minijail_namespace_net(struct minijail *j)
863 {
864 	minijail_namespace_net_loopback(j, true);
865 }
866 
867 void API minijail_namespace_enter_net(struct minijail *j, const char *ns_path)
868 {
869 	/* Note: Do not use O_CLOEXEC here.  We'll close it after we use it. */
870 	int ns_fd = open(ns_path, O_RDONLY);
871 	if (ns_fd < 0) {
872 		pdie("failed to open namespace '%s'", ns_path);
873 	}
874 	j->netns_fd = ns_fd;
875 	j->flags.enter_net = 1;
876 }
877 
878 void API minijail_namespace_cgroups(struct minijail *j)
879 {
880 	j->flags.ns_cgroups = 1;
881 }
882 
883 void API minijail_close_open_fds(struct minijail *j)
884 {
885 	j->flags.close_open_fds = 1;
886 }
887 
888 void API minijail_remount_proc_readonly(struct minijail *j)
889 {
890 	j->flags.vfs = 1;
891 	j->flags.remount_proc_ro = 1;
892 }
893 
894 void API minijail_namespace_user(struct minijail *j)
895 {
896 	j->flags.userns = 1;
897 }
898 
899 void API minijail_namespace_user_disable_setgroups(struct minijail *j)
900 {
901 	j->flags.disable_setgroups = 1;
902 }
903 
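/*
 * Editor's note: the two map setters below store the uid/gid map for the
 * user namespace, converting ',' separators to the newline-separated form
 * expected when the map is written out later.
 */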
904 int API minijail_uidmap(struct minijail *j, const char *uidmap)
905 {
906 	j->uidmap = strdup(uidmap);
907 	if (!j->uidmap)
908 		return -ENOMEM;
909 	char *ch;
910 	for (ch = j->uidmap; *ch; ch++) {
911 		if (*ch == ',')
912 			*ch = '\n';
913 	}
914 	return 0;
915 }
916 
917 int API minijail_gidmap(struct minijail *j, const char *gidmap)
918 {
919 	j->gidmap = strdup(gidmap);
920 	if (!j->gidmap)
921 		return -ENOMEM;
922 	char *ch;
923 	for (ch = j->gidmap; *ch; ch++) {
924 		if (*ch == ',')
925 			*ch = '\n';
926 	}
927 	return 0;
928 }
929 
930 void API minijail_inherit_usergroups(struct minijail *j)
931 {
932 	j->flags.inherit_suppl_gids = 1;
933 }
934 
935 void API minijail_run_as_init(struct minijail *j)
936 {
937 	/*
938 	 * Since the jailed program will become 'init' in the new PID namespace,
939 	 * Minijail does not need to fork an 'init' process.
940 	 */
941 	j->flags.run_as_init = 1;
942 }
943 
944 int API minijail_enter_chroot(struct minijail *j, const char *dir)
945 {
946 	if (j->chrootdir)
947 		return -EINVAL;
948 	j->chrootdir = strdup(dir);
949 	if (!j->chrootdir)
950 		return -ENOMEM;
951 	j->flags.chroot = 1;
952 	return 0;
953 }
954 
955 int API minijail_enter_pivot_root(struct minijail *j, const char *dir)
956 {
957 	if (j->chrootdir)
958 		return -EINVAL;
959 	j->chrootdir = strdup(dir);
960 	if (!j->chrootdir)
961 		return -ENOMEM;
962 	j->flags.pivot_root = 1;
963 	return 0;
964 }
965 
966 char API *minijail_get_original_path(struct minijail *j,
967 				     const char *path_inside_chroot)
968 {
969 	struct mountpoint *b;
970 
971 	b = j->mounts_head;
972 	while (b) {
973 		/*
974 		 * If |path_inside_chroot| is the exact destination of a
975 		 * mount, then the original path is exactly the source of
976 		 * the mount.
977 		 *  for example: "-b /some/path/exe,/chroot/path/exe"
978 		 *    mount source = /some/path/exe, mount dest =
979 		 *    /chroot/path/exe Then when getting the original path of
980 	 *    /chroot/path/exe. Then when getting the original path of
981 		 *    "/some/path/exe" is what should be returned.
982 		 */
983 		if (streq(b->dest, path_inside_chroot))
984 			return strdup(b->src);
985 
986 		/*
987 		 * If |path_inside_chroot| is within the destination path of a
988 		 * mount, take the suffix of the chroot path relative to the
989 		 * mount destination path, and append it to the mount source
990 		 * path.
991 		 */
992 		if (!strncmp(b->dest, path_inside_chroot, strlen(b->dest))) {
993 			const char *relative_path =
994 			    path_inside_chroot + strlen(b->dest);
995 			return path_join(b->src, relative_path);
996 		}
997 		b = b->next;
998 	}
999 
1000 	/* If there is a chroot path, append |path_inside_chroot| to that. */
1001 	if (j->chrootdir)
1002 		return path_join(j->chrootdir, path_inside_chroot);
1003 
1004 	/* No chroot, so the path outside is the same as it is inside. */
1005 	return strdup(path_inside_chroot);
1006 }
1007 
1008 void API minijail_mount_dev(struct minijail *j)
1009 {
1010 	j->flags.mount_dev = 1;
1011 }
1012 
1013 void API minijail_mount_tmp(struct minijail *j)
1014 {
1015 	minijail_mount_tmp_size(j, 64 * 1024 * 1024);
1016 }
1017 
1018 void API minijail_mount_tmp_size(struct minijail *j, size_t size)
1019 {
1020 	j->tmpfs_size = size;
1021 	j->flags.mount_tmp = 1;
1022 }
1023 
1024 int API minijail_write_pid_file(struct minijail *j, const char *path)
1025 {
1026 	j->pid_file_path = strdup(path);
1027 	if (!j->pid_file_path)
1028 		return -ENOMEM;
1029 	j->flags.pid_file = 1;
1030 	return 0;
1031 }
1032 
1033 int API minijail_add_to_cgroup(struct minijail *j, const char *path)
1034 {
1035 	if (j->cgroup_count >= MAX_CGROUPS)
1036 		return -ENOMEM;
1037 	j->cgroups[j->cgroup_count] = strdup(path);
1038 	if (!j->cgroups[j->cgroup_count])
1039 		return -ENOMEM;
1040 	j->cgroup_count++;
1041 	j->flags.cgroups = 1;
1042 	return 0;
1043 }
1044 
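/*
 * Editor's note: registers an rlimit to apply when entering the jail; each
 * resource type may only be set once.
 */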
1045 int API minijail_rlimit(struct minijail *j, int type, rlim_t cur, rlim_t max)
1046 {
1047 	size_t i;
1048 
1049 	if (j->rlimit_count >= MAX_RLIMITS)
1050 		return -ENOMEM;
1051 	/* It's an error if the caller sets the same rlimit multiple times. */
1052 	for (i = 0; i < j->rlimit_count; i++) {
1053 		if (j->rlimits[i].type == type)
1054 			return -EEXIST;
1055 	}
1056 
1057 	j->rlimits[j->rlimit_count].type = type;
1058 	j->rlimits[j->rlimit_count].cur = cur;
1059 	j->rlimits[j->rlimit_count].max = max;
1060 	j->rlimit_count++;
1061 	return 0;
1062 }
1063 
1064 int API minijail_forward_signals(struct minijail *j)
1065 {
1066 	j->flags.forward_signals = 1;
1067 	return 0;
1068 }
1069 
1070 int API minijail_create_session(struct minijail *j)
1071 {
1072 	j->flags.setsid = 1;
1073 	return 0;
1074 }
1075 
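/*
 * Editor's note: the minijail_add_fs_restriction_*() wrappers below invert
 * the 0-on-success convention of add_fs_restriction_path(), so they return
 * nonzero on success and 0 on allocation failure.
 */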
1076 int API minijail_add_fs_restriction_rx(struct minijail *j, const char *path)
1077 {
1078 	return !add_fs_restriction_path(j, path,
1079 					ACCESS_FS_ROUGHLY_READ_EXECUTE);
1080 }
1081 
1082 int API minijail_add_fs_restriction_ro(struct minijail *j, const char *path)
1083 {
1084 	return !add_fs_restriction_path(j, path, ACCESS_FS_ROUGHLY_READ);
1085 }
1086 
1087 int API minijail_add_fs_restriction_rw(struct minijail *j, const char *path)
1088 {
1089 	return !add_fs_restriction_path(
1090 	    j, path, ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_BASIC_WRITE);
1091 }
1092 
1093 int API minijail_add_fs_restriction_advanced_rw(struct minijail *j,
1094 						const char *path)
1095 {
1096 	uint16_t landlock_flags =
1097 	    ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_FULL_WRITE;
1098 	if (fs_refer_restriction_supported(j)) {
1099 		landlock_flags |= LANDLOCK_ACCESS_FS_REFER;
1100 	}
1101 
1102 	return !add_fs_restriction_path(j, path, landlock_flags);
1103 }
1104 
1105 int API minijail_add_fs_restriction_edit(struct minijail *j, const char *path)
1106 {
1107 	return !add_fs_restriction_path(
1108 	    j, path, ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_EDIT);
1109 }
1110 
1111 int API minijail_add_fs_restriction_access_rights(struct minijail *j,
1112 						  const char *path,
1113 						  uint16_t landlock_flags)
1114 {
1115 	return !add_fs_restriction_path(j, path, landlock_flags);
1116 }
1117 
1118 bool API
1119 minijail_is_fs_restriction_ruleset_initialized(const struct minijail *j)
1120 {
1121 	return j->fs_rules_fd >= 0;
1122 }
1123 
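/*
 * Editor's note: when symlink blocking is enabled, a non-canonical
 * bind-mount source is only accepted if it falls under one of the
 * BINDMOUNT_ALLOWED_PREFIXES.
 */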
1124 static bool is_valid_bind_path(const char *path)
1125 {
1126 	if (!block_symlinks_in_bindmount_paths()) {
1127 		return true;
1128 	}
1129 
1130 	/*
1131 	 * tokenize() will modify both the |prefixes| pointer and the contents
1132 	 * of the string, so:
1133 	 * -Copy |BINDMOUNT_ALLOWED_PREFIXES| since it lives in .rodata.
1134 	 * -Save the original pointer for free()ing.
1135 	 */
1136 	char *prefixes = strdup(BINDMOUNT_ALLOWED_PREFIXES);
1137 	attribute_cleanup_str char *orig_prefixes = prefixes;
1138 	(void)orig_prefixes;
1139 
1140 	char *prefix = NULL;
1141 	bool found_prefix = false;
1142 	if (!is_canonical_path(path)) {
1143 		while ((prefix = tokenize(&prefixes, ",")) != NULL) {
1144 			if (path_is_parent(prefix, path)) {
1145 				found_prefix = true;
1146 				break;
1147 			}
1148 		}
1149 		if (!found_prefix) {
1150 			/*
1151 			 * If the path does not include one of the allowed
1152 			 * prefixes, fail.
1153 			 */
1154 			warn("path '%s' is not a canonical path", path);
1155 			return false;
1156 		}
1157 	}
1158 	return true;
1159 }
1160 
1161 int API minijail_mount_with_data(struct minijail *j, const char *src,
1162 				 const char *dest, const char *type,
1163 				 unsigned long flags, const char *data)
1164 {
1165 	struct mountpoint *m;
1166 
1167 	if (*dest != '/')
1168 		return -EINVAL;
1169 	m = calloc(1, sizeof(*m));
1170 	if (!m)
1171 		return -ENOMEM;
1172 	m->dest = strdup(dest);
1173 	if (!m->dest)
1174 		goto error;
1175 	m->src = strdup(src);
1176 	if (!m->src)
1177 		goto error;
1178 	m->type = strdup(type);
1179 	if (!m->type)
1180 		goto error;
1181 
1182 	if (!data || !data[0]) {
1183 		/*
1184 		 * Set up secure defaults for certain filesystems.  Adding this
1185 		 * fs-specific logic here kind of sucks, but considering how
1186 		 * people use these in practice, it's probably OK.  If they want
1187 		 * the kernel defaults, they can pass data="" instead of NULL.
1188 		 */
1189 		if (streq(type, "tmpfs")) {
1190 			/* tmpfs defaults to mode=1777 and size=50%. */
1191 			data = "mode=0755,size=10M";
1192 		}
1193 	}
1194 	if (data) {
1195 		m->data = strdup(data);
1196 		if (!m->data)
1197 			goto error;
1198 		m->has_data = 1;
1199 	}
1200 
1201 	/* If they don't specify any flags, default to secure ones. */
1202 	if (flags == 0)
1203 		flags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
1204 	m->flags = flags;
1205 
1206 	/*
1207 	 * Unless asked to enter an existing namespace, force vfs namespacing
1208 	 * so the mounts don't leak out into the containing vfs namespace.
1209 	 * If Minijail is being asked to enter the root vfs namespace this will
1210 	 * leak mounts, but it's unlikely that the user would ask to do that by
1211 	 * mistake.
1212 	 */
1213 	if (!j->flags.enter_vfs)
1214 		minijail_namespace_vfs(j);
1215 
1216 	if (j->mounts_tail)
1217 		j->mounts_tail->next = m;
1218 	else
1219 		j->mounts_head = m;
1220 	j->mounts_tail = m;
1221 	j->mounts_count++;
1222 
1223 	return 0;
1224 
1225 error:
1226 	free(m->type);
1227 	free(m->src);
1228 	free(m->dest);
1229 	free(m);
1230 	return -ENOMEM;
1231 }
1232 
1233 int API minijail_mount(struct minijail *j, const char *src, const char *dest,
1234 		       const char *type, unsigned long flags)
1235 {
1236 	return minijail_mount_with_data(j, src, dest, type, flags, NULL);
1237 }
1238 
1239 int API minijail_bind(struct minijail *j, const char *src, const char *dest,
1240 		      int writeable)
1241 {
1242 	unsigned long flags = MS_BIND;
1243 
1244 	/*
1245 	 * Check for symlinks in bind-mount source paths to warn the user early.
1246 	 * Minijail will perform one final check immediately before the mount()
1247 	 * call.
1248 	 */
1249 	if (!is_valid_bind_path(src)) {
1250 		warn("src '%s' is not a valid bind mount path", src);
1251 		return -ELOOP;
1252 	}
1253 
1254 	/*
1255 	 * Symlinks in |dest| are blocked by the ChromiumOS LSM:
1256 	 * <kernel>/security/chromiumos/lsm.c#77
1257 	 */
1258 
1259 	if (!writeable)
1260 		flags |= MS_RDONLY;
1261 
1262 	/*
1263 	 * |type| is ignored for bind mounts, use it to signal that this mount
1264 	 * came from minijail_bind().
1265 	 * TODO(b/238362528): Implement a better way to signal this.
1266 	 */
1267 	return minijail_mount(j, src, dest, "minijail_bind", flags);
1268 }
1269 
1270 int API minijail_add_remount(struct minijail *j, const char *mount_name,
1271 			     unsigned long remount_mode)
1272 {
1273 	struct minijail_remount *m;
1274 
1275 	if (*mount_name != '/')
1276 		return -EINVAL;
1277 	m = calloc(1, sizeof(*m));
1278 	if (!m)
1279 		return -ENOMEM;
1280 	m->mount_name = strdup(mount_name);
1281 	if (!m->mount_name) {
1282 		free(m);
1283 		return -ENOMEM;
1284 	}
1285 
1286 	m->remount_mode = remount_mode;
1287 
1288 	if (j->remounts_tail)
1289 		j->remounts_tail->next = m;
1290 	else
1291 		j->remounts_head = m;
1292 	j->remounts_tail = m;
1293 
1294 	return 0;
1295 }
1296 
1297 int API minijail_add_hook(struct minijail *j, minijail_hook_t hook,
1298 			  void *payload, minijail_hook_event_t event)
1299 {
1300 	struct hook *c;
1301 
1302 	if (event >= MINIJAIL_HOOK_EVENT_MAX)
1303 		return -EINVAL;
1304 	c = calloc(1, sizeof(*c));
1305 	if (!c)
1306 		return -ENOMEM;
1307 
1308 	c->hook = hook;
1309 	c->payload = payload;
1310 	c->event = event;
1311 
1312 	if (j->hooks_tail)
1313 		j->hooks_tail->next = c;
1314 	else
1315 		j->hooks_head = c;
1316 	j->hooks_tail = c;
1317 
1318 	return 0;
1319 }
1320 
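/*
 * Editor's note: records that |parent_fd| should be preserved and exposed as
 * |child_fd| inside the jailed process.
 */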
1321 int API minijail_preserve_fd(struct minijail *j, int parent_fd, int child_fd)
1322 {
1323 	if (parent_fd < 0 || child_fd < 0)
1324 		return -EINVAL;
1325 	if (j->preserved_fd_count >= MAX_PRESERVED_FDS)
1326 		return -ENOMEM;
1327 	j->preserved_fds[j->preserved_fd_count].parent_fd = parent_fd;
1328 	j->preserved_fds[j->preserved_fd_count].child_fd = child_fd;
1329 	j->preserved_fd_count++;
1330 	return 0;
1331 }
1332 
1333 int API minijail_set_preload_path(struct minijail *j, const char *preload_path)
1334 {
1335 	if (j->preload_path)
1336 		return -EINVAL;
1337 	j->preload_path = strdup(preload_path);
1338 	if (!j->preload_path)
1339 		return -ENOMEM;
1340 	return 0;
1341 }
1342 
1343 static void clear_seccomp_options(struct minijail *j)
1344 {
1345 	j->flags.seccomp_filter = 0;
1346 	j->flags.seccomp_filter_tsync = 0;
1347 	j->flags.seccomp_filter_logging = 0;
1348 	j->flags.seccomp_filter_allow_speculation = 0;
1349 	j->filter_len = 0;
1350 	j->filter_prog = NULL;
1351 	j->flags.no_new_privs = 0;
1352 	if (j->seccomp_policy_path) {
1353 		free(j->seccomp_policy_path);
1354 	}
1355 	j->seccomp_policy_path = NULL;
1356 }
1357 
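/*
 * Editor's note: probes kernel support for seccomp filters (and TSYNC /
 * SPEC_ALLOW when requested). Returns 0 and clears the seccomp options when
 * filters should be skipped on a soft-fail platform; returns 1 when filter
 * loading should proceed (any remaining failure is handled at enter time).
 */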
1358 static int seccomp_should_use_filters(struct minijail *j)
1359 {
1360 	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL) == -1) {
1361 		/*
1362 		 * |errno| will be set to EINVAL when seccomp has not been
1363 		 * compiled into the kernel. On certain platforms and kernel
1364 		 * versions this is not a fatal failure. In that case, and only
1365 		 * in that case, disable seccomp and skip loading the filters.
1366 		 */
1367 		if ((errno == EINVAL) && seccomp_can_softfail()) {
1368 			warn("not loading seccomp filters, seccomp filter not "
1369 			     "supported");
1370 			clear_seccomp_options(j);
1371 			return 0;
1372 		}
1373 		/*
1374 		 * If |errno| != EINVAL or seccomp_can_softfail() is false,
1375 		 * we can proceed. Worst case scenario minijail_enter() will
1376 		 * abort() if seccomp fails.
1377 		 */
1378 	}
1379 	if (j->flags.seccomp_filter_tsync) {
1380 		/* Are the seccomp(2) syscall and the TSYNC option supported? */
1381 		if (sys_seccomp(SECCOMP_SET_MODE_FILTER,
1382 				SECCOMP_FILTER_FLAG_TSYNC, NULL) == -1) {
1383 			int saved_errno = errno;
1384 			if (saved_errno == ENOSYS && seccomp_can_softfail()) {
1385 				warn("seccomp(2) syscall not supported");
1386 				clear_seccomp_options(j);
1387 				return 0;
1388 			} else if (saved_errno == EINVAL &&
1389 				   seccomp_can_softfail()) {
1390 				warn(
1391 				    "seccomp filter thread sync not supported");
1392 				clear_seccomp_options(j);
1393 				return 0;
1394 			}
1395 			/*
1396 			 * Similar logic here. If seccomp_can_softfail() is
1397 			 * false, or |errno| != ENOSYS, or |errno| != EINVAL,
1398 			 * we can proceed. Worst case scenario minijail_enter()
1399 			 * will abort() if seccomp or TSYNC fail.
1400 			 */
1401 		}
1402 	}
1403 	if (j->flags.seccomp_filter_allow_speculation) {
1404 		/* Is the SPEC_ALLOW flag supported? */
1405 		if (!seccomp_filter_flags_available(
1406 			SECCOMP_FILTER_FLAG_SPEC_ALLOW)) {
1407 			warn("allowing speculative execution on seccomp "
1408 			     "processes not supported");
1409 			j->flags.seccomp_filter_allow_speculation = 0;
1410 		}
1411 	}
1412 	return 1;
1413 }
1414 
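/*
 * Editor's note: installs |filter| as the jail's BPF program. With |owned|
 * the jail takes ownership of the passed-in program; otherwise a private
 * deep copy is made so the caller's filter is never modified.
 */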
1415 static int set_seccomp_filters_internal(struct minijail *j,
1416 					const struct sock_fprog *filter,
1417 					bool owned)
1418 {
1419 	struct sock_fprog *fprog;
1420 
1421 	if (owned) {
1422 		/*
1423 		 * If |owned| is true, it's OK to cast away the const-ness since
1424 		 * we'll own the pointer going forward.
1425 		 */
1426 		fprog = (struct sock_fprog *)filter;
1427 	} else {
1428 		fprog = malloc(sizeof(struct sock_fprog));
1429 		if (!fprog)
1430 			return -ENOMEM;
1431 		fprog->len = filter->len;
1432 		fprog->filter = malloc(sizeof(struct sock_filter) * fprog->len);
1433 		if (!fprog->filter) {
1434 			free(fprog);
1435 			return -ENOMEM;
1436 		}
1437 		memcpy(fprog->filter, filter->filter,
1438 		       sizeof(struct sock_filter) * fprog->len);
1439 	}
1440 
1441 	if (j->filter_prog) {
1442 		free(j->filter_prog->filter);
1443 		free(j->filter_prog);
1444 	}
1445 
1446 	j->filter_len = fprog->len;
1447 	j->filter_prog = fprog;
1448 	return 0;
1449 }
1450 
1451 static int parse_seccomp_filters(struct minijail *j, const char *filename,
1452 				 FILE *policy_file)
1453 {
1454 	struct sock_fprog *fprog = malloc(sizeof(struct sock_fprog));
1455 	if (!fprog)
1456 		return -ENOMEM;
1457 
1458 	struct filter_options filteropts;
1459 
1460 	/*
1461 	 * Figure out filter options.
1462 	 * Allow logging?
1463 	 */
1464 	filteropts.allow_logging =
1465 	    debug_logging_allowed() && seccomp_is_logging_allowed(j);
1466 
1467 	/* What to do on a blocked system call? */
1468 	if (filteropts.allow_logging) {
1469 		if (seccomp_ret_log_available())
1470 			filteropts.action = ACTION_RET_LOG;
1471 		else
1472 			filteropts.action = ACTION_RET_TRAP;
1473 	} else {
1474 		if (j->flags.seccomp_filter_tsync) {
1475 			if (seccomp_ret_kill_process_available()) {
1476 				filteropts.action = ACTION_RET_KILL_PROCESS;
1477 			} else {
1478 				filteropts.action = ACTION_RET_TRAP;
1479 			}
1480 		} else {
1481 			filteropts.action = ACTION_RET_KILL;
1482 		}
1483 	}
1484 
1485 	/*
1486 	 * If SECCOMP_RET_LOG is not available, need to allow extra syscalls
1487 	 * for logging.
1488 	 */
1489 	filteropts.allow_syscalls_for_logging =
1490 	    filteropts.allow_logging && !seccomp_ret_log_available();
1491 
1492 	/* Whether to also allow syscalls for libc compatibility. */
1493 	filteropts.include_libc_compatibility_allowlist =
1494 	    allow_libc_compatibility_syscalls();
1495 
1496 	/* Whether to fail on duplicate syscalls. */
1497 	filteropts.allow_duplicate_syscalls = allow_duplicate_syscalls();
1498 
1499 	if (compile_filter(filename, policy_file, fprog, &filteropts)) {
1500 		free(fprog);
1501 		return -1;
1502 	}
1503 
1504 	return set_seccomp_filters_internal(j, fprog, true /* owned */);
1505 }
1506 
1507 void API minijail_parse_seccomp_filters(struct minijail *j, const char *path)
1508 {
1509 	if (!seccomp_should_use_filters(j))
1510 		return;
1511 
1512 	attribute_cleanup_fp FILE *file = fopen(path, "re");
1513 	if (!file) {
1514 		pdie("failed to open seccomp filter file '%s'", path);
1515 	}
1516 
1517 	if (parse_seccomp_filters(j, path, file) != 0) {
1518 		die("failed to compile seccomp filter BPF program in '%s'",
1519 		    path);
1520 	}
1521 	if (j->seccomp_policy_path) {
1522 		free(j->seccomp_policy_path);
1523 	}
1524 	j->seccomp_policy_path = strdup(path);
1525 }
1526 
1527 void API minijail_parse_seccomp_filters_from_fd(struct minijail *j, int fd)
1528 {
1529 	char *fd_path, *path;
1530 	attribute_cleanup_fp FILE *file = NULL;
1531 
1532 	if (!seccomp_should_use_filters(j))
1533 		return;
1534 
1535 	file = fdopen(fd, "r");
1536 	if (!file) {
1537 		pdie("failed to associate stream with fd %d", fd);
1538 	}
1539 
1540 	if (asprintf(&fd_path, "/proc/self/fd/%d", fd) == -1)
1541 		pdie("failed to create path for fd %d", fd);
1542 	path = realpath(fd_path, NULL);
1543 	if (path == NULL)
1544 		pwarn("failed to get path of fd %d", fd);
1545 	free(fd_path);
1546 
1547 	if (parse_seccomp_filters(j, path ? path : "<fd>", file) != 0) {
1548 		die("failed to compile seccomp filter BPF program from fd %d",
1549 		    fd);
1550 	}
1551 	if (j->seccomp_policy_path) {
1552 		free(j->seccomp_policy_path);
1553 	}
1554 	j->seccomp_policy_path = path;
1555 }
1556 
1557 void API minijail_set_seccomp_filters(struct minijail *j,
1558 				      const struct sock_fprog *filter)
1559 {
1560 	if (!seccomp_should_use_filters(j))
1561 		return;
1562 
1563 	if (seccomp_is_logging_allowed(j)) {
1564 		die("minijail_log_seccomp_filter_failures() is incompatible "
1565 		    "with minijail_set_seccomp_filters()");
1566 	}
1567 
1568 	/*
1569 	 * set_seccomp_filters_internal() can only fail with ENOMEM.
1570 	 * Furthermore, since we won't own the incoming filter, it will not be
1571 	 * modified.
1572 	 */
1573 	if (set_seccomp_filters_internal(j, filter, false /* owned */) < 0) {
1574 		die("failed to set seccomp filter");
1575 	}
1576 }
1577 
1578 int API minijail_use_alt_syscall(struct minijail *j, const char *table)
1579 {
1580 	j->alt_syscall_table = strdup(table);
1581 	if (!j->alt_syscall_table)
1582 		return -ENOMEM;
1583 	j->flags.alt_syscall = 1;
1584 	return 0;
1585 }
1586 
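/*
 * Editor's note: marshaling scratch state. |total| accumulates the full
 * serialized size even when |buf| is NULL or |available| is too small, which
 * lets minijail_size() measure first and minijail_marshal() fill a correctly
 * sized buffer second.
 */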
1587 struct marshal_state {
1588 	size_t available;
1589 	size_t total;
1590 	char *buf;
1591 };
1592 
1593 static void marshal_state_init(struct marshal_state *state, char *buf,
1594 			       size_t available)
1595 {
1596 	state->available = available;
1597 	state->buf = buf;
1598 	state->total = 0;
1599 }
1600 
1601 static void marshal_append(struct marshal_state *state, const void *src,
1602 			   size_t length)
1603 {
1604 	size_t copy_len = MIN(state->available, length);
1605 
1606 	/* Up to |available| will be written. */
1607 	if (copy_len) {
1608 		memcpy(state->buf, src, copy_len);
1609 		state->buf += copy_len;
1610 		state->available -= copy_len;
1611 	}
1612 	/* |total| will contain the expected length. */
1613 	state->total += length;
1614 }
1615 
1616 static void marshal_append_string(struct marshal_state *state, const char *src)
1617 {
1618 	marshal_append(state, src, strlen(src) + 1);
1619 }
1620 
1621 static void marshal_mount(struct marshal_state *state,
1622 			  const struct mountpoint *m)
1623 {
1624 	marshal_append(state, m->src, strlen(m->src) + 1);
1625 	marshal_append(state, m->dest, strlen(m->dest) + 1);
1626 	marshal_append(state, m->type, strlen(m->type) + 1);
1627 	marshal_append(state, (char *)&m->has_data, sizeof(m->has_data));
1628 	if (m->has_data)
1629 		marshal_append(state, m->data, strlen(m->data) + 1);
1630 	marshal_append(state, (char *)&m->flags, sizeof(m->flags));
1631 }
1632 
1633 static void marshal_fs_rule(struct marshal_state *state,
1634 			    const struct fs_rule *r)
1635 {
1636 	marshal_append(state, r->path, strlen(r->path) + 1);
1637 	marshal_append(state, (char *)&r->landlock_flags,
1638 		       sizeof(r->landlock_flags));
1639 }
1640 
1641 static void minijail_marshal_helper(struct marshal_state *state,
1642 				    const struct minijail *j)
1643 {
1644 	struct mountpoint *m = NULL;
1645 	struct fs_rule *r = NULL;
1646 	size_t i;
1647 
1648 	marshal_append(state, (char *)j, sizeof(*j));
1649 	if (j->user)
1650 		marshal_append_string(state, j->user);
1651 	if (j->suppl_gid_list) {
1652 		marshal_append(state, j->suppl_gid_list,
1653 			       j->suppl_gid_count * sizeof(gid_t));
1654 	}
1655 	if (j->chrootdir)
1656 		marshal_append_string(state, j->chrootdir);
1657 	if (j->hostname)
1658 		marshal_append_string(state, j->hostname);
1659 	if (j->alt_syscall_table) {
1660 		marshal_append(state, j->alt_syscall_table,
1661 			       strlen(j->alt_syscall_table) + 1);
1662 	}
1663 	if (j->flags.seccomp_filter && j->filter_prog) {
1664 		struct sock_fprog *fp = j->filter_prog;
1665 		marshal_append(state, (char *)fp->filter,
1666 			       fp->len * sizeof(struct sock_filter));
1667 	}
1668 	for (m = j->mounts_head; m; m = m->next) {
1669 		marshal_mount(state, m);
1670 	}
1671 	for (i = 0; i < j->cgroup_count; ++i)
1672 		marshal_append_string(state, j->cgroups[i]);
1673 	for (r = j->fs_rules_head; r; r = r->next)
1674 		marshal_fs_rule(state, r);
1675 	marshal_append(state, (char *)&j->fs_rules_fd, sizeof(j->fs_rules_fd));
1676 	if (j->seccomp_policy_path)
1677 		marshal_append_string(state, j->seccomp_policy_path);
1678 }
1679 
1680 size_t API minijail_size(const struct minijail *j)
1681 {
1682 	struct marshal_state state;
1683 	marshal_state_init(&state, NULL, 0);
1684 	minijail_marshal_helper(&state, j);
1685 	return state.total;
1686 }
1687 
1688 int minijail_marshal(const struct minijail *j, char *buf, size_t available)
1689 {
1690 	struct marshal_state state;
1691 	marshal_state_init(&state, buf, available);
1692 	minijail_marshal_helper(&state, j);
1693 	return (state.total > available);
1694 }
1695 
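/*
 * Editor's note: rebuilds a jail from a buffer produced by
 * minijail_marshal(). The blob begins with a raw copy of struct minijail, so
 * every embedded pointer is stale: it is either reset to NULL or used purely
 * as a flag indicating that the corresponding string/array follows in the
 * serialized stream.
 */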
1696 int minijail_unmarshal(struct minijail *j, char *serialized, size_t length)
1697 {
1698 	size_t i;
1699 	size_t count;
1700 	size_t fs_rules_count;
1701 	int ret = -EINVAL;
1702 
1703 	if (length < sizeof(*j))
1704 		goto out;
1705 	memcpy((void *)j, serialized, sizeof(*j));
1706 	serialized += sizeof(*j);
1707 	length -= sizeof(*j);
1708 
1709 	/* Potentially stale pointers not used as signals. */
1710 	j->preload_path = NULL;
1711 	j->filename = NULL;
1712 	j->pid_file_path = NULL;
1713 	j->uidmap = NULL;
1714 	j->gidmap = NULL;
1715 	j->mounts_head = NULL;
1716 	j->mounts_tail = NULL;
1717 	j->remounts_head = NULL;
1718 	j->remounts_tail = NULL;
1719 	j->filter_prog = NULL;
1720 	j->hooks_head = NULL;
1721 	j->hooks_tail = NULL;
1722 	j->fs_rules_head = NULL;
1723 	j->fs_rules_tail = NULL;
1724 
1725 	if (j->user) { /* stale pointer */
1726 		char *user = consumestr(&serialized, &length);
1727 		if (!user)
1728 			goto clear_pointers;
1729 		j->user = strdup(user);
1730 		if (!j->user)
1731 			goto clear_pointers;
1732 	}
1733 
1734 	if (j->suppl_gid_list) { /* stale pointer */
1735 		if (j->suppl_gid_count > NGROUPS_MAX) {
1736 			goto bad_gid_list;
1737 		}
1738 		size_t gid_list_size = j->suppl_gid_count * sizeof(gid_t);
1739 		void *gid_list_bytes =
1740 		    consumebytes(gid_list_size, &serialized, &length);
1741 		if (!gid_list_bytes)
1742 			goto bad_gid_list;
1743 
1744 		j->suppl_gid_list = calloc(j->suppl_gid_count, sizeof(gid_t));
1745 		if (!j->suppl_gid_list)
1746 			goto bad_gid_list;
1747 
1748 		memcpy(j->suppl_gid_list, gid_list_bytes, gid_list_size);
1749 	}
1750 
1751 	if (j->chrootdir) { /* stale pointer */
1752 		char *chrootdir = consumestr(&serialized, &length);
1753 		if (!chrootdir)
1754 			goto bad_chrootdir;
1755 		j->chrootdir = strdup(chrootdir);
1756 		if (!j->chrootdir)
1757 			goto bad_chrootdir;
1758 	}
1759 
1760 	if (j->hostname) { /* stale pointer */
1761 		char *hostname = consumestr(&serialized, &length);
1762 		if (!hostname)
1763 			goto bad_hostname;
1764 		j->hostname = strdup(hostname);
1765 		if (!j->hostname)
1766 			goto bad_hostname;
1767 	}
1768 
1769 	if (j->alt_syscall_table) { /* stale pointer */
1770 		char *alt_syscall_table = consumestr(&serialized, &length);
1771 		if (!alt_syscall_table)
1772 			goto bad_syscall_table;
1773 		j->alt_syscall_table = strdup(alt_syscall_table);
1774 		if (!j->alt_syscall_table)
1775 			goto bad_syscall_table;
1776 	}
1777 
1778 	if (j->flags.seccomp_filter && j->filter_len > 0) {
1779 		size_t ninstrs = j->filter_len;
1780 		if (ninstrs > (SIZE_MAX / sizeof(struct sock_filter)) ||
1781 		    ninstrs > USHRT_MAX)
1782 			goto bad_filters;
1783 
1784 		size_t program_len = ninstrs * sizeof(struct sock_filter);
1785 		void *program = consumebytes(program_len, &serialized, &length);
1786 		if (!program)
1787 			goto bad_filters;
1788 
1789 		j->filter_prog = malloc(sizeof(struct sock_fprog));
1790 		if (!j->filter_prog)
1791 			goto bad_filters;
1792 
1793 		j->filter_prog->len = ninstrs;
1794 		j->filter_prog->filter = malloc(program_len);
1795 		if (!j->filter_prog->filter)
1796 			goto bad_filter_prog_instrs;
1797 
1798 		memcpy(j->filter_prog->filter, program, program_len);
1799 	}
1800 
1801 	count = j->mounts_count;
1802 	j->mounts_count = 0;
1803 	for (i = 0; i < count; ++i) {
1804 		unsigned long *flags;
1805 		int *has_data;
1806 		const char *dest;
1807 		const char *type;
1808 		const char *data = NULL;
1809 		const char *src = consumestr(&serialized, &length);
1810 		if (!src)
1811 			goto bad_mounts;
1812 		dest = consumestr(&serialized, &length);
1813 		if (!dest)
1814 			goto bad_mounts;
1815 		type = consumestr(&serialized, &length);
1816 		if (!type)
1817 			goto bad_mounts;
1818 		has_data =
1819 		    consumebytes(sizeof(*has_data), &serialized, &length);
1820 		if (!has_data)
1821 			goto bad_mounts;
1822 		if (*has_data) {
1823 			data = consumestr(&serialized, &length);
1824 			if (!data)
1825 				goto bad_mounts;
1826 		}
1827 		flags = consumebytes(sizeof(*flags), &serialized, &length);
1828 		if (!flags)
1829 			goto bad_mounts;
1830 		if (minijail_mount_with_data(j, src, dest, type, *flags, data))
1831 			goto bad_mounts;
1832 	}
1833 
1834 	count = j->cgroup_count;
1835 	j->cgroup_count = 0;
1836 	for (i = 0; i < count; ++i) {
1837 		char *cgroup = consumestr(&serialized, &length);
1838 		if (!cgroup)
1839 			goto bad_cgroups;
1840 		j->cgroups[i] = strdup(cgroup);
1841 		if (!j->cgroups[i])
1842 			goto bad_cgroups;
1843 		++j->cgroup_count;
1844 	}
1845 
1846 	/* Unmarshal fs_rules. */
1847 	fs_rules_count = j->fs_rules_count;
1848 	j->fs_rules_count = 0;
1849 	for (i = 0; i < fs_rules_count; ++i) {
1850 		const char *path = consumestr(&serialized, &length);
1851 		uint64_t landlock_flags;
1852 		void *landlock_flags_bytes =
1853 		    consumebytes(sizeof(landlock_flags), &serialized, &length);
1854 
		if (!path || !landlock_flags_bytes)
			goto bad_fs_rules;
		memcpy(&landlock_flags, landlock_flags_bytes,
		       sizeof(landlock_flags));
1859 		if (!landlock_flags)
1860 			goto bad_fs_rules;
1861 		if (add_fs_restriction_path(j, path, landlock_flags))
1862 			goto bad_fs_rules;
1863 	}
1864 	/* Unmarshal fs_rules_fd. */
	void *fs_rules_fd_bytes =
	    consumebytes(sizeof(j->fs_rules_fd), &serialized, &length);
	if (!fs_rules_fd_bytes)
		goto bad_cgroups;
	memcpy(&j->fs_rules_fd, fs_rules_fd_bytes, sizeof(j->fs_rules_fd));
	if (!j->fs_rules_fd)
		goto bad_cgroups;
1870 
1871 	if (j->seccomp_policy_path) { /* stale pointer */
1872 		char *seccomp_policy_path = consumestr(&serialized, &length);
1873 		if (!seccomp_policy_path)
1874 			goto bad_cgroups;
1875 		j->seccomp_policy_path = strdup(seccomp_policy_path);
1876 		if (!j->seccomp_policy_path)
1877 			goto bad_cgroups;
1878 	}
1879 
1880 	return 0;
1881 
1882 	/*
1883 	 * If more is added after j->seccomp_policy_path, then this is needed:
1884 	 * if (j->seccomp_policy_path)
1885 	 * 	free(j->seccomp_policy_path);
1886 	 */
1887 
1888 bad_cgroups:
1889 	free_mounts_list(j);
1890 	free_remounts_list(j);
1891 	for (i = 0; i < j->cgroup_count; ++i)
1892 		free(j->cgroups[i]);
1893 bad_fs_rules:
1894 	free_fs_rules_list(j);
1895 bad_mounts:
1896 	if (j->filter_prog && j->filter_prog->filter)
1897 		free(j->filter_prog->filter);
1898 bad_filter_prog_instrs:
1899 	if (j->filter_prog)
1900 		free(j->filter_prog);
1901 bad_filters:
1902 	if (j->alt_syscall_table)
1903 		free(j->alt_syscall_table);
1904 bad_syscall_table:
1905 	if (j->hostname)
1906 		free(j->hostname);
1907 bad_hostname:
1908 	if (j->chrootdir)
1909 		free(j->chrootdir);
1910 bad_chrootdir:
1911 	if (j->suppl_gid_list)
1912 		free(j->suppl_gid_list);
1913 bad_gid_list:
1914 	if (j->user)
1915 		free(j->user);
1916 clear_pointers:
1917 	j->user = NULL;
1918 	j->suppl_gid_list = NULL;
1919 	j->chrootdir = NULL;
1920 	j->hostname = NULL;
1921 	j->alt_syscall_table = NULL;
1922 	j->cgroup_count = 0;
1923 	j->fs_rules_count = 0;
1924 	j->seccomp_policy_path = NULL;
1925 out:
1926 	return ret;
1927 }
1928 
1929 struct dev_spec {
1930 	const char *name;
1931 	mode_t mode;
1932 	dev_t major, minor;
1933 };
1934 
1935 // clang-format off
1936 static const struct dev_spec device_nodes[] = {
1937     {
1938 "null",
1939 	S_IFCHR | 0666, 1, 3,
1940     },
1941     {
1942 	"zero",
1943 	S_IFCHR | 0666, 1, 5,
1944     },
1945     {
1946 	"full",
1947 	S_IFCHR | 0666, 1, 7,
1948     },
1949     {
1950 	"urandom",
1951 	S_IFCHR | 0444, 1, 9,
1952     },
1953     {
1954 	"tty",
1955 	S_IFCHR | 0666, 5, 0,
1956     },
1957 };
1958 // clang-format on
1959 
1960 struct dev_sym_spec {
1961 	const char *source, *dest;
1962 };
1963 
1964 static const struct dev_sym_spec device_symlinks[] = {
1965     {
1966 	"ptmx",
1967 	"pts/ptmx",
1968     },
1969     {
1970 	"fd",
1971 	"/proc/self/fd",
1972     },
1973     {
1974 	"stdin",
1975 	"fd/0",
1976     },
1977     {
1978 	"stdout",
1979 	"fd/1",
1980     },
1981     {
1982 	"stderr",
1983 	"fd/2",
1984     },
1985 };
1986 
/*
 * Clean up the temporary dev path we had set up previously.  In case of
 * errors, we don't want to go leaking empty tempdirs.
 */
static void mount_dev_cleanup(char *dev_path)
1992 {
1993 	umount2(dev_path, MNT_DETACH);
1994 	rmdir(dev_path);
1995 	free(dev_path);
1996 }
1997 
1998 /*
1999  * Set up the pseudo /dev path at the temporary location.
2000  * See mount_dev_finalize for more details.
2001  */
static int mount_dev(char **dev_path_ret)
2003 {
2004 	int ret;
2005 	attribute_cleanup_fd int dev_fd = -1;
2006 	size_t i;
2007 	mode_t mask;
2008 	char *dev_path;
2009 
2010 	/*
2011 	 * Create a temp path for the /dev init.  We'll relocate this to the
2012 	 * final location later on in the startup process.
2013 	 */
2014 	dev_path = *dev_path_ret = strdup("/tmp/minijail.dev.XXXXXX");
2015 	if (dev_path == NULL || mkdtemp(dev_path) == NULL)
2016 		pdie("could not create temp path for /dev");
2017 
2018 	/* Set up the empty /dev mount point first. */
2019 	ret = mount("minijail-devfs", dev_path, "tmpfs", MS_NOEXEC | MS_NOSUID,
2020 		    "size=5M,mode=755");
2021 	if (ret) {
2022 		rmdir(dev_path);
2023 		return ret;
2024 	}
2025 
2026 	/* We want to set the mode directly from the spec. */
2027 	mask = umask(0);
2028 
2029 	/* Get a handle to the temp dev path for *at funcs below. */
2030 	dev_fd = open(dev_path, O_DIRECTORY | O_PATH | O_CLOEXEC);
2031 	if (dev_fd < 0) {
2032 		ret = 1;
2033 		goto done;
2034 	}
2035 
2036 	/* Create all the nodes in /dev. */
2037 	for (i = 0; i < ARRAY_SIZE(device_nodes); ++i) {
2038 		const struct dev_spec *ds = &device_nodes[i];
2039 		ret = mknodat(dev_fd, ds->name, ds->mode,
2040 			      makedev(ds->major, ds->minor));
2041 		if (ret)
2042 			goto done;
2043 	}
2044 
2045 	/* Create all the symlinks in /dev. */
2046 	for (i = 0; i < ARRAY_SIZE(device_symlinks); ++i) {
2047 		const struct dev_sym_spec *ds = &device_symlinks[i];
2048 		ret = symlinkat(ds->dest, dev_fd, ds->source);
2049 		if (ret)
2050 			goto done;
2051 	}
2052 
2053 	/* Create empty dir for glibc shared mem APIs. */
2054 	ret = mkdirat(dev_fd, "shm", 01777);
2055 	if (ret)
2056 		goto done;
2057 
2058 	/* Restore old mask. */
2059 done:
2060 	umask(mask);
2061 
2062 	if (ret)
2063 		mount_dev_cleanup(dev_path);
2064 
2065 	return ret;
2066 }
2067 
2068 /*
2069  * Relocate the temporary /dev mount to its final /dev place.
 * We have to do this two-step process so people can bind-mount extra
 * /dev paths like /dev/log.
2072  */
static int mount_dev_finalize(const struct minijail *j, char *dev_path)
2074 {
2075 	int ret = -1;
2076 	char *dest = NULL;
2077 
2078 	/* Unmount the /dev mount if possible. */
2079 	if (umount2("/dev", MNT_DETACH))
2080 		goto done;
2081 
2082 	if (asprintf(&dest, "%s/dev", j->chrootdir ?: "") < 0)
2083 		goto done;
2084 
2085 	if (mount(dev_path, dest, NULL, MS_MOVE, NULL))
2086 		goto done;
2087 
2088 	ret = 0;
2089 done:
2090 	free(dest);
2091 	mount_dev_cleanup(dev_path);
2092 
2093 	return ret;
2094 }
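
/*
 * Sketch of the two-step /dev setup above (illustrative only): mount_dev()
 * builds a private tmpfs at /tmp/minijail.dev.XXXXXX and populates it with
 * the static nodes and symlinks, bind mounts targeting /dev/... are applied
 * against that staging path by mount_one(), and mount_dev_finalize() then
 * MS_MOVEs the staging mount onto "<chrootdir>/dev" once everything else is
 * in place.
 */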
2095 
2096 /*
2097  * mount_one: Applies mounts from @m for @j, recursing as needed.
2098  * @j Minijail these mounts are for
2099  * @m Head of list of mounts
2100  *
2101  * Returns 0 for success.
2102  */
static int mount_one(const struct minijail *j, struct mountpoint *m,
		     const char *dev_path)
2105 {
2106 	int ret;
2107 	char *dest;
2108 	bool do_remount = false;
2109 	bool has_bind_flag = mount_has_bind_flag(m);
2110 	bool has_remount_flag = !!(m->flags & MS_REMOUNT);
2111 	unsigned long original_mnt_flags = 0;
2112 
2113 	/* We assume |dest| has a leading "/". */
2114 	if (dev_path && strncmp("/dev/", m->dest, 5) == 0) {
2115 		/*
2116 		 * Since the temp path is rooted at /dev, skip that dest part.
2117 		 */
2118 		if (asprintf(&dest, "%s%s", dev_path, m->dest + 4) < 0)
2119 			return -ENOMEM;
2120 	} else {
2121 		if (asprintf(&dest, "%s%s", j->chrootdir ?: "", m->dest) < 0)
2122 			return -ENOMEM;
2123 	}
2124 
2125 	ret = setup_mount_destination(m->src, dest, j->uid, j->gid,
2126 				      has_bind_flag);
2127 	if (ret) {
2128 		warn("cannot create mount target '%s'", dest);
2129 		goto error;
2130 	}
2131 
2132 	/*
2133 	 * Remount bind mounts that:
2134 	 * - Come from the minijail_bind() API, and
2135 	 * - Add the 'ro' flag
2136 	 * since 'bind' and other flags can't both be specified in the same
2137 	 * mount(2) call.
2138 	 * Callers using minijail_mount() to perform bind mounts are expected to
2139 	 * know what they're doing and call minijail_mount() with MS_REMOUNT as
2140 	 * needed.
2141 	 * Therefore, if the caller is asking for a remount (using MS_REMOUNT),
2142 	 * there is no need to do an extra remount here.
2143 	 */
2144 	if (has_bind_flag && strcmp(m->type, "minijail_bind") == 0 &&
2145 	    !has_remount_flag) {
2146 		/*
2147 		 * Grab the mount flags of the source. These are used to figure
2148 		 * out whether the bind mount needs to be remounted read-only.
2149 		 */
2150 		if (get_mount_flags(m->src, &original_mnt_flags)) {
2151 			warn("cannot get mount flags for '%s'", m->src);
2152 			goto error;
2153 		}
2154 
2155 		if ((m->flags & MS_RDONLY) !=
2156 		    (original_mnt_flags & MS_RDONLY)) {
			do_remount = true;
2158 			/*
2159 			 * Restrict the mount flags to those that are
2160 			 * user-settable in a MS_REMOUNT request, but excluding
2161 			 * MS_RDONLY. The user-requested mount flags will
2162 			 * dictate whether the remount will have that flag or
2163 			 * not.
2164 			 */
2165 			original_mnt_flags &=
2166 			    (MS_USER_SETTABLE_MASK & ~MS_RDONLY);
2167 		}
2168 	}
2169 
2170 	/*
2171 	 * Do a final check for symlinks in |m->src|.
2172 	 * |m->src| will only contain a valid path when purely bind-mounting
2173 	 * (but not when remounting a bind mount).
2174 	 *
2175 	 * Short of having a version of mount(2) that can take fd's, this is the
2176 	 * smallest we can make the TOCTOU window.
2177 	 */
2178 	if (has_bind_flag && !has_remount_flag && !is_valid_bind_path(m->src)) {
2179 		warn("src '%s' is not a valid bind mount path", m->src);
2180 		goto error;
2181 	}
2182 
2183 	ret = mount(m->src, dest, m->type, m->flags, m->data);
2184 	if (ret) {
2185 		pwarn("cannot mount '%s' as '%s' with flags %#lx", m->src, dest,
2186 		      m->flags);
2187 		goto error;
2188 	}
2189 
2190 	/* Remount *after* the initial mount. */
2191 	if (do_remount) {
2192 		ret =
2193 		    mount(m->src, dest, NULL,
2194 			  m->flags | original_mnt_flags | MS_REMOUNT, m->data);
2195 		if (ret) {
2196 			pwarn(
2197 			    "cannot bind-remount '%s' as '%s' with flags %#lx",
2198 			    m->src, dest,
2199 			    m->flags | original_mnt_flags | MS_REMOUNT);
2200 			goto error;
2201 		}
2202 	}
2203 
2204 	free(dest);
2205 	if (m->next)
2206 		return mount_one(j, m->next, dev_path);
2207 	return 0;
2208 
2209 error:
2210 	free(dest);
2211 	return ret;
2212 }
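
/*
 * Example of the bind-then-remount behavior above (illustrative): a caller
 * using the minijail_bind() API with writeable set to 0 gets an initial
 * MS_BIND mount; if the source filesystem is mounted read-write while
 * MS_RDONLY was requested, mount_one() issues a second MS_REMOUNT of the new
 * bind mount to flip it read-only without touching the source mount.
 */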
2213 
static void process_mounts_or_die(const struct minijail *j)
2215 {
2216 	/*
2217 	 * We have to mount /dev first in case there are bind mounts from
2218 	 * the original /dev into the new unique tmpfs one.
2219 	 */
2220 	char *dev_path = NULL;
2221 	if (j->flags.mount_dev && mount_dev(&dev_path))
2222 		pdie("mount_dev failed");
2223 
2224 	if (j->mounts_head && mount_one(j, j->mounts_head, dev_path)) {
2225 		warn("mount_one failed with /dev at '%s'", dev_path);
2226 
2227 		if (dev_path)
2228 			mount_dev_cleanup(dev_path);
2229 
2230 		_exit(MINIJAIL_ERR_MOUNT);
2231 	}
2232 
2233 	/*
2234 	 * Once all bind mounts have been processed, move the temp dev to
2235 	 * its final /dev home.
2236 	 */
2237 	if (j->flags.mount_dev && mount_dev_finalize(j, dev_path))
2238 		pdie("mount_dev_finalize failed");
2239 }
2240 
static int enter_chroot(const struct minijail *j)
2242 {
2243 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_CHROOT);
2244 
2245 	if (chroot(j->chrootdir))
2246 		return -errno;
2247 
2248 	if (chdir("/"))
2249 		return -errno;
2250 
2251 	return 0;
2252 }
2253 
static int enter_pivot_root(const struct minijail *j)
2255 {
2256 	attribute_cleanup_fd int oldroot = -1;
2257 	attribute_cleanup_fd int newroot = -1;
2258 
2259 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_CHROOT);
2260 
2261 	/*
2262 	 * Keep the fd for both old and new root.
2263 	 * It will be used in fchdir(2) later.
2264 	 */
2265 	oldroot = open("/", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
2266 	if (oldroot < 0)
2267 		pdie("failed to open / for fchdir");
2268 	newroot = open(j->chrootdir, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
2269 	if (newroot < 0)
2270 		pdie("failed to open %s for fchdir", j->chrootdir);
2271 
2272 	/*
2273 	 * To ensure j->chrootdir is the root of a filesystem,
2274 	 * do a self bind mount.
2275 	 */
2276 	if (mount(j->chrootdir, j->chrootdir, "bind", MS_BIND | MS_REC, ""))
2277 		pdie("failed to bind mount '%s'", j->chrootdir);
2278 	if (chdir(j->chrootdir))
2279 		return -errno;
2280 	if (syscall(SYS_pivot_root, ".", "."))
2281 		pdie("pivot_root");
2282 
2283 	/*
2284 	 * Now the old root is mounted on top of the new root. Use fchdir(2) to
2285 	 * change to the old root and unmount it.
2286 	 */
2287 	if (fchdir(oldroot))
2288 		pdie("failed to fchdir to old /");
2289 
2290 	/*
2291 	 * If skip_remount_private was enabled for minijail_enter(),
2292 	 * there could be a shared mount point under |oldroot|. In that case,
2293 	 * mounts under this shared mount point will be unmounted below, and
2294 	 * this unmounting will propagate to the original mount namespace
2295 	 * (because the mount point is shared). To prevent this unexpected
2296 	 * unmounting, remove these mounts from their peer groups by recursively
2297 	 * remounting them as MS_PRIVATE.
2298 	 */
2299 	if (mount(NULL, ".", NULL, MS_REC | MS_PRIVATE, NULL))
2300 		pdie("failed to mount(/, private) before umount(/)");
2301 	/* The old root might be busy, so use lazy unmount. */
2302 	if (umount2(".", MNT_DETACH))
2303 		pdie("umount(/)");
2304 	/* Change back to the new root. */
2305 	if (fchdir(newroot))
2306 		return -errno;
2307 	if (chroot("/"))
2308 		return -errno;
2309 	/* Set correct CWD for getcwd(3). */
2310 	if (chdir("/"))
2311 		return -errno;
2312 
2313 	return 0;
2314 }
2315 
static int mount_tmp(const struct minijail *j)
2317 {
2318 	const char fmt[] = "size=%zu,mode=1777";
2319 	/* Count for the user storing ULLONG_MAX literally + extra space. */
2320 	char data[sizeof(fmt) + sizeof("18446744073709551615ULL")];
2321 	int ret;
2322 
2323 	ret = snprintf(data, sizeof(data), fmt, j->tmpfs_size);
2324 
2325 	if (ret <= 0)
2326 		pdie("tmpfs size spec error");
2327 	else if ((size_t)ret >= sizeof(data))
2328 		pdie("tmpfs size spec too large");
2329 
2330 	unsigned long flags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
2331 
2332 	if (block_symlinks_in_noninit_mountns_tmp()) {
2333 		flags |= MS_NOSYMFOLLOW;
2334 	}
2335 
2336 	return mount("none", "/tmp", "tmpfs", flags, data);
2337 }
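
/*
 * Illustrative: with j->tmpfs_size set to 64 MiB this amounts to mounting
 * "none /tmp tmpfs size=67108864,mode=1777", plus nodev/noexec/nosuid and,
 * where supported, nosymfollow.
 */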
2338 
static int remount_proc_readonly(const struct minijail *j)
2340 {
2341 	const char *kProcPath = "/proc";
2342 	const unsigned int kSafeFlags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
2343 	/*
2344 	 * Right now, we're holding a reference to our parent's old mount of
2345 	 * /proc in our namespace, which means using MS_REMOUNT here would
2346 	 * mutate our parent's mount as well, even though we're in a VFS
2347 	 * namespace (!). Instead, remove their mount from our namespace lazily
2348 	 * (MNT_DETACH) and make our own.
2349 	 *
2350 	 * However, we skip this in the user namespace case because it will
2351 	 * invariably fail. Every mount namespace is "owned" by the
2352 	 * user namespace of the process that creates it. Mount namespace A is
2353 	 * "less privileged" than mount namespace B if A is created off of B,
2354 	 * and B is owned by a different user namespace.
2355 	 * When a less privileged mount namespace is created, the mounts used to
2356 	 * initialize it (coming from the more privileged mount namespace) come
2357 	 * as a unit, and are locked together. This means that code running in
2358 	 * the new mount (and user) namespace cannot piecemeal unmount
2359 	 * individual mounts inherited from a more privileged mount namespace.
2360 	 * See https://man7.org/linux/man-pages/man7/mount_namespaces.7.html,
2361 	 * "Restrictions on mount namespaces" for details.
2362 	 *
2363 	 * This happens in our use case because we first enter a new user
2364 	 * namespace (on clone(2)) and then we unshare(2) a new mount namespace,
2365 	 * which means the new mount namespace is less privileged than its
2366 	 * parent mount namespace. This would also happen if we entered a new
2367 	 * mount namespace on clone(2), since the user namespace is created
2368 	 * first.
2369 	 * In all other non-user-namespace cases the new mount namespace is
2370 	 * similarly privileged as the parent mount namespace so unmounting a
2371 	 * single mount is allowed.
2372 	 *
2373 	 * We still remount /proc as read-only in the user namespace case
2374 	 * because while a process with CAP_SYS_ADMIN in the new user namespace
2375 	 * can unmount the RO mount and get at the RW mount, an attacker with
2376 	 * access only to a write primitive will not be able to modify /proc.
2377 	 */
2378 	if (!j->flags.userns && umount2(kProcPath, MNT_DETACH))
2379 		return -errno;
2380 	if (mount("proc", kProcPath, "proc", kSafeFlags | MS_RDONLY, ""))
2381 		return -errno;
2382 	return 0;
2383 }
2384 
static void kill_child_and_die(const struct minijail *j, const char *msg)
2386 {
2387 	kill(j->initpid, SIGKILL);
2388 	die("%s", msg);
2389 }
2390 
static void write_pid_file_or_die(const struct minijail *j)
2392 {
2393 	if (write_pid_to_path(j->initpid, j->pid_file_path))
2394 		kill_child_and_die(j, "failed to write pid file");
2395 }
2396 
static void add_to_cgroups_or_die(const struct minijail *j)
2398 {
2399 	size_t i;
2400 
2401 	for (i = 0; i < j->cgroup_count; ++i) {
2402 		if (write_pid_to_path(j->initpid, j->cgroups[i]))
2403 			kill_child_and_die(j, "failed to add to cgroups");
2404 	}
2405 }
2406 
static void set_rlimits_or_die(const struct minijail *j)
2408 {
2409 	size_t i;
2410 
2411 	for (i = 0; i < j->rlimit_count; ++i) {
2412 		struct rlimit limit;
2413 		limit.rlim_cur = j->rlimits[i].cur;
2414 		limit.rlim_max = j->rlimits[i].max;
2415 		if (prlimit(j->initpid, j->rlimits[i].type, &limit, NULL))
2416 			kill_child_and_die(j, "failed to set rlimit");
2417 	}
2418 }
2419 
static void write_ugid_maps_or_die(const struct minijail *j)
2421 {
2422 	if (j->uidmap && write_proc_file(j->initpid, j->uidmap, "uid_map") != 0)
2423 		kill_child_and_die(j, "failed to write uid_map");
2424 	if (j->gidmap && j->flags.disable_setgroups) {
2425 		/*
2426 		 * Older kernels might not have the /proc/<pid>/setgroups files.
2427 		 */
2428 		int ret = write_proc_file(j->initpid, "deny", "setgroups");
2429 		if (ret != 0) {
2430 			if (ret == -ENOENT) {
2431 				/*
2432 				 * See
2433 				 * http://man7.org/linux/man-pages/man7/user_namespaces.7.html.
2434 				 */
2435 				warn("could not disable setgroups(2)");
2436 			} else
2437 				kill_child_and_die(
2438 				    j, "failed to disable setgroups(2)");
2439 		}
2440 	}
2441 	if (j->gidmap && write_proc_file(j->initpid, j->gidmap, "gid_map") != 0)
2442 		kill_child_and_die(j, "failed to write gid_map");
2443 }
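
/*
 * For reference (illustrative): uid_map/gid_map entries use the kernel's
 * "<id-inside-ns> <id-outside-ns> <count>" format, so a jail configured with
 * minijail_uidmap(j, "0 1000 1") maps uid 1000 on the host to uid 0 (root)
 * inside the user namespace.
 */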
2444 
static void enter_user_namespace(const struct minijail *j)
2446 {
2447 	int uid = j->flags.uid ? j->uid : 0;
2448 	int gid = j->flags.gid ? j->gid : 0;
2449 	if (j->gidmap && setresgid(gid, gid, gid)) {
2450 		pdie("user_namespaces: setresgid(%d, %d, %d) failed", gid, gid,
2451 		     gid);
2452 	}
2453 	if (j->uidmap && setresuid(uid, uid, uid)) {
2454 		pdie("user_namespaces: setresuid(%d, %d, %d) failed", uid, uid,
2455 		     uid);
2456 	}
2457 }
2458 
static void parent_setup_complete(int *pipe_fds)
2460 {
2461 	close_and_reset(&pipe_fds[0]);
2462 	close_and_reset(&pipe_fds[1]);
2463 }
2464 
2465 /*
2466  * wait_for_parent_setup: Called by the child process to wait for any
2467  * further parent-side setup to complete before continuing.
2468  */
static void wait_for_parent_setup(int *pipe_fds)
2470 {
2471 	char buf;
2472 
2473 	close_and_reset(&pipe_fds[1]);
2474 
2475 	/* Wait for parent to complete setup and close the pipe. */
2476 	if (read(pipe_fds[0], &buf, 1) != 0)
2477 		die("failed to sync with parent");
2478 	close_and_reset(&pipe_fds[0]);
2479 }
2480 
static void drop_ugid(const struct minijail *j)
2482 {
2483 	if (j->flags.inherit_suppl_gids + j->flags.keep_suppl_gids +
2484 		j->flags.set_suppl_gids >
2485 	    1) {
2486 		die("can only do one of inherit, keep, or set supplementary "
2487 		    "groups");
2488 	}
2489 
2490 	if (j->flags.inherit_suppl_gids) {
2491 		if (initgroups(j->user, j->usergid))
2492 			pdie("initgroups(%s, %d) failed", j->user, j->usergid);
2493 	} else if (j->flags.set_suppl_gids) {
2494 		if (setgroups(j->suppl_gid_count, j->suppl_gid_list))
2495 			pdie("setgroups(suppl_gids) failed");
2496 	} else if (!j->flags.keep_suppl_gids && !j->flags.disable_setgroups) {
2497 		/*
2498 		 * Only attempt to clear supplementary groups if we are changing
2499 		 * users or groups, and if the caller did not request to disable
2500 		 * setgroups (used when entering a user namespace as a
2501 		 * non-privileged user).
2502 		 */
2503 		if ((j->flags.uid || j->flags.gid) && setgroups(0, NULL))
2504 			pdie("setgroups(0, NULL) failed");
2505 	}
2506 
2507 	if (j->flags.gid && setresgid(j->gid, j->gid, j->gid))
2508 		pdie("setresgid(%d, %d, %d) failed", j->gid, j->gid, j->gid);
2509 
2510 	if (j->flags.uid && setresuid(j->uid, j->uid, j->uid))
2511 		pdie("setresuid(%d, %d, %d) failed", j->uid, j->uid, j->uid);
2512 }
2513 
static void drop_capbset(uint64_t keep_mask, unsigned int last_valid_cap)
2515 {
2516 	const uint64_t one = 1;
2517 	unsigned int i;
2518 	for (i = 0; i < sizeof(keep_mask) * 8 && i <= last_valid_cap; ++i) {
2519 		if (keep_mask & (one << i))
2520 			continue;
2521 		if (prctl(PR_CAPBSET_DROP, i))
2522 			pdie("could not drop capability from bounding set");
2523 	}
2524 }
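
/*
 * keep_mask is a plain bitmask indexed by capability number (illustrative):
 * keeping only CAP_NET_BIND_SERVICE corresponds to
 * keep_mask = UINT64_C(1) << CAP_NET_BIND_SERVICE; every other bit at or
 * below last_valid_cap is dropped from the bounding set above.
 */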
2525 
static void drop_caps(const struct minijail *j, unsigned int last_valid_cap)
2527 {
2528 	if (!j->flags.use_caps)
2529 		return;
2530 
2531 	cap_t caps = cap_get_proc();
2532 	cap_value_t flag[1];
2533 	const size_t ncaps = sizeof(j->caps) * 8;
2534 	const uint64_t one = 1;
2535 	unsigned int i;
2536 	if (!caps)
2537 		die("can't get process caps");
2538 	if (cap_clear(caps))
2539 		die("can't clear caps");
2540 
2541 	for (i = 0; i < ncaps && i <= last_valid_cap; ++i) {
2542 		/* Keep CAP_SETPCAP for dropping bounding set bits. */
2543 		if (i != CAP_SETPCAP && !(j->caps & (one << i)))
2544 			continue;
2545 		flag[0] = i;
2546 		if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_SET))
2547 			die("can't add effective cap");
2548 		if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_SET))
2549 			die("can't add permitted cap");
2550 		if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_SET))
2551 			die("can't add inheritable cap");
2552 	}
2553 	if (cap_set_proc(caps))
2554 		die("can't apply initial cleaned capset");
2555 
2556 	/*
2557 	 * Instead of dropping the bounding set first, do it here in case
2558 	 * the caller had a more permissive bounding set which could
2559 	 * have been used above to raise a capability that wasn't already
2560 	 * present. This requires CAP_SETPCAP, so we raised/kept it above.
2561 	 *
2562 	 * However, if we're asked to skip setting *and* locking the
2563 	 * SECURE_NOROOT securebit, also skip dropping the bounding set.
2564 	 * If the caller wants to regain all capabilities when executing a
2565 	 * set-user-ID-root program, allow them to do so. The default behavior
2566 	 * (i.e. the behavior without |securebits_skip_mask| set) will still put
2567 	 * the jailed process tree in a capabilities-only environment.
2568 	 *
2569 	 * We check the negated skip mask for SECURE_NOROOT and
2570 	 * SECURE_NOROOT_LOCKED. If the bits are set in the negated mask they
2571 	 * will *not* be skipped in lock_securebits(), and therefore we should
2572 	 * drop the bounding set.
2573 	 */
2574 	if (secure_noroot_set_and_locked(~j->securebits_skip_mask)) {
2575 		drop_capbset(j->caps, last_valid_cap);
2576 	} else {
2577 		warn("SECURE_NOROOT not set, not dropping bounding set");
2578 	}
2579 
2580 	/* If CAP_SETPCAP wasn't specifically requested, now we remove it. */
2581 	if ((j->caps & (one << CAP_SETPCAP)) == 0) {
2582 		flag[0] = CAP_SETPCAP;
2583 		if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_CLEAR))
2584 			die("can't clear effective cap");
2585 		if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_CLEAR))
2586 			die("can't clear permitted cap");
2587 		if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_CLEAR))
2588 			die("can't clear inheritable cap");
2589 	}
2590 
2591 	if (cap_set_proc(caps))
2592 		die("can't apply final cleaned capset");
2593 
2594 	/*
2595 	 * If ambient capabilities are supported, clear all capabilities first,
2596 	 * then raise the requested ones.
2597 	 */
2598 	if (j->flags.set_ambient_caps) {
2599 		if (!cap_ambient_supported()) {
2600 			pdie("ambient capabilities not supported");
2601 		}
2602 		if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0, 0, 0) !=
2603 		    0) {
2604 			pdie("can't clear ambient capabilities");
2605 		}
2606 
2607 		for (i = 0; i < ncaps && i <= last_valid_cap; ++i) {
2608 			if (!(j->caps & (one << i)))
2609 				continue;
2610 
2611 			if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, i, 0,
2612 				  0) != 0) {
2613 				pdie("prctl(PR_CAP_AMBIENT, "
2614 				     "PR_CAP_AMBIENT_RAISE, %u) failed",
2615 				     i);
2616 			}
2617 		}
2618 	}
2619 
2620 	cap_free(caps);
2621 }
2622 
2623 /* Calls landlock_restrict_self(), based on current inodes. */
static void apply_landlock_restrictions(const struct minijail *j)
2625 {
2626 	struct fs_rule *r = j->fs_rules_head;
2627 	/* The ruleset_fd needs to be mutable so use a stack copy from now on.
2628 	 */
2629 	int ruleset_fd = j->fs_rules_fd;
2630 	if (!j->flags.enable_fs_restrictions || !r) {
2631 		return;
2632 	}
2633 
2634 	if (minijail_is_fs_restriction_available()) {
2635 		while (r) {
2636 			populate_ruleset_internal(r->path, ruleset_fd,
2637 						  r->landlock_flags);
2638 			r = r->next;
2639 		}
2640 	}
2641 
2642 	if (ruleset_fd >= 0) {
2643 		if (j->filename != NULL) {
2644 			info("applying Landlock to process %s", j->filename);
2645 		}
2646 		if (landlock_restrict_self(ruleset_fd, 0)) {
2647 			pdie("failed to enforce ruleset");
2648 		}
2649 		close(ruleset_fd);
2650 	}
2651 }
2652 
static void set_no_new_privs(const struct minijail *j)
2654 {
2655 	if (j->flags.no_new_privs) {
2656 		if (!sys_set_no_new_privs()) {
2657 			die("set_no_new_privs() failed");
2658 		}
2659 	}
2660 }
2661 
static void set_seccomp_filter(const struct minijail *j)
2663 {
2664 	/*
2665 	 * Code running with ASan
2666 	 * (https://github.com/google/sanitizers/wiki/AddressSanitizer)
2667 	 * will make system calls not included in the syscall filter policy,
2668 	 * which will likely crash the program. Skip setting seccomp filter in
2669 	 * that case.
2670 	 * 'running_with_asan()' has no inputs and is completely defined at
2671 	 * build time, so this cannot be used by an attacker to skip setting
2672 	 * seccomp filter.
2673 	 */
2674 	if (j->flags.seccomp_filter && running_with_asan()) {
2675 		warn("running with (HW)ASan, not setting seccomp filter");
2676 		return;
2677 	}
2678 
2679 	if (j->flags.seccomp_filter) {
2680 		if (seccomp_is_logging_allowed(j)) {
2681 			warn("logging seccomp filter failures");
2682 			if (!seccomp_ret_log_available()) {
2683 				/*
2684 				 * If SECCOMP_RET_LOG is not available,
2685 				 * install the SIGSYS handler first.
2686 				 */
2687 				if (install_sigsys_handler())
2688 					pdie(
2689 					    "failed to install SIGSYS handler");
2690 			}
2691 		} else if (j->flags.seccomp_filter_tsync) {
2692 			/*
2693 			 * If setting thread sync,
2694 			 * reset the SIGSYS signal handler so that
2695 			 * the entire thread group is killed.
2696 			 */
2697 			if (signal(SIGSYS, SIG_DFL) == SIG_ERR)
2698 				pdie("failed to reset SIGSYS disposition");
2699 		}
2700 	}
2701 
2702 	/*
2703 	 * Install the syscall filter.
2704 	 */
2705 	if (j->flags.seccomp_filter) {
2706 		if (j->flags.seccomp_filter_tsync ||
2707 		    j->flags.seccomp_filter_allow_speculation) {
2708 			int filter_flags =
2709 			    (j->flags.seccomp_filter_tsync
2710 				 ? SECCOMP_FILTER_FLAG_TSYNC
2711 				 : 0) |
2712 			    (j->flags.seccomp_filter_allow_speculation
2713 				 ? SECCOMP_FILTER_FLAG_SPEC_ALLOW
2714 				 : 0);
2715 			if (sys_seccomp(SECCOMP_SET_MODE_FILTER, filter_flags,
2716 					j->filter_prog)) {
2717 				pdie("seccomp(tsync) failed");
2718 			}
2719 		} else {
2720 			if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
2721 				  j->filter_prog)) {
2722 				pdie("prctl(seccomp_filter) failed");
2723 			}
2724 		}
2725 	}
2726 }
2727 
2728 static pid_t forward_pid = -1;
2729 
static void forward_signal(int sig, siginfo_t *siginfo attribute_unused,
			   void *void_context attribute_unused)
2732 {
2733 	if (forward_pid != -1) {
2734 		kill(forward_pid, sig);
2735 	}
2736 }
2737 
static void install_signal_handlers(void)
2739 {
2740 	struct sigaction act;
2741 
2742 	memset(&act, 0, sizeof(act));
2743 	act.sa_sigaction = &forward_signal;
2744 	act.sa_flags = SA_SIGINFO | SA_RESTART;
2745 
2746 	/* Handle all signals, except SIGCHLD. */
2747 	for (int sig = 1; sig < NSIG; sig++) {
2748 		/*
2749 		 * We don't care if we get EINVAL: that just means that we
2750 		 * can't handle this signal, so let's skip it and continue.
2751 		 */
2752 		sigaction(sig, &act, NULL);
2753 	}
2754 	/* Reset SIGCHLD's handler. */
2755 	signal(SIGCHLD, SIG_DFL);
2756 
2757 	/* Handle real-time signals. */
2758 	for (int sig = SIGRTMIN; sig <= SIGRTMAX; sig++) {
2759 		sigaction(sig, &act, NULL);
2760 	}
2761 }
2762 
static const char *lookup_hook_name(minijail_hook_event_t event)
2764 {
2765 	switch (event) {
2766 	case MINIJAIL_HOOK_EVENT_PRE_DROP_CAPS:
2767 		return "pre-drop-caps";
2768 	case MINIJAIL_HOOK_EVENT_PRE_EXECVE:
2769 		return "pre-execve";
2770 	case MINIJAIL_HOOK_EVENT_PRE_CHROOT:
2771 		return "pre-chroot";
2772 	case MINIJAIL_HOOK_EVENT_MAX:
2773 		/*
2774 		 * Adding this in favor of a default case to force the
2775 		 * compiler to error out if a new enum value is added.
2776 		 */
2777 		break;
2778 	}
2779 	return "unknown";
2780 }
2781 
static void run_hooks_or_die(const struct minijail *j,
			     minijail_hook_event_t event)
2784 {
2785 	int rc;
2786 	int hook_index = 0;
2787 	for (struct hook *c = j->hooks_head; c; c = c->next) {
2788 		if (c->event != event)
2789 			continue;
2790 		rc = c->hook(c->payload);
2791 		if (rc != 0) {
2792 			errno = -rc;
2793 			pdie("%s hook (index %d) failed",
2794 			     lookup_hook_name(event), hook_index);
2795 		}
2796 		/* Only increase the index within the same hook event type. */
2797 		++hook_index;
2798 	}
2799 }
2800 
void API minijail_enter(const struct minijail *j)
2802 {
2803 	/*
2804 	 * If we're dropping caps, get the last valid cap from /proc now,
2805 	 * since /proc can be unmounted before drop_caps() is called.
2806 	 */
2807 	unsigned int last_valid_cap = 0;
2808 	if (j->flags.capbset_drop || j->flags.use_caps)
2809 		last_valid_cap = get_last_valid_cap();
2810 
2811 	if (j->flags.pids)
2812 		die("tried to enter a pid-namespaced jail;"
2813 		    " try minijail_run()?");
2814 
2815 	if (j->flags.inherit_suppl_gids && !j->user)
2816 		die("cannot inherit supplementary groups without setting a "
2817 		    "username");
2818 
2819 	/*
2820 	 * We can't recover from failures if we've dropped privileges partially,
2821 	 * so we don't even try. If any of our operations fail, we abort() the
2822 	 * entire process.
2823 	 */
2824 	if (j->flags.enter_vfs) {
2825 		if (setns(j->mountns_fd, CLONE_NEWNS))
2826 			pdie("setns(CLONE_NEWNS) failed");
2827 		close(j->mountns_fd);
2828 	}
2829 
2830 	if (j->flags.vfs) {
2831 		if (unshare(CLONE_NEWNS))
2832 			pdie("unshare(CLONE_NEWNS) failed");
2833 		/*
2834 		 * By default, remount all filesystems as private, unless
2835 		 * - Passed a specific remount mode, in which case remount with
2836 		 *   that,
2837 		 * - Asked not to remount at all, in which case skip the
2838 		 *   mount(2) call.
2839 		 * https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
2840 		 */
2841 		if (j->remount_mode) {
2842 			if (mount(NULL, "/", NULL, MS_REC | j->remount_mode,
2843 				  NULL))
2844 				pdie("mount(NULL, /, NULL, "
2845 				     "MS_REC | j->remount_mode, NULL) failed");
2846 
2847 			struct minijail_remount *temp = j->remounts_head;
2848 			while (temp) {
2849 				if (temp->remount_mode < j->remount_mode)
2850 					die("cannot remount %s as stricter "
2851 					    "than the root dir",
2852 					    temp->mount_name);
2853 				if (mount(NULL, temp->mount_name, NULL,
2854 					  MS_REC | temp->remount_mode, NULL))
2855 					pdie("mount(NULL, %s, NULL, "
2856 					     "MS_REC | temp->remount_mode, "
2857 					     "NULL) failed",
2858 					     temp->mount_name);
2859 				temp = temp->next;
2860 			}
2861 		}
2862 	}
2863 
2864 	if (j->flags.ipc && unshare(CLONE_NEWIPC)) {
2865 		pdie("unshare(CLONE_NEWIPC) failed");
2866 	}
2867 
2868 	if (j->flags.uts) {
2869 		if (unshare(CLONE_NEWUTS))
2870 			pdie("unshare(CLONE_NEWUTS) failed");
2871 
2872 		if (j->hostname &&
2873 		    sethostname(j->hostname, strlen(j->hostname)))
2874 			pdie("sethostname(%s) failed", j->hostname);
2875 	}
2876 
2877 	if (j->flags.enter_net) {
2878 		if (setns(j->netns_fd, CLONE_NEWNET))
2879 			pdie("setns(CLONE_NEWNET) failed");
2880 		close(j->netns_fd);
2881 	} else if (j->flags.net) {
2882 		if (unshare(CLONE_NEWNET))
2883 			pdie("unshare(CLONE_NEWNET) failed");
2884 		if (j->flags.net_loopback)
2885 			config_net_loopback();
2886 	}
2887 
2888 	if (j->flags.ns_cgroups && unshare(CLONE_NEWCGROUP))
2889 		pdie("unshare(CLONE_NEWCGROUP) failed");
2890 
2891 	if (j->flags.new_session_keyring) {
2892 		if (syscall(SYS_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL) < 0)
2893 			pdie("keyctl(KEYCTL_JOIN_SESSION_KEYRING) failed");
2894 	}
2895 
2896 	/* We have to process all the mounts before we chroot/pivot_root. */
2897 	process_mounts_or_die(j);
2898 
2899 	if (j->flags.chroot && enter_chroot(j))
2900 		pdie("chroot");
2901 
2902 	if (j->flags.pivot_root && enter_pivot_root(j))
2903 		pdie("pivot_root");
2904 
2905 	if (j->flags.mount_tmp && mount_tmp(j))
2906 		pdie("mount_tmp");
2907 
2908 	if (j->flags.remount_proc_ro && remount_proc_readonly(j))
2909 		pdie("remount");
2910 
2911 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_DROP_CAPS);
2912 
2913 	/*
2914 	 * If we're only dropping capabilities from the bounding set, but not
2915 	 * from the thread's (permitted|inheritable|effective) sets, do it now.
2916 	 */
2917 	if (j->flags.capbset_drop) {
2918 		drop_capbset(j->cap_bset, last_valid_cap);
2919 	}
2920 
2921 	/*
2922 	 * POSIX capabilities are a bit tricky. We must set SECBIT_KEEP_CAPS
2923 	 * before drop_ugid() below as the latter would otherwise drop all
2924 	 * capabilities.
2925 	 */
2926 	if (j->flags.use_caps) {
2927 		/*
2928 		 * When using ambient capabilities, CAP_SET{GID,UID} can be
2929 		 * inherited across execve(2), so SECBIT_KEEP_CAPS is not
2930 		 * strictly needed.
2931 		 */
2932 		bool require_keep_caps = !j->flags.set_ambient_caps;
2933 		if (lock_securebits(j->securebits_skip_mask,
2934 				    require_keep_caps) < 0) {
2935 			pdie("locking securebits failed");
2936 		}
2937 	}
2938 
2939 	if (j->flags.no_new_privs) {
2940 		/*
2941 		 * If we're setting no_new_privs, we can drop privileges
2942 		 * before setting seccomp filter. This way filter policies
2943 		 * don't need to allow privilege-dropping syscalls.
2944 		 */
2945 		drop_ugid(j);
2946 		drop_caps(j, last_valid_cap);
2947 
2948 		/*
2949 		 * Landlock is applied as late as possible. If no_new_privs is
2950 		 * requested, then we need to set that first because the
2951 		 * landlock_restrict_self() syscall has a seccomp(2) like check
2952 		 * for that. See:
2953 		 * https://elixir.bootlin.com/linux/v5.15.74/source/security/landlock/syscalls.c#L409
2954 		 */
2955 		set_no_new_privs(j);
2956 		apply_landlock_restrictions(j);
2957 		set_seccomp_filter(j);
2958 	} else {
2959 		apply_landlock_restrictions(j);
2960 
2961 		/*
2962 		 * If we're not setting no_new_privs,
2963 		 * we need to set seccomp filter *before* dropping privileges.
2964 		 * WARNING: this means that filter policies *must* allow
2965 		 * setgroups()/setresgid()/setresuid() for dropping root and
2966 		 * capget()/capset()/prctl() for dropping caps.
2967 		 */
2968 		set_seccomp_filter(j);
2969 		drop_ugid(j);
2970 		drop_caps(j, last_valid_cap);
2971 	}
2972 
2973 	/*
2974 	 * Select the specified alternate syscall table.  The table must not
2975 	 * block prctl(2) if we're using seccomp as well.
2976 	 */
2977 	if (j->flags.alt_syscall) {
2978 		if (prctl(PR_ALT_SYSCALL, 1, j->alt_syscall_table))
2979 			pdie("prctl(PR_ALT_SYSCALL) failed");
2980 	}
2981 
2982 	/*
2983 	 * seccomp has to come last since it cuts off all the other
2984 	 * privilege-dropping syscalls :)
2985 	 */
2986 	if (j->flags.seccomp && prctl(PR_SET_SECCOMP, 1)) {
2987 		if ((errno == EINVAL) && seccomp_can_softfail()) {
2988 			warn("seccomp not supported");
2989 			return;
2990 		}
2991 		pdie("prctl(PR_SET_SECCOMP) failed");
2992 	}
2993 }
2994 
2995 /* TODO(wad): will visibility affect this variable? */
2996 static int init_exitstatus = 0;
2997 
static void init_term(int sig attribute_unused)
2999 {
3000 	_exit(init_exitstatus);
3001 }
3002 
static void init(pid_t rootpid)
3004 {
3005 	pid_t pid;
3006 	int status;
3007 	/* So that we exit with the right status. */
3008 	signal(SIGTERM, init_term);
3009 	/* TODO(wad): self jail with seccomp filters here. */
3010 	while ((pid = wait(&status)) > 0) {
3011 		/*
3012 		 * This loop will only end when either there are no processes
3013 		 * left inside our pid namespace or we get a signal.
3014 		 */
3015 		if (pid == rootpid)
3016 			init_exitstatus = status;
3017 	}
3018 	if (!WIFEXITED(init_exitstatus))
3019 		_exit(MINIJAIL_ERR_INIT);
3020 	_exit(WEXITSTATUS(init_exitstatus));
3021 }
3022 
int API minijail_from_fd(int fd, struct minijail *j)
3024 {
3025 	size_t sz = 0;
3026 	size_t bytes = read(fd, &sz, sizeof(sz));
3027 	attribute_cleanup_str char *buf = NULL;
3028 	int r;
3029 	if (sizeof(sz) != bytes)
3030 		return -EINVAL;
3031 	if (sz > USHRT_MAX) /* arbitrary check */
3032 		return -E2BIG;
3033 	buf = malloc(sz);
3034 	if (!buf)
3035 		return -ENOMEM;
3036 	bytes = read(fd, buf, sz);
3037 	if (bytes != sz)
3038 		return -EINVAL;
3039 	r = minijail_unmarshal(j, buf, sz);
3040 	return r;
3041 }
3042 
int API minijail_to_fd(struct minijail *j, int fd)
3044 {
3045 	size_t sz = minijail_size(j);
3046 	if (!sz)
3047 		return -EINVAL;
3048 
3049 	attribute_cleanup_str char *buf = malloc(sz);
3050 	if (!buf)
3051 		return -ENOMEM;
3052 
3053 	int err = minijail_marshal(j, buf, sz);
3054 	if (err)
3055 		return err;
3056 
3057 	/* Sends [size][minijail]. */
3058 	err = write_exactly(fd, &sz, sizeof(sz));
3059 	if (err)
3060 		return err;
3061 
3062 	return write_exactly(fd, buf, sz);
3063 }
3064 
int API minijail_copy_jail(const struct minijail *from, struct minijail *out)
3066 {
3067 	size_t sz = minijail_size(from);
3068 	if (!sz)
3069 		return -EINVAL;
3070 
3071 	attribute_cleanup_str char *buf = malloc(sz);
3072 	if (!buf)
3073 		return -ENOMEM;
3074 
3075 	int err = minijail_marshal(from, buf, sz);
3076 	if (err)
3077 		return err;
3078 
3079 	return minijail_unmarshal(out, buf, sz);
3080 }
3081 
static int setup_preload(const struct minijail *j attribute_unused,
			 char ***child_env attribute_unused)
3084 {
3085 #if defined(__ANDROID__)
3086 	/* Don't use LDPRELOAD on Android. */
3087 	return 0;
3088 #else
3089 	const char *preload_path = j->preload_path ?: PRELOADPATH;
3090 	char *newenv = NULL;
3091 	int ret = 0;
3092 	const char *oldenv = minijail_getenv(*child_env, kLdPreloadEnvVar);
3093 
3094 	if (!oldenv)
3095 		oldenv = "";
3096 
3097 	/* Only insert a separating space if we have something to separate... */
3098 	if (asprintf(&newenv, "%s%s%s", oldenv, oldenv[0] != '\0' ? " " : "",
3099 		     preload_path) < 0) {
3100 		return -1;
3101 	}
3102 
3103 	ret = minijail_setenv(child_env, kLdPreloadEnvVar, newenv, 1);
3104 	free(newenv);
3105 	return ret;
3106 #endif
3107 }
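
/*
 * Example (illustrative): if LD_PRELOAD was already "libfoo.so" in the child
 * environment, setup_preload() appends the build-time PRELOADPATH default
 * (or j->preload_path when set), producing something like
 * LD_PRELOAD="libfoo.so <preload_path>".
 */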
3108 
3109 /*
3110  * This is for logging purposes and does not change the enforced seccomp
3111  * filter.
3112  */
static int setup_seccomp_policy_path(const struct minijail *j,
				     char ***child_env)
3115 {
3116 	return minijail_setenv(child_env, kSeccompPolicyPathEnvVar,
3117 			       j->seccomp_policy_path ? j->seccomp_policy_path
3118 						      : "NO-LABEL",
3119 			       1 /* overwrite */);
3120 }
3121 
static int setup_pipe(char ***child_env, int fds[2])
3123 {
3124 	int r = pipe(fds);
3125 	char fd_buf[11];
3126 	if (r)
3127 		return r;
3128 	r = snprintf(fd_buf, sizeof(fd_buf), "%d", fds[0]);
3129 	if (r <= 0)
3130 		return -EINVAL;
3131 	return minijail_setenv(child_env, kFdEnvVar, fd_buf, 1);
3132 }
3133 
static int close_open_fds(int *inheritable_fds, size_t size)
3135 {
3136 	const char *kFdPath = "/proc/self/fd";
3137 
3138 	DIR *d = opendir(kFdPath);
3139 	struct dirent *dir_entry;
3140 
3141 	if (d == NULL)
3142 		return -1;
3143 	int dir_fd = dirfd(d);
3144 	while ((dir_entry = readdir(d)) != NULL) {
3145 		size_t i;
3146 		char *end;
3147 		bool should_close = true;
3148 		const int fd = strtol(dir_entry->d_name, &end, 10);
3149 
3150 		if ((*end) != '\0') {
3151 			continue;
3152 		}
3153 		/*
3154 		 * We might have set up some pipes that we want to share with
3155 		 * the parent process, and should not be closed.
3156 		 */
3157 		for (i = 0; i < size; ++i) {
3158 			if (fd == inheritable_fds[i]) {
3159 				should_close = false;
3160 				break;
3161 			}
3162 		}
3163 		/* Also avoid closing the directory fd. */
3164 		if (should_close && fd != dir_fd)
3165 			close(fd);
3166 	}
3167 	closedir(d);
3168 	return 0;
3169 }
3170 
3171 /* Return true if the specified file descriptor is already open. */
int minijail_fd_is_open(int fd)
3173 {
3174 	return fcntl(fd, F_GETFD) != -1 || errno != EBADF;
3175 }
3176 
3177 /*
3178  * Returns true if |check_fd| is one of j->preserved_fds[:max_index].child_fd.
3179  */
static bool is_preserved_child_fd(struct minijail *j, int check_fd,
				  size_t max_index)
3182 {
3183 	max_index = MIN(max_index, j->preserved_fd_count);
3184 	for (size_t i = 0; i < max_index; i++) {
3185 		if (j->preserved_fds[i].child_fd == check_fd) {
3186 			return true;
3187 		}
3188 	}
3189 	return false;
3190 }
3191 
3192 /* If parent_fd will be used by a child fd, move it to an unused fd. */
static int ensure_no_fd_conflict(struct minijail *j, int child_fd,
				 int *parent_fd, size_t max_index)
3195 {
3196 	if (!is_preserved_child_fd(j, *parent_fd, max_index)) {
3197 		return 0;
3198 	}
3199 
3200 	/*
3201 	 * If no other parent_fd matches the child_fd then use it instead of a
3202 	 * temporary.
3203 	 */
3204 	int fd = child_fd;
3205 	if (fd == -1 || minijail_fd_is_open(fd)) {
3206 		fd = 1023;
3207 		while (is_preserved_child_fd(j, fd, j->preserved_fd_count) ||
3208 		       minijail_fd_is_open(fd)) {
3209 			--fd;
3210 			if (fd < 0) {
3211 				die("failed to find an unused fd");
3212 			}
3213 		}
3214 	}
3215 
3216 	int ret = dup2(*parent_fd, fd);
3217 	/*
3218 	 * warn() opens a file descriptor so it needs to happen after dup2 to
3219 	 * avoid unintended side effects. This can be avoided by reordering the
3220 	 * mapping requests so that the source fds with overlap are mapped
3221 	 * first (unless there are cycles).
3222 	 */
3223 	warn("mapped fd overlap: moving %d to %d", *parent_fd, fd);
3224 	if (ret == -1) {
3225 		return -1;
3226 	}
3227 
3228 	*parent_fd = fd;
3229 	return 0;
3230 }
3231 
3232 /*
3233  * Check for contradictory mappings and create temporaries for parent file
3234  * descriptors that would otherwise be overwritten during redirect_fds().
3235  */
static int prepare_preserved_fds(struct minijail *j)
3237 {
3238 	/* Relocate parent_fds that would be replaced by a child_fd. */
3239 	for (size_t i = 0; i < j->preserved_fd_count; i++) {
3240 		int child_fd = j->preserved_fds[i].child_fd;
3241 		if (is_preserved_child_fd(j, child_fd, i)) {
3242 			die("fd %d is mapped more than once", child_fd);
3243 		}
3244 
3245 		int *parent_fd = &j->preserved_fds[i].parent_fd;
3246 		if (ensure_no_fd_conflict(j, child_fd, parent_fd, i) == -1) {
3247 			return -1;
3248 		}
3249 	}
3250 	return 0;
3251 }
3252 
3253 /*
3254  * Structure holding resources and state created when running a minijail.
3255  */
3256 struct minijail_run_state {
3257 	pid_t child_pid;
3258 	int pipe_fds[2];
3259 	int stdin_fds[2];
3260 	int stdout_fds[2];
3261 	int stderr_fds[2];
3262 	int child_sync_pipe_fds[2];
3263 	char **child_env;
3264 };
3265 
3266 /*
3267  * Move pipe_fds if they conflict with a child_fd.
3268  */
static int avoid_pipe_conflicts(struct minijail *j,
				struct minijail_run_state *state)
3271 {
3272 	int *pipe_fds[] = {
3273 	    state->pipe_fds,   state->child_sync_pipe_fds, state->stdin_fds,
3274 	    state->stdout_fds, state->stderr_fds,
3275 	};
3276 	for (size_t i = 0; i < ARRAY_SIZE(pipe_fds); ++i) {
3277 		if (pipe_fds[i][0] != -1 &&
3278 		    ensure_no_fd_conflict(j, -1, &pipe_fds[i][0],
3279 					  j->preserved_fd_count) == -1) {
3280 			return -1;
3281 		}
3282 		if (pipe_fds[i][1] != -1 &&
3283 		    ensure_no_fd_conflict(j, -1, &pipe_fds[i][1],
3284 					  j->preserved_fd_count) == -1) {
3285 			return -1;
3286 		}
3287 	}
3288 	return 0;
3289 }
3290 
3291 /*
3292  * Redirect j->preserved_fds from the parent_fd to the child_fd.
3293  *
3294  * NOTE: This will clear FD_CLOEXEC since otherwise the child_fd would not be
3295  * inherited after the exec call.
3296  */
static int redirect_fds(struct minijail *j)
3298 {
3299 	for (size_t i = 0; i < j->preserved_fd_count; i++) {
3300 		if (j->preserved_fds[i].parent_fd ==
3301 		    j->preserved_fds[i].child_fd) {
3302 			// Clear CLOEXEC if it is set so the FD will be
3303 			// inherited by the child.
3304 			int flags =
3305 			    fcntl(j->preserved_fds[i].child_fd, F_GETFD);
3306 			if (flags == -1 || (flags & FD_CLOEXEC) == 0) {
3307 				continue;
3308 			}
3309 
3310 			// Currently FD_CLOEXEC is cleared without being
3311 			// restored. It may make sense to track when this
3312 			// happens and restore FD_CLOEXEC in the child process.
3313 			flags &= ~FD_CLOEXEC;
3314 			if (fcntl(j->preserved_fds[i].child_fd, F_SETFD,
3315 				  flags) == -1) {
3316 				pwarn("failed to clear CLOEXEC for %d",
3317 				      j->preserved_fds[i].parent_fd);
3318 			}
3319 			continue;
3320 		}
3321 		if (dup2(j->preserved_fds[i].parent_fd,
3322 			 j->preserved_fds[i].child_fd) == -1) {
3323 			return -1;
3324 		}
3325 	}
3326 
3327 	/*
3328 	 * After all fds have been duped, we are now free to close all parent
3329 	 * fds that are *not* child fds.
3330 	 */
3331 	for (size_t i = 0; i < j->preserved_fd_count; i++) {
3332 		int parent_fd = j->preserved_fds[i].parent_fd;
3333 		if (!is_preserved_child_fd(j, parent_fd,
3334 					   j->preserved_fd_count)) {
3335 			close(parent_fd);
3336 		}
3337 	}
3338 	return 0;
3339 }
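
/*
 * Illustrative mapping example: after minijail_preserve_fd(j, 7, 3), the
 * parent's fd 7 is dup2()'d onto fd 3 in the child by redirect_fds(), and any
 * parent fd already sitting at 3 is first moved out of the way by
 * prepare_preserved_fds()/ensure_no_fd_conflict().
 */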
3340 
static void minijail_free_run_state(struct minijail_run_state *state)
3342 {
3343 	state->child_pid = -1;
3344 
3345 	int *fd_pairs[] = {state->pipe_fds, state->stdin_fds, state->stdout_fds,
3346 			   state->stderr_fds, state->child_sync_pipe_fds};
3347 	for (size_t i = 0; i < ARRAY_SIZE(fd_pairs); ++i) {
3348 		close_and_reset(&fd_pairs[i][0]);
3349 		close_and_reset(&fd_pairs[i][1]);
3350 	}
3351 
3352 	minijail_free_env(state->child_env);
3353 	state->child_env = NULL;
3354 }
3355 
3356 /* Set up stdin/stdout/stderr file descriptors in the child. */
static void setup_child_std_fds(struct minijail *j,
				struct minijail_run_state *state)
3359 {
3360 	struct {
3361 		const char *name;
3362 		int from;
3363 		int to;
3364 	} fd_map[] = {
3365 	    {"stdin", state->stdin_fds[0], STDIN_FILENO},
3366 	    {"stdout", state->stdout_fds[1], STDOUT_FILENO},
3367 	    {"stderr", state->stderr_fds[1], STDERR_FILENO},
3368 	};
3369 
3370 	for (size_t i = 0; i < ARRAY_SIZE(fd_map); ++i) {
3371 		if (fd_map[i].from == -1 || fd_map[i].from == fd_map[i].to)
3372 			continue;
3373 		if (dup2(fd_map[i].from, fd_map[i].to) == -1)
3374 			die("failed to set up %s pipe", fd_map[i].name);
3375 	}
3376 
3377 	/* Close temporary pipe file descriptors. */
3378 	int *std_pipes[] = {state->stdin_fds, state->stdout_fds,
3379 			    state->stderr_fds};
3380 	for (size_t i = 0; i < ARRAY_SIZE(std_pipes); ++i) {
3381 		close_and_reset(&std_pipes[i][0]);
3382 		close_and_reset(&std_pipes[i][1]);
3383 	}
3384 
3385 	/* Make sure we're not trying to skip setsid() with a PID namespace. */
3386 	if (!j->flags.enable_new_sessions && j->flags.pids) {
3387 		die("cannot skip setsid() with PID namespace");
3388 	}
3389 
3390 	/*
3391 	 * If new sessions are enabled and any of stdin, stdout, or stderr are
3392 	 * TTYs, or setsid flag is set, create a new session. This prevents
3393 	 * the jailed process from using the TIOCSTI ioctl to push characters
3394 	 * into the parent process terminal's input buffer, therefore escaping
3395 	 * the jail.
3396 	 *
3397 	 * Since it has just forked, the child will not be a process group
3398 	 * leader, and this call to setsid() should always succeed.
3399 	 */
3400 	if (j->flags.enable_new_sessions &&
3401 	    (j->flags.setsid || isatty(STDIN_FILENO) || isatty(STDOUT_FILENO) ||
3402 	     isatty(STDERR_FILENO))) {
3403 		if (setsid() < 0) {
3404 			pdie("setsid() failed");
3405 		}
3406 
3407 		if (isatty(STDIN_FILENO)) {
3408 			ioctl(STDIN_FILENO, TIOCSCTTY, 0);
3409 		}
3410 	}
3411 }
3412 
3413 /*
3414  * Structure that specifies how to start a minijail.
3415  *
3416  * filename - The program to exec in the child. Should be NULL if elf_fd is set.
3417  * elf_fd - A fd to be used with fexecve. Should be -1 if filename is set.
3418  *   NOTE: either filename or elf_fd is required if |exec_in_child| = 1.
3419  * argv - Arguments for the child program. Required if |exec_in_child| = 1.
3420  * envp - Environment for the child program. Available if |exec_in_child| = 1.
3421  * use_preload - If true use LD_PRELOAD.
3422  * exec_in_child - If true, run |filename|. Otherwise, the child will return to
3423  *     the caller.
3424  * pstdin_fd - Filled with stdin pipe if non-NULL.
3425  * pstdout_fd - Filled with stdout pipe if non-NULL.
3426  * pstderr_fd - Filled with stderr pipe if non-NULL.
3427  * pchild_pid - Filled with the pid of the child process if non-NULL.
3428  */
3429 struct minijail_run_config {
3430 	const char *filename;
3431 	int elf_fd;
3432 	char *const *argv;
3433 	char *const *envp;
3434 	int use_preload;
3435 	int exec_in_child;
3436 	int *pstdin_fd;
3437 	int *pstdout_fd;
3438 	int *pstderr_fd;
3439 	pid_t *pchild_pid;
3440 };
3441 
3442 static int
3443 minijail_run_config_internal(struct minijail *j,
3444 			     const struct minijail_run_config *config);
3445 
3446 int API minijail_run(struct minijail *j, const char *filename,
3447 		     char *const argv[])
3448 {
3449 	struct minijail_run_config config = {
3450 	    .filename = filename,
3451 	    .elf_fd = -1,
3452 	    .argv = argv,
3453 	    .envp = NULL,
3454 	    .use_preload = true,
3455 	    .exec_in_child = true,
3456 	};
3457 	return minijail_run_config_internal(j, &config);
3458 }
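
/*
 * Example (illustrative sketch, not part of the library): the simplest way
 * a caller might use minijail_run(). minijail_new() and the jail
 * configuration setters are assumed to come from libminijail.h; error
 * handling is elided.
 *
 *   struct minijail *j = minijail_new();
 *   char *const argv[] = {"/bin/true", NULL};
 *
 *   minijail_run(j, "/bin/true", argv); // Forks and execs with LD_PRELOAD.
 *   minijail_wait(j);                   // Reaps the child (see below).
 *   minijail_destroy(j);
 */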
3459 
3460 int API minijail_run_env(struct minijail *j, const char *filename,
3461 			 char *const argv[], char *const envp[])
3462 {
3463 	struct minijail_run_config config = {
3464 	    .filename = filename,
3465 	    .elf_fd = -1,
3466 	    .argv = argv,
3467 	    .envp = envp,
3468 	    .use_preload = true,
3469 	    .exec_in_child = true,
3470 	};
3471 	return minijail_run_config_internal(j, &config);
3472 }
3473 
3474 int API minijail_run_pid(struct minijail *j, const char *filename,
3475 			 char *const argv[], pid_t *pchild_pid)
3476 {
3477 	struct minijail_run_config config = {
3478 	    .filename = filename,
3479 	    .elf_fd = -1,
3480 	    .argv = argv,
3481 	    .envp = NULL,
3482 	    .use_preload = true,
3483 	    .exec_in_child = true,
3484 	    .pchild_pid = pchild_pid,
3485 	};
3486 	return minijail_run_config_internal(j, &config);
3487 }
3488 
3489 int API minijail_run_pipe(struct minijail *j, const char *filename,
3490 			  char *const argv[], int *pstdin_fd)
3491 {
3492 	struct minijail_run_config config = {
3493 	    .filename = filename,
3494 	    .elf_fd = -1,
3495 	    .argv = argv,
3496 	    .envp = NULL,
3497 	    .use_preload = true,
3498 	    .exec_in_child = true,
3499 	    .pstdin_fd = pstdin_fd,
3500 	};
3501 	return minijail_run_config_internal(j, &config);
3502 }
3503 
3504 int API minijail_run_pid_pipes(struct minijail *j, const char *filename,
3505 			       char *const argv[], pid_t *pchild_pid,
3506 			       int *pstdin_fd, int *pstdout_fd, int *pstderr_fd)
3507 {
3508 	struct minijail_run_config config = {
3509 	    .filename = filename,
3510 	    .elf_fd = -1,
3511 	    .argv = argv,
3512 	    .envp = NULL,
3513 	    .use_preload = true,
3514 	    .exec_in_child = true,
3515 	    .pstdin_fd = pstdin_fd,
3516 	    .pstdout_fd = pstdout_fd,
3517 	    .pstderr_fd = pstderr_fd,
3518 	    .pchild_pid = pchild_pid,
3519 	};
3520 	return minijail_run_config_internal(j, &config);
3521 }
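
/*
 * Example (illustrative sketch): capturing a child's stdout with
 * minijail_run_pid_pipes(). Per minijail_run_config_internal() below,
 * *pstdout_fd receives the parent's read end of the stdout pipe and
 * *pchild_pid receives the child's pid. Error handling is elided.
 *
 *   struct minijail *j = minijail_new();
 *   char *const argv[] = {"/bin/echo", "hello", NULL};
 *   pid_t pid;
 *   int child_stdout;
 *   char buf[128];
 *
 *   minijail_run_pid_pipes(j, "/bin/echo", argv, &pid, NULL, &child_stdout,
 *                          NULL);
 *   read(child_stdout, buf, sizeof(buf)); // Reads the child's output.
 *   close(child_stdout);
 *   minijail_wait(j);
 *   minijail_destroy(j);
 */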
3522 
3523 int API minijail_run_env_pid_pipes(struct minijail *j, const char *filename,
3524 				   char *const argv[], char *const envp[],
3525 				   pid_t *pchild_pid, int *pstdin_fd,
3526 				   int *pstdout_fd, int *pstderr_fd)
3527 {
3528 	struct minijail_run_config config = {
3529 	    .filename = filename,
3530 	    .elf_fd = -1,
3531 	    .argv = argv,
3532 	    .envp = envp,
3533 	    .use_preload = true,
3534 	    .exec_in_child = true,
3535 	    .pstdin_fd = pstdin_fd,
3536 	    .pstdout_fd = pstdout_fd,
3537 	    .pstderr_fd = pstderr_fd,
3538 	    .pchild_pid = pchild_pid,
3539 	};
3540 	return minijail_run_config_internal(j, &config);
3541 }
3542 
3543 int API minijail_run_fd_env_pid_pipes(struct minijail *j, int elf_fd,
3544 				      char *const argv[], char *const envp[],
3545 				      pid_t *pchild_pid, int *pstdin_fd,
3546 				      int *pstdout_fd, int *pstderr_fd)
3547 {
3548 	struct minijail_run_config config = {
3549 	    .filename = NULL,
3550 	    .elf_fd = elf_fd,
3551 	    .argv = argv,
3552 	    .envp = envp,
3553 	    .use_preload = true,
3554 	    .exec_in_child = true,
3555 	    .pstdin_fd = pstdin_fd,
3556 	    .pstdout_fd = pstdout_fd,
3557 	    .pstderr_fd = pstderr_fd,
3558 	    .pchild_pid = pchild_pid,
3559 	};
3560 	return minijail_run_config_internal(j, &config);
3561 }
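
/*
 * Example (illustrative sketch): running a program by file descriptor
 * rather than by path. The descriptor is handed to fexecve(2) in the
 * child, so the parent only needs to keep it open until this call returns;
 * the paths used here are placeholders and error handling is elided.
 *
 *   struct minijail *j = minijail_new();
 *   int elf_fd = open("/bin/true", O_RDONLY);
 *   char *const argv[] = {"true", NULL};
 *   char *const envp[] = {NULL};
 *   pid_t pid;
 *
 *   minijail_run_fd_env_pid_pipes(j, elf_fd, argv, envp, &pid, NULL, NULL,
 *                                 NULL);
 *   close(elf_fd); // The forked child holds its own copy of the descriptor.
 */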
3562 
3563 int API minijail_run_no_preload(struct minijail *j, const char *filename,
3564 				char *const argv[])
3565 {
3566 	struct minijail_run_config config = {
3567 	    .filename = filename,
3568 	    .elf_fd = -1,
3569 	    .argv = argv,
3570 	    .envp = NULL,
3571 	    .use_preload = false,
3572 	    .exec_in_child = true,
3573 	};
3574 	return minijail_run_config_internal(j, &config);
3575 }
3576 
3577 int API minijail_run_pid_pipes_no_preload(struct minijail *j,
3578 					  const char *filename,
3579 					  char *const argv[], pid_t *pchild_pid,
3580 					  int *pstdin_fd, int *pstdout_fd,
3581 					  int *pstderr_fd)
3582 {
3583 	struct minijail_run_config config = {
3584 	    .filename = filename,
3585 	    .elf_fd = -1,
3586 	    .argv = argv,
3587 	    .envp = NULL,
3588 	    .use_preload = false,
3589 	    .exec_in_child = true,
3590 	    .pstdin_fd = pstdin_fd,
3591 	    .pstdout_fd = pstdout_fd,
3592 	    .pstderr_fd = pstderr_fd,
3593 	    .pchild_pid = pchild_pid,
3594 	};
3595 	return minijail_run_config_internal(j, &config);
3596 }
3597 
3598 int API minijail_run_env_pid_pipes_no_preload(struct minijail *j,
3599 					      const char *filename,
3600 					      char *const argv[],
3601 					      char *const envp[],
3602 					      pid_t *pchild_pid, int *pstdin_fd,
3603 					      int *pstdout_fd, int *pstderr_fd)
3604 {
3605 	struct minijail_run_config config = {
3606 	    .filename = filename,
3607 	    .elf_fd = -1,
3608 	    .argv = argv,
3609 	    .envp = envp,
3610 	    .use_preload = false,
3611 	    .exec_in_child = true,
3612 	    .pstdin_fd = pstdin_fd,
3613 	    .pstdout_fd = pstdout_fd,
3614 	    .pstderr_fd = pstderr_fd,
3615 	    .pchild_pid = pchild_pid,
3616 	};
3617 	return minijail_run_config_internal(j, &config);
3618 }
3619 
3620 pid_t API minijail_fork(struct minijail *j)
3621 {
3622 	struct minijail_run_config config = {
3623 	    .elf_fd = -1,
3624 	};
3625 	return minijail_run_config_internal(j, &config);
3626 }
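
/*
 * Example (illustrative sketch): minijail_fork() behaves like fork(2) but
 * places the child inside the jail. Because exec_in_child is 0,
 * minijail_run_config_internal() returns the child's pid to the parent and
 * 0 to the child (the child simply returns to the caller).
 *
 *   pid_t pid = minijail_fork(j);
 *   if (pid == 0) {
 *           // Child: already jailed; do the confined work, then exit.
 *           _exit(0);
 *   }
 *   // Parent: reap the jailed child when it finishes.
 *   minijail_wait(j);
 */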
3627 
3628 static int minijail_run_internal(struct minijail *j,
3629 				 const struct minijail_run_config *config,
3630 				 struct minijail_run_state *state_out)
3631 {
3632 	int sync_child = 0;
3633 	int ret;
3634 	/* We need to remember this across the minijail_preexec() call. */
3635 	int pid_namespace = j->flags.pids;
3636 	/*
3637 	 * Create an init process if we are entering a pid namespace, unless the
3638 	 * user has explicitly opted out by calling minijail_run_as_init().
3639 	 */
3640 	int do_init = j->flags.do_init && !j->flags.run_as_init;
3641 	int use_preload = config->use_preload;
3642 
3643 	if (config->filename != NULL && config->elf_fd != -1) {
3644 		die("filename and elf_fd cannot be set at the same time");
3645 	}
3646 	if (config->filename != NULL) {
3647 		j->filename = strdup(config->filename);
3648 	}
3649 
3650 	/*
3651 	 * Only copy the environment if we need to modify it. If this is done
3652 	 * unconditionally, it triggers odd behavior in the ARC container.
3653 	 */
3654 	if (use_preload || j->seccomp_policy_path) {
3655 		state_out->child_env =
3656 		    minijail_copy_env(config->envp ? config->envp : environ);
3657 		if (!state_out->child_env)
3658 			return ENOMEM;
3659 	}
3660 
3661 	if (j->seccomp_policy_path &&
3662 	    setup_seccomp_policy_path(j, &state_out->child_env))
3663 		return -EFAULT;
3664 
3665 	if (use_preload) {
3666 		if (j->hooks_head != NULL)
3667 			die("Minijail hooks are not supported with LD_PRELOAD");
3668 		if (!config->exec_in_child)
3669 			die("minijail_fork is not supported with LD_PRELOAD");
3670 
3671 		/*
3672 		 * Before we fork(2) and execve(2) the child process, we need
3673 		 * to open a pipe(2) to send the minijail configuration over.
3674 		 */
3675 		if (setup_preload(j, &state_out->child_env) ||
3676 		    setup_pipe(&state_out->child_env, state_out->pipe_fds))
3677 			return -EFAULT;
3678 	} else {
3679 		if (j->flags.use_caps && j->caps != 0 &&
3680 		    !j->flags.set_ambient_caps) {
3681 			die("non-empty, non-ambient capabilities are not "
3682 			    "supported without LD_PRELOAD");
3683 		}
3684 	}
3685 
3686 	/* Create pipes for stdin/stdout/stderr as requested by caller. */
3687 	struct {
3688 		bool requested;
3689 		int *pipe_fds;
3690 	} pipe_fd_req[] = {
3691 	    {config->pstdin_fd != NULL, state_out->stdin_fds},
3692 	    {config->pstdout_fd != NULL, state_out->stdout_fds},
3693 	    {config->pstderr_fd != NULL, state_out->stderr_fds},
3694 	};
3695 
3696 	for (size_t i = 0; i < ARRAY_SIZE(pipe_fd_req); ++i) {
3697 		if (pipe_fd_req[i].requested &&
3698 		    pipe(pipe_fd_req[i].pipe_fds) == -1)
3699 			return EFAULT;
3700 	}
3701 
3702 	/*
3703 	 * If the parent process needs to configure the child's runtime
3704 	 * environment after forking, create a pipe(2) to block the child until
3705 	 * configuration is done.
3706 	 */
3707 	if (j->flags.forward_signals || j->flags.pid_file || j->flags.cgroups ||
3708 	    j->rlimit_count || j->flags.userns) {
3709 		sync_child = 1;
3710 		if (pipe(state_out->child_sync_pipe_fds))
3711 			return -EFAULT;
3712 	}
3713 
3714 	/*
3715 	 * Use sys_clone() if and only if we're creating a pid namespace.
3716 	 *
3717 	 * tl;dr: WARNING: do not mix pid namespaces and multithreading.
3718 	 *
3719 	 * In multithreaded programs, there are a bunch of locks inside libc,
3720 	 * some of which may be held by other threads at the time that we call
3721 	 * minijail_run_pid(). If we call fork(), glibc does its level best to
3722 	 * ensure that we hold all of these locks before it calls clone()
3723 	 * internally and drop them after clone() returns, but when we call
3724 	 * sys_clone(2) directly, all that gets bypassed and we end up with a
3725 	 * child address space where some of libc's important locks are held by
3726 	 * other threads (which did not get cloned, and hence will never release
3727 	 * those locks). This is okay so long as we call exec() immediately
3728 	 * after, but a bunch of seemingly-innocent libc functions like setenv()
3729 	 * take locks.
3730 	 *
3731 	 * Hence, only call sys_clone() if we need to, in order to get at pid
3732 	 * namespacing. If we follow this path, the child's address space might
3733 	 * have broken locks; you may only call functions that do not acquire
3734 	 * any locks.
3735 	 *
3736 	 * Unfortunately, fork() acquires every lock it can get its hands on, as
3737 	 * previously detailed, so this function is highly likely to deadlock
3738 	 * later on (see "deadlock here") if we're multithreaded.
3739 	 *
3740 	 * We might hack around this by having the clone()d child (init of the
3741 	 * pid namespace) return directly, rather than leaving the clone()d
3742 	 * process hanging around to be init for the new namespace (and having
3743 	 * its fork()ed child return in turn), but that process would be
3744 	 * crippled with its libc locks potentially broken. We might try
3745 	 * fork()ing in the parent before we clone() to ensure that we own all
3746 	 * the locks, but then we have to have the forked child hanging around
3747 	 * consuming resources (and possibly having file descriptors / shared
3748 	 * memory regions / etc attached). We'd need to keep the child around to
3749 	 * avoid having its children get reparented to init.
3750 	 *
3751 	 * TODO(b/317404364): figure out if the "forked child hanging around"
3752 	 * problem is fixable or not. It would be nice if Minijail supported
3753 	 * this case.
3754 	 */
3755 	pid_t child_pid;
3756 	if (pid_namespace) {
3757 		unsigned long clone_flags = CLONE_NEWPID | SIGCHLD;
3758 		if (j->flags.userns)
3759 			clone_flags |= CLONE_NEWUSER;
3760 
3761 		child_pid = syscall(SYS_clone, clone_flags, NULL, 0L, 0L, 0L);
3762 
3763 		if (child_pid < 0) {
3764 			if (errno == EPERM)
3765 				pdie("clone(CLONE_NEWPID | ...) failed with "
3766 				     "EPERM; is this process missing "
3767 				     "CAP_SYS_ADMIN?");
3768 			pdie("clone(CLONE_NEWPID | ...) failed");
3769 		}
3770 	} else {
3771 		if (j->flags.userns)
3772 			die("user namespaces in Minijail require a PID "
3773 			    "namespace");
3774 
3775 		child_pid = fork();
3776 
3777 		if (child_pid < 0)
3778 			pdie("fork failed");
3779 	}
3780 
3781 	/*
3782 	 * setup_fs_rules_fd() needs to be called before close_open_fds(), and
3783 	 * before any of the child-process logic runs.
3784 	 */
3785 	if (j->fs_rules_head) {
3786 		setup_fs_rules_fd(j);
3787 		minijail_preserve_fd(j, j->fs_rules_fd, j->fs_rules_fd);
3788 	}
3789 
3790 	state_out->child_pid = child_pid;
3791 	if (child_pid) {
3792 		j->initpid = child_pid;
3793 
3794 		if (j->flags.forward_signals) {
3795 			forward_pid = child_pid;
3796 			install_signal_handlers();
3797 		}
3798 
3799 		if (j->flags.pid_file)
3800 			write_pid_file_or_die(j);
3801 
3802 		if (j->flags.cgroups)
3803 			add_to_cgroups_or_die(j);
3804 
3805 		if (j->rlimit_count)
3806 			set_rlimits_or_die(j);
3807 
3808 		if (j->flags.userns)
3809 			write_ugid_maps_or_die(j);
3810 
3811 		if (j->flags.enter_vfs)
3812 			close(j->mountns_fd);
3813 
3814 		if (j->flags.enter_net)
3815 			close(j->netns_fd);
3816 
3817 		if (sync_child)
3818 			parent_setup_complete(state_out->child_sync_pipe_fds);
3819 
3820 		if (use_preload) {
3821 			/*
3822 			 * Add SIGPIPE to the signal mask to avoid getting
3823 			 * killed if the child process finishes or closes its
3824 			 * end of the pipe prematurely.
3825 			 *
3826 			 * TODO(crbug.com/1022170): Use pthread_sigmask instead
3827 			 * of sigprocmask if Minijail is used in multithreaded
3828 			 * programs.
3829 			 */
3830 			sigset_t to_block, to_restore;
3831 			if (sigemptyset(&to_block) < 0)
3832 				pdie("sigemptyset failed");
3833 			if (sigaddset(&to_block, SIGPIPE) < 0)
3834 				pdie("sigaddset failed");
3835 			if (sigprocmask(SIG_BLOCK, &to_block, &to_restore) < 0)
3836 				pdie("sigprocmask failed");
3837 
3838 			/* Send marshalled minijail. */
3839 			close_and_reset(&state_out->pipe_fds[0]);
3840 			ret = minijail_to_fd(j, state_out->pipe_fds[1]);
3841 			close_and_reset(&state_out->pipe_fds[1]);
3842 
3843 			/* Accept any pending SIGPIPE. */
3844 			while (true) {
3845 				const struct timespec zero_time = {0, 0};
3846 				const int sig =
3847 				    sigtimedwait(&to_block, NULL, &zero_time);
3848 				if (sig < 0) {
3849 					if (errno != EINTR)
3850 						break;
3851 				} else {
3852 					if (sig != SIGPIPE)
3853 						die("unexpected signal %d",
3854 						    sig);
3855 				}
3856 			}
3857 
3858 			/* Restore the signal mask to its original state. */
3859 			if (sigprocmask(SIG_SETMASK, &to_restore, NULL) < 0)
3860 				pdie("sigprocmask failed");
3861 
3862 			if (ret) {
3863 				warn("failed to send marshalled minijail: %s",
3864 				     strerror(-ret));
3865 				kill(j->initpid, SIGKILL);
3866 			}
3867 		}
3868 
3869 		return 0;
3870 	}
3871 
3872 	/* Child process. */
3873 	if (j->flags.reset_signal_mask) {
3874 		sigset_t signal_mask;
3875 		if (sigemptyset(&signal_mask) != 0)
3876 			pdie("sigemptyset failed");
3877 		if (sigprocmask(SIG_SETMASK, &signal_mask, NULL) != 0)
3878 			pdie("sigprocmask failed");
3879 	}
3880 
3881 	if (j->flags.reset_signal_handlers) {
3882 		int signum;
3883 		for (signum = 0; signum <= SIGRTMAX; signum++) {
3884 			/*
3885 			 * Ignore EINVAL since some signal numbers in the range
3886 			 * might not be valid.
3887 			 */
3888 			if (signal(signum, SIG_DFL) == SIG_ERR &&
3889 			    errno != EINVAL) {
3890 				pdie("failed to reset signal %d disposition",
3891 				     signum);
3892 			}
3893 		}
3894 	}
3895 
3896 	if (j->flags.close_open_fds) {
3897 		const size_t kMaxInheritableFdsSize = 11 + MAX_PRESERVED_FDS;
3898 		int inheritable_fds[kMaxInheritableFdsSize];
3899 		size_t size = 0;
3900 
3901 		int *pipe_fds[] = {
3902 		    state_out->pipe_fds,   state_out->child_sync_pipe_fds,
3903 		    state_out->stdin_fds,  state_out->stdout_fds,
3904 		    state_out->stderr_fds,
3905 		};
3906 
3907 		for (size_t i = 0; i < ARRAY_SIZE(pipe_fds); ++i) {
3908 			if (pipe_fds[i][0] != -1) {
3909 				inheritable_fds[size++] = pipe_fds[i][0];
3910 			}
3911 			if (pipe_fds[i][1] != -1) {
3912 				inheritable_fds[size++] = pipe_fds[i][1];
3913 			}
3914 		}
3915 
3916 		/*
3917 		 * Preserve namespace file descriptors over the close_open_fds()
3918 		 * call. These are closed in minijail_enter() so they won't leak
3919 		 * into the child process.
3920 		 */
3921 		if (j->flags.enter_vfs)
3922 			minijail_preserve_fd(j, j->mountns_fd, j->mountns_fd);
3923 		if (j->flags.enter_net)
3924 			minijail_preserve_fd(j, j->netns_fd, j->netns_fd);
3925 
3926 		for (size_t i = 0; i < j->preserved_fd_count; i++) {
3927 			/*
3928 			 * Preserve all parent_fds. They will be dup2(2)-ed in
3929 			 * the child later.
3930 			 */
3931 			inheritable_fds[size++] = j->preserved_fds[i].parent_fd;
3932 		}
3933 
3934 		if (config->elf_fd > -1) {
3935 			inheritable_fds[size++] = config->elf_fd;
3936 		}
3937 
3938 		if (close_open_fds(inheritable_fds, size) < 0)
3939 			die("failed to close open file descriptors");
3940 	}
3941 
3942 	/* The set of fds will be replaced. */
3943 	if (prepare_preserved_fds(j))
3944 		die("failed to set up fd redirections");
3945 
3946 	if (avoid_pipe_conflicts(j, state_out))
3947 		die("failed to redirect conflicting pipes");
3948 
3949 	/* The elf_fd needs to be mutable so use a stack copy from now on. */
3950 	int elf_fd = config->elf_fd;
3951 	if (elf_fd != -1 &&
3952 	    ensure_no_fd_conflict(j, -1, &elf_fd, j->preserved_fd_count))
3953 		die("failed to redirect elf_fd");
3954 
3955 	if (redirect_fds(j))
3956 		die("failed to set up fd redirections");
3957 
3958 	if (sync_child)
3959 		wait_for_parent_setup(state_out->child_sync_pipe_fds);
3960 
3961 	if (j->flags.userns)
3962 		enter_user_namespace(j);
3963 
3964 	setup_child_std_fds(j, state_out);
3965 
3966 	/* If running an init program, let it decide when/how to mount /proc. */
3967 	if (pid_namespace && !do_init)
3968 		j->flags.remount_proc_ro = 0;
3969 
3970 	if (use_preload) {
3971 		/* Strip out flags that cannot be inherited across execve(2). */
3972 		minijail_preexec(j);
3973 	} else {
3974 		/*
3975 		 * If not using LD_PRELOAD, do all jailing before execve(2).
3976 		 * Note that PID namespaces can only be entered on fork(2),
3977 		 * so that flag is still cleared.
3978 		 */
3979 		j->flags.pids = 0;
3980 	}
3981 
3982 	/*
3983 	 * Jail this process.
3984 	 * If forking, return.
3985 	 * If not, execve(2) the target.
3986 	 */
3987 	minijail_enter(j);
3988 
3989 	if (config->exec_in_child && pid_namespace && do_init) {
3990 		/*
3991 		 * pid namespace: this process will become init inside the new
3992 		 * namespace. We don't want all programs we might exec to have
3993 		 * to know how to be init. Normally (do_init == 1) we fork off
3994 		 * a child to actually run the program. If |do_init == 0|, we
3995 		 * let the program keep pid 1 and be init.
3996 		 *
3997 		 * If we're multithreaded, we'll probably deadlock here. See
3998 		 * WARNING above.
3999 		 */
4000 		child_pid = fork();
4001 		if (child_pid < 0) {
4002 			_exit(child_pid);
4003 		} else if (child_pid > 0) {
4004 			minijail_free_run_state(state_out);
4005 
4006 			/*
4007 			 * Best effort. Don't bother checking the return value.
4008 			 */
4009 			prctl(PR_SET_NAME, "minijail-init");
4010 			init(child_pid); /* Never returns. */
4011 		}
4012 		state_out->child_pid = child_pid;
4013 	}
4014 
4015 	run_hooks_or_die(j, MINIJAIL_HOOK_EVENT_PRE_EXECVE);
4016 
4017 	if (!config->exec_in_child)
4018 		return 0;
4019 
4020 	/*
4021 	 * We're going to execve(), so make sure any remaining resources are
4022 	 * freed. Exceptions are:
4023 	 *  1. The child environment. No need to worry about freeing it since
4024 	 *     execve reinitializes the heap anyway.
4025 	 *  2. The read side of the LD_PRELOAD pipe, which we need to hand down
4026 	 *     to the target; the preloaded code will read from it and then
4027 	 *     close it.
4028 	 */
4029 	state_out->pipe_fds[0] = -1;
4030 	char *const *child_env = state_out->child_env;
4031 	state_out->child_env = NULL;
4032 	minijail_free_run_state(state_out);
4033 
4034 	/*
4035 	 * If we aren't pid-namespaced, or the jailed program asked to be init:
4036 	 *   calling process
4037 	 *   -> execve()-ing process
4038 	 * If we are:
4039 	 *   calling process
4040 	 *   -> init()-ing process
4041 	 *      -> execve()-ing process
4042 	 */
4043 	if (!child_env)
4044 		child_env = config->envp ? config->envp : environ;
4045 	if (elf_fd > -1) {
4046 		fexecve(elf_fd, config->argv, child_env);
4047 		pwarn("fexecve(%d) failed", elf_fd);
4048 	} else {
4049 		execve(config->filename, config->argv, child_env);
4050 		pwarn("execve(%s) failed", config->filename);
4051 	}
4052 
4053 	ret = (errno == ENOENT ? MINIJAIL_ERR_NO_COMMAND
4054 			       : MINIJAIL_ERR_NO_ACCESS);
4055 	_exit(ret);
4056 }
4057 
4058 static int
4059 minijail_run_config_internal(struct minijail *j,
4060 			     const struct minijail_run_config *config)
4061 {
4062 	struct minijail_run_state state = {
4063 	    .child_pid = -1,
4064 	    .pipe_fds = {-1, -1},
4065 	    .stdin_fds = {-1, -1},
4066 	    .stdout_fds = {-1, -1},
4067 	    .stderr_fds = {-1, -1},
4068 	    .child_sync_pipe_fds = {-1, -1},
4069 	    .child_env = NULL,
4070 	};
4071 	int ret = minijail_run_internal(j, config, &state);
4072 
4073 	if (ret == 0) {
4074 		if (config->pchild_pid)
4075 			*config->pchild_pid = state.child_pid;
4076 
4077 		/* Grab stdin/stdout/stderr descriptors requested by caller. */
4078 		struct {
4079 			int *pfd;
4080 			int *psrc;
4081 		} fd_map[] = {
4082 		    {config->pstdin_fd, &state.stdin_fds[1]},
4083 		    {config->pstdout_fd, &state.stdout_fds[0]},
4084 		    {config->pstderr_fd, &state.stderr_fds[0]},
4085 		};
4086 
4087 		for (size_t i = 0; i < ARRAY_SIZE(fd_map); ++i) {
4088 			if (fd_map[i].pfd) {
4089 				*fd_map[i].pfd = *fd_map[i].psrc;
4090 				*fd_map[i].psrc = -1;
4091 			}
4092 		}
4093 
4094 		if (!config->exec_in_child)
4095 			ret = state.child_pid;
4096 	}
4097 
4098 	minijail_free_run_state(&state);
4099 
4100 	return ret;
4101 }
4102 
4103 static int minijail_wait_internal(struct minijail *j, int expected_signal)
4104 {
4105 	if (j->initpid <= 0)
4106 		return -ECHILD;
4107 
4108 	int st;
4109 	while (true) {
4110 		const int ret = waitpid(j->initpid, &st, 0);
4111 		if (ret >= 0)
4112 			break;
4113 		if (errno != EINTR)
4114 			return -errno;
4115 	}
4116 
4117 	if (!WIFEXITED(st)) {
4118 		int error_status = st;
4119 		if (!WIFSIGNALED(st)) {
4120 			return error_status;
4121 		}
4122 
4123 		int signum = WTERMSIG(st);
4124 		/*
4125 		 * We return MINIJAIL_ERR_JAIL if the process received
4126 		 * SIGSYS, which happens when a syscall is blocked by
4127 		 * seccomp filters.
4128 		 * If not, we do what bash(1) does:
4129 		 * $? = 128 + signum
4130 		 */
4131 		if (signum == SIGSYS) {
4132 			warn("child process %d had a policy violation (%s)",
4133 			     j->initpid,
4134 			     j->seccomp_policy_path ? j->seccomp_policy_path
4135 						    : "NO-LABEL");
4136 			error_status = MINIJAIL_ERR_JAIL;
4137 		} else {
4138 			if (signum != expected_signal) {
4139 				warn("child process %d received signal %d",
4140 				     j->initpid, signum);
4141 			}
4142 			error_status = MINIJAIL_ERR_SIG_BASE + signum;
4143 		}
4144 		return error_status;
4145 	}
4146 
4147 	int exit_status = WEXITSTATUS(st);
4148 	if (exit_status != 0)
4149 		info("child process %d exited with status %d", j->initpid,
4150 		     exit_status);
4151 
4152 	return exit_status;
4153 }
4154 
4155 int API minijail_kill(struct minijail *j)
4156 {
4157 	if (j->initpid <= 0)
4158 		return -ECHILD;
4159 
4160 	if (kill(j->initpid, SIGTERM))
4161 		return -errno;
4162 
4163 	return minijail_wait_internal(j, SIGTERM);
4164 }
4165 
4166 int API minijail_wait(struct minijail *j)
4167 {
4168 	return minijail_wait_internal(j, 0);
4169 }
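
/*
 * Example (illustrative sketch): interpreting minijail_wait()'s return
 * value, following the mapping in minijail_wait_internal() above. Note
 * that a normal exit status above 128 is indistinguishable from a signal
 * death in this encoding. MINIJAIL_ERR_JAIL and MINIJAIL_ERR_SIG_BASE are
 * defined in libminijail.h.
 *
 *   int status = minijail_wait(j);
 *   if (status < 0) {
 *           // Negative errno, e.g. -ECHILD when there is nothing to reap.
 *   } else if (status == MINIJAIL_ERR_JAIL) {
 *           // The child was killed by SIGSYS: a seccomp policy violation.
 *   } else if (status > MINIJAIL_ERR_SIG_BASE) {
 *           // The child died from signal (status - MINIJAIL_ERR_SIG_BASE),
 *           // mirroring bash's 128 + signum convention.
 *   } else {
 *           // The child exited normally with exit status |status|.
 *   }
 */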
4170 
4171 void API minijail_destroy(struct minijail *j)
4172 {
4173 	size_t i;
4174 
4175 	if (j->filter_prog) {
4176 		free(j->filter_prog->filter);
4177 		free(j->filter_prog);
4178 	}
4179 	free_mounts_list(j);
4180 	free_remounts_list(j);
4181 	while (j->hooks_head) {
4182 		struct hook *c = j->hooks_head;
4183 		j->hooks_head = c->next;
4184 		free(c);
4185 	}
4186 	j->hooks_tail = NULL;
4187 	free_fs_rules_list(j);
4188 	if (j->user)
4189 		free(j->user);
4190 	if (j->suppl_gid_list)
4191 		free(j->suppl_gid_list);
4192 	if (j->chrootdir)
4193 		free(j->chrootdir);
4194 	if (j->pid_file_path)
4195 		free(j->pid_file_path);
4196 	if (j->uidmap)
4197 		free(j->uidmap);
4198 	if (j->gidmap)
4199 		free(j->gidmap);
4200 	if (j->hostname)
4201 		free(j->hostname);
4202 	if (j->preload_path)
4203 		free(j->preload_path);
4204 	if (j->filename)
4205 		free(j->filename);
4206 	if (j->alt_syscall_table)
4207 		free(j->alt_syscall_table);
4208 	for (i = 0; i < j->cgroup_count; ++i)
4209 		free(j->cgroups[i]);
4210 	if (j->seccomp_policy_path)
4211 		free(j->seccomp_policy_path);
4212 	free(j);
4213 }
4214 
4215 void API minijail_log_to_fd(int fd, int min_priority)
4216 {
4217 	init_logging(LOG_TO_FD, fd, min_priority);
4218 }
4219 
4220 const char API *minijail_syscall_name(const struct minijail *j, long nr)
4221 {
4222 	if (j && j->flags.alt_syscall)
4223 		return kAltSyscallNamePlaceholder;
4224 	return lookup_syscall_name(nr);
4225 }
4226