/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 *        of Berkeley Packet Filters/Linux Socket Filters.
 */

#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	atomic_t usage;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
};
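
/*
 * As an illustration of the tree shape described above (not kernel code):
 * if a task attaches filter A, forks, and then each task attaches its own
 * filter, memory holds a tree rooted at A while each task still walks a
 * plain singly-linked list through @prev:
 *
 *	task1->seccomp.filter -> B -> A
 *	task2->seccomp.filter -> C ---^
 */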

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}
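
/*
 * For illustration (not kernel code): the layout filled in above is what
 * filter programs address with fixed offsets, e.g. a cBPF load such as
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *		 offsetof(struct seccomp_data, args[2]))
 *
 * fetches 32 bits of the third syscall argument.  Since @args entries are
 * 64-bit, which half arrives depends on endianness, which — as noted
 * above — is deliberately left to the filter author.
 */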

/**
 *	seccomp_check_filter - verify seccomp filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
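
/*
 * Example (illustrative userspace code, not compiled here): a minimal
 * filter built only from instructions this checker accepts.  It loads
 * the syscall number with a 32-bit, 4-byte-aligned absolute load, allows
 * getpid(2), and kills the task on anything else (a real filter would
 * also check seccomp_data.arch first):
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
 *	};
 *
 * The checker above rewrites the BPF_LD|BPF_W|BPF_ABS load so that the
 * interpreter fetches from seccomp_data rather than an skb.
 */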

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(const struct seccomp_data *sd)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			lockless_dereference(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd);

		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
			ret = cur_ret;
	}
	return ret;
}
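
/*
 * Concretely (action values per uapi/linux/seccomp.h, strongest first):
 * SECCOMP_RET_KILL < SECCOMP_RET_TRAP < SECCOMP_RET_ERRNO <
 * SECCOMP_RET_TRACE < SECCOMP_RET_ALLOW.  For example, if one filter in
 * the list returns SECCOMP_RET_ERRNO and an older one returns
 * SECCOMP_RET_TRAP, the TRAP action is what the task observes;
 * SECCOMP_RET_ALLOW results only if every filter allows the call.
 */
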
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}

static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure TIF_SECCOMP cannot be set before the mode (and
	 * filter) is set.
	 */
	smp_mb__before_atomic();
	set_tsk_thread_flag(task, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}

/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH. */
		if (unlikely(WARN_ON(failed == 0)))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);
		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference.  (This also
		 * allows a put before the assignment.)
		 */
		put_seccomp_filter(thread);
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
	}
}

/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or is running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	atomic_set(&sfilter->usage, 1);

	return sfilter;
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret)
			return ret;
	}

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads();

	return 0;
}

void __get_seccomp_filter(struct seccomp_filter *filter)
{
	/* Reference count is bounded by the number of total processes. */
	atomic_inc(&filter->usage);
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	__get_seccomp_filter(orig);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}

static void __put_seccomp_filter(struct seccomp_filter *orig)
{
	/* Clean up single-reference branches iteratively. */
	while (orig && atomic_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	__put_seccomp_filter(tsk->seccomp.filter);
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch();
	info.si_syscall = syscall;
	force_sig_info(SIGSYS, &info, current);
}
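
/*
 * On the receiving side, a userspace sketch (illustrative only, not part
 * of this file): a task using SECCOMP_RET_TRAP can install a SIGSYS
 * handler and read back the fields set above from the siginfo:
 *
 *	static void handle_sigsys(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		// info->si_syscall, info->si_arch and info->si_errno carry
 *		// the syscall number, the audit arch and the 16 bits of
 *		// filter-supplied data, respectively.
 *	}
 *
 *	struct sigaction act = { .sa_sigaction = handle_sigsys,
 *				 .sa_flags = SA_SIGINFO };
 *	sigaction(SIGSYS, &act, NULL);
 */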
#endif	/* CONFIG_SECCOMP_FILTER */

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};
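
/*
 * Illustrative userspace usage (a sketch, not part of this file): once
 * either call below succeeds, only the four syscalls above remain
 * available, and any other syscall kills the task with SIGKILL:
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *	// or, via the seccomp(2) syscall:
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_STRICT, 0, NULL);
 */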

static void __secure_computing_strict(int this_syscall)
{
	const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		syscall_whitelist = get_compat_mode1_syscalls();
#endif
	do {
		if (*syscall_whitelist == this_syscall)
			return;
	} while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL);
	do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	int data;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after TIF_SECCOMP was seen.
	 */
	rmb();

	filter_ret = seccomp_run_filters(sd);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, task_pt_regs(current),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, task_pt_regs(current));
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;

	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 task_pt_regs(current),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, task_pt_regs(current));
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;

	case SECCOMP_RET_ALLOW:
		return 0;

	case SECCOMP_RET_KILL:
	default:
		audit_seccomp(this_syscall, SIGSYS, action);
		do_exit(SIGSYS);
	}

	unreachable();

skip:
	audit_seccomp(this_syscall, 0, action);
	return -1;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();
}
#endif

int __secure_computing(const struct seccomp_data *sd)
{
	int mode = current->seccomp.mode;
	int this_syscall;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return 0;

	this_syscall = sd ? sd->nr :
		syscall_get_nr(current, task_pt_regs(current));

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		__secure_computing_strict(this_syscall);  /* may call do_exit */
		return 0;
	case SECCOMP_MODE_FILTER:
		return __seccomp_filter(this_syscall, sd, false);
	default:
		BUG();
	}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(current, seccomp_mode);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_free;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
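
/*
 * A userspace sketch of driving this path (illustrative only; "insns" is
 * a hypothetical array like the one shown after seccomp_check_filter):
 *
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);  // or hold CAP_SYS_ADMIN
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *
 * With SECCOMP_FILTER_FLAG_TSYNC the filter is applied to every thread,
 * or the call fails with the ID of a thread that could not be switched.
 */
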
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
			 const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl has always ignored
		 * filter, so make sure it is always NULL here to pass
		 * the internal check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* The prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;
	unsigned long count = 0;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	spin_lock_irq(&task->sighand->siglock);
	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		ret = -EINVAL;
		goto out;
	}

	filter = task->seccomp.filter;
	while (filter) {
		filter = filter->prev;
		count++;
	}

	if (filter_off >= count) {
		ret = -ENOENT;
		goto out;
	}
	count -= filter_off;

	filter = task->seccomp.filter;
	while (filter && count > 1) {
		filter = filter->prev;
		count--;
	}

	if (WARN_ON(count != 1 || !filter)) {
		/* The filter tree shouldn't shrink while we're using it. */
		ret = -ENOENT;
		goto out;
	}

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	__get_seccomp_filter(filter);
	spin_unlock_irq(&task->sighand->siglock);

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

	__put_seccomp_filter(filter);
	return ret;

out:
	spin_unlock_irq(&task->sighand->siglock);
	return ret;
}
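
/*
 * This helper backs ptrace's PTRACE_SECCOMP_GET_FILTER request.  A
 * checkpoint/restore-style caller might use it like this (a sketch,
 * error handling omitted):
 *
 *	long n = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, NULL);
 *	struct sock_filter *insns = calloc(n, sizeof(*insns));
 *	ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, insns);
 *
 * where the third argument selects which filter in the task's list to
 * dump (0 being the most recently attached) and a NULL buffer asks only
 * for the instruction count.
 */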
#endif