1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/signal.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
8  *
9  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
10  *		Changes to use preallocated sigqueue structures
11  *		to allow signals to be sent reliably.
12  */
13 
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/proc_fs.h>
26 #include <linux/tty.h>
27 #include <linux/binfmts.h>
28 #include <linux/coredump.h>
29 #include <linux/security.h>
30 #include <linux/syscalls.h>
31 #include <linux/ptrace.h>
32 #include <linux/signal.h>
33 #include <linux/signalfd.h>
34 #include <linux/ratelimit.h>
35 #include <linux/tracehook.h>
36 #include <linux/capability.h>
37 #include <linux/freezer.h>
38 #include <linux/pid_namespace.h>
39 #include <linux/nsproxy.h>
40 #include <linux/user_namespace.h>
41 #include <linux/uprobes.h>
42 #include <linux/compat.h>
43 #include <linux/cn_proc.h>
44 #include <linux/compiler.h>
45 #include <linux/posix-timers.h>
46 #include <linux/cgroup.h>
47 #include <linux/audit.h>
48 #include <linux/oom.h>
49 
50 #define CREATE_TRACE_POINTS
51 #include <trace/events/signal.h>
52 
53 #include <asm/param.h>
54 #include <linux/uaccess.h>
55 #include <asm/unistd.h>
56 #include <asm/siginfo.h>
57 #include <asm/cacheflush.h>
58 #include <asm/syscall.h>	/* for syscall_get_* */
59 
60 #undef CREATE_TRACE_POINTS
61 #include <trace/hooks/signal.h>
62 #include <trace/hooks/dtask.h>
63 /*
64  * SLAB caches for signal bits.
65  */
66 
67 static struct kmem_cache *sigqueue_cachep;
68 
69 int print_fatal_signals __read_mostly;
70 
71 static void __user *sig_handler(struct task_struct *t, int sig)
72 {
73 	return t->sighand->action[sig - 1].sa.sa_handler;
74 }
75 
76 static inline bool sig_handler_ignored(void __user *handler, int sig)
77 {
78 	/* Is it explicitly or implicitly ignored? */
79 	return handler == SIG_IGN ||
80 	       (handler == SIG_DFL && sig_kernel_ignore(sig));
81 }
82 
83 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
84 {
85 	void __user *handler;
86 
87 	handler = sig_handler(t, sig);
88 
89 	/* SIGKILL and SIGSTOP may not be sent to the global init */
90 	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
91 		return true;
92 
93 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
94 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
95 		return true;
96 
97 	/* Only allow kernel generated signals to this kthread */
98 	if (unlikely((t->flags & PF_KTHREAD) &&
99 		     (handler == SIG_KTHREAD_KERNEL) && !force))
100 		return true;
101 
102 	return sig_handler_ignored(handler, sig);
103 }
104 
105 static bool sig_ignored(struct task_struct *t, int sig, bool force)
106 {
107 	/*
108 	 * Blocked signals are never ignored, since the
109 	 * signal handler may change by the time it is
110 	 * unblocked.
111 	 */
112 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
113 		return false;
114 
115 	/*
116 	 * Tracers may want to know about even an ignored signal, unless it
117 	 * is SIGKILL, which can't be reported anyway but can be ignored
118 	 * by a SIGNAL_UNKILLABLE task.
119 	 */
120 	if (t->ptrace && sig != SIGKILL)
121 		return false;
122 
123 	return sig_task_ignored(t, sig, force);
124 }
125 
126 /*
127  * Re-calculate pending state from the set of locally pending
128  * signals, globally pending signals, and blocked signals.
129  */
130 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
131 {
132 	unsigned long ready;
133 	long i;
134 
135 	switch (_NSIG_WORDS) {
136 	default:
137 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
138 			ready |= signal->sig[i] &~ blocked->sig[i];
139 		break;
140 
141 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
142 		ready |= signal->sig[2] &~ blocked->sig[2];
143 		ready |= signal->sig[1] &~ blocked->sig[1];
144 		ready |= signal->sig[0] &~ blocked->sig[0];
145 		break;
146 
147 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
148 		ready |= signal->sig[0] &~ blocked->sig[0];
149 		break;
150 
151 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
152 	}
153 	return ready != 0;
154 }
155 
156 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
157 
158 static bool recalc_sigpending_tsk(struct task_struct *t)
159 {
160 	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
161 	    PENDING(&t->pending, &t->blocked) ||
162 	    PENDING(&t->signal->shared_pending, &t->blocked) ||
163 	    cgroup_task_frozen(t)) {
164 		set_tsk_thread_flag(t, TIF_SIGPENDING);
165 		return true;
166 	}
167 
168 	/*
169 	 * We must never clear the flag in another thread, or in current
170 	 * when it's possible the current syscall is returning -ERESTART*.
171 	 * So we don't clear it here; only callers that know they should clear it do so.
172 	 */
173 	return false;
174 }
175 
176 /*
177  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
178  * This is superfluous when called on current, the wakeup is a harmless no-op.
179  */
180 void recalc_sigpending_and_wake(struct task_struct *t)
181 {
182 	if (recalc_sigpending_tsk(t))
183 		signal_wake_up(t, 0);
184 }
185 
186 void recalc_sigpending(void)
187 {
188 	if (!recalc_sigpending_tsk(current) && !freezing(current))
189 		clear_thread_flag(TIF_SIGPENDING);
190 
191 }
192 EXPORT_SYMBOL(recalc_sigpending);
193 
194 void calculate_sigpending(void)
195 {
196 	/* Have any signals or users of TIF_SIGPENDING been delayed
197 	 * until after fork?
198 	 */
199 	spin_lock_irq(&current->sighand->siglock);
200 	set_tsk_thread_flag(current, TIF_SIGPENDING);
201 	recalc_sigpending();
202 	spin_unlock_irq(&current->sighand->siglock);
203 }
204 
205 /* Given the mask, find the first available signal that should be serviced. */
206 
207 #define SYNCHRONOUS_MASK \
208 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
209 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
210 
211 int next_signal(struct sigpending *pending, sigset_t *mask)
212 {
213 	unsigned long i, *s, *m, x;
214 	int sig = 0;
215 
216 	s = pending->signal.sig;
217 	m = mask->sig;
218 
219 	/*
220 	 * Handle the first word specially: it contains the
221 	 * synchronous signals that need to be dequeued first.
222 	 */
223 	x = *s &~ *m;
224 	if (x) {
225 		if (x & SYNCHRONOUS_MASK)
226 			x &= SYNCHRONOUS_MASK;
227 		sig = ffz(~x) + 1;
228 		return sig;
229 	}
230 
231 	switch (_NSIG_WORDS) {
232 	default:
233 		for (i = 1; i < _NSIG_WORDS; ++i) {
234 			x = *++s &~ *++m;
235 			if (!x)
236 				continue;
237 			sig = ffz(~x) + i*_NSIG_BPW + 1;
238 			break;
239 		}
240 		break;
241 
242 	case 2:
243 		x = s[1] &~ m[1];
244 		if (!x)
245 			break;
246 		sig = ffz(~x) + _NSIG_BPW + 1;
247 		break;
248 
249 	case 1:
250 		/* Nothing to do */
251 		break;
252 	}
253 
254 	return sig;
255 }
256 
257 static inline void print_dropped_signal(int sig)
258 {
259 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
260 
261 	if (!print_fatal_signals)
262 		return;
263 
264 	if (!__ratelimit(&ratelimit_state))
265 		return;
266 
267 	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
268 				current->comm, current->pid, sig);
269 }
270 
271 /**
272  * task_set_jobctl_pending - set jobctl pending bits
273  * @task: target task
274  * @mask: pending bits to set
275  *
276  * Set @mask on @task->jobctl.  @mask must be a subset of
277  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
278  * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
279  * cleared.  If @task is already being killed or exiting, this function
280  * becomes a noop.
281  *
282  * CONTEXT:
283  * Must be called with @task->sighand->siglock held.
284  *
285  * RETURNS:
286  * %true if @mask is set, %false if made noop because @task was dying.
287  */
288 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
289 {
290 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
291 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
292 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
293 
294 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
295 		return false;
296 
297 	if (mask & JOBCTL_STOP_SIGMASK)
298 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
299 
300 	task->jobctl |= mask;
301 	return true;
302 }
303 
304 /**
305  * task_clear_jobctl_trapping - clear jobctl trapping bit
306  * @task: target task
307  *
308  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
309  * Clear it and wake up the ptracer.  Note that we don't need any further
310  * locking.  @task->siglock guarantees that @task->parent points to the
311  * ptracer.
312  *
313  * CONTEXT:
314  * Must be called with @task->sighand->siglock held.
315  */
316 void task_clear_jobctl_trapping(struct task_struct *task)
317 {
318 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
319 		task->jobctl &= ~JOBCTL_TRAPPING;
320 		smp_mb();	/* advised by wake_up_bit() */
321 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
322 	}
323 }
324 
325 /**
326  * task_clear_jobctl_pending - clear jobctl pending bits
327  * @task: target task
328  * @mask: pending bits to clear
329  *
330  * Clear @mask from @task->jobctl.  @mask must be subset of
331  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
332  * STOP bits are cleared together.
333  *
334  * If clearing of @mask leaves no stop or trap pending, this function calls
335  * task_clear_jobctl_trapping().
336  *
337  * CONTEXT:
338  * Must be called with @task->sighand->siglock held.
339  */
340 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
341 {
342 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
343 
344 	if (mask & JOBCTL_STOP_PENDING)
345 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
346 
347 	task->jobctl &= ~mask;
348 
349 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
350 		task_clear_jobctl_trapping(task);
351 }
352 
353 /**
354  * task_participate_group_stop - participate in a group stop
355  * @task: task participating in a group stop
356  *
357  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
358  * Group stop states are cleared and the group stop count is consumed if
359  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
360  * stop, the appropriate `SIGNAL_*` flags are set.
361  *
362  * CONTEXT:
363  * Must be called with @task->sighand->siglock held.
364  *
365  * RETURNS:
366  * %true if group stop completion should be notified to the parent, %false
367  * otherwise.
368  */
369 static bool task_participate_group_stop(struct task_struct *task)
370 {
371 	struct signal_struct *sig = task->signal;
372 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
373 
374 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
375 
376 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
377 
378 	if (!consume)
379 		return false;
380 
381 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
382 		sig->group_stop_count--;
383 
384 	/*
385 	 * Tell the caller to notify completion iff we are entering into a
386 	 * fresh group stop.  Read comment in do_signal_stop() for details.
387 	 */
388 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
389 		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
390 		return true;
391 	}
392 	return false;
393 }
394 
395 void task_join_group_stop(struct task_struct *task)
396 {
397 	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
398 	struct signal_struct *sig = current->signal;
399 
400 	if (sig->group_stop_count) {
401 		sig->group_stop_count++;
402 		mask |= JOBCTL_STOP_CONSUME;
403 	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
404 		return;
405 
406 	/* Have the new thread join an on-going signal group stop */
407 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
408 }
409 
410 /*
411  * allocate a new signal queue record
412  * - this may be called without locks if and only if t == current, otherwise an
413  *   appropriate lock must be held to stop the target task from exiting
414  */
415 static struct sigqueue *
416 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
417 		 int override_rlimit, const unsigned int sigqueue_flags)
418 {
419 	struct sigqueue *q = NULL;
420 	struct ucounts *ucounts = NULL;
421 	long sigpending;
422 
423 	/*
424 	 * Protect access to @t credentials. This can go away when all
425 	 * callers hold rcu read lock.
426 	 *
427 	 * NOTE! A pending signal will hold on to the user refcount,
428 	 * and we get/put the refcount only when the sigpending count
429 	 * changes from/to zero.
430 	 */
431 	rcu_read_lock();
432 	ucounts = task_ucounts(t);
433 	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
434 	rcu_read_unlock();
435 	if (!sigpending)
436 		return NULL;
437 
438 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
439 		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
440 	} else {
441 		print_dropped_signal(sig);
442 	}
443 
444 	if (unlikely(q == NULL)) {
445 		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
446 	} else {
447 		INIT_LIST_HEAD(&q->list);
448 		q->flags = sigqueue_flags;
449 		q->ucounts = ucounts;
450 	}
451 	return q;
452 }
453 
454 static void __sigqueue_free(struct sigqueue *q)
455 {
456 	if (q->flags & SIGQUEUE_PREALLOC)
457 		return;
458 	if (q->ucounts) {
459 		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
460 		q->ucounts = NULL;
461 	}
462 	kmem_cache_free(sigqueue_cachep, q);
463 }
464 
465 void flush_sigqueue(struct sigpending *queue)
466 {
467 	struct sigqueue *q;
468 
469 	sigemptyset(&queue->signal);
470 	while (!list_empty(&queue->list)) {
471 		q = list_entry(queue->list.next, struct sigqueue , list);
472 		list_del_init(&q->list);
473 		__sigqueue_free(q);
474 	}
475 }
476 
477 /*
478  * Flush all pending signals for this kthread.
479  */
480 void flush_signals(struct task_struct *t)
481 {
482 	unsigned long flags;
483 
484 	spin_lock_irqsave(&t->sighand->siglock, flags);
485 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
486 	flush_sigqueue(&t->pending);
487 	flush_sigqueue(&t->signal->shared_pending);
488 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
489 }
490 EXPORT_SYMBOL(flush_signals);
491 
492 #ifdef CONFIG_POSIX_TIMERS
493 static void __flush_itimer_signals(struct sigpending *pending)
494 {
495 	sigset_t signal, retain;
496 	struct sigqueue *q, *n;
497 
498 	signal = pending->signal;
499 	sigemptyset(&retain);
500 
501 	list_for_each_entry_safe(q, n, &pending->list, list) {
502 		int sig = q->info.si_signo;
503 
504 		if (likely(q->info.si_code != SI_TIMER)) {
505 			sigaddset(&retain, sig);
506 		} else {
507 			sigdelset(&signal, sig);
508 			list_del_init(&q->list);
509 			__sigqueue_free(q);
510 		}
511 	}
512 
513 	sigorsets(&pending->signal, &signal, &retain);
514 }
515 
516 void flush_itimer_signals(void)
517 {
518 	struct task_struct *tsk = current;
519 	unsigned long flags;
520 
521 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
522 	__flush_itimer_signals(&tsk->pending);
523 	__flush_itimer_signals(&tsk->signal->shared_pending);
524 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
525 }
526 #endif
527 
528 void ignore_signals(struct task_struct *t)
529 {
530 	int i;
531 
532 	for (i = 0; i < _NSIG; ++i)
533 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
534 
535 	flush_signals(t);
536 }
537 
538 /*
539  * Flush all handlers for a task.
540  */
541 
542 void
543 flush_signal_handlers(struct task_struct *t, int force_default)
544 {
545 	int i;
546 	struct k_sigaction *ka = &t->sighand->action[0];
547 	for (i = _NSIG ; i != 0 ; i--) {
548 		if (force_default || ka->sa.sa_handler != SIG_IGN)
549 			ka->sa.sa_handler = SIG_DFL;
550 		ka->sa.sa_flags = 0;
551 #ifdef __ARCH_HAS_SA_RESTORER
552 		ka->sa.sa_restorer = NULL;
553 #endif
554 		sigemptyset(&ka->sa.sa_mask);
555 		ka++;
556 	}
557 }
558 
559 bool unhandled_signal(struct task_struct *tsk, int sig)
560 {
561 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
562 	if (is_global_init(tsk))
563 		return true;
564 
565 	if (handler != SIG_IGN && handler != SIG_DFL)
566 		return false;
567 
568 	/* if ptraced, let the tracer determine */
569 	return !tsk->ptrace;
570 }
571 
572 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
573 			   bool *resched_timer)
574 {
575 	struct sigqueue *q, *first = NULL;
576 
577 	/*
578 	 * Collect the siginfo appropriate to this signal.  Check if
579 	 * there is another siginfo for the same signal.
580 	 */
581 	list_for_each_entry(q, &list->list, list) {
582 		if (q->info.si_signo == sig) {
583 			if (first)
584 				goto still_pending;
585 			first = q;
586 		}
587 	}
588 
589 	sigdelset(&list->signal, sig);
590 
591 	if (first) {
592 still_pending:
593 		list_del_init(&first->list);
594 		copy_siginfo(info, &first->info);
595 
596 		*resched_timer =
597 			(first->flags & SIGQUEUE_PREALLOC) &&
598 			(info->si_code == SI_TIMER) &&
599 			(info->si_sys_private);
600 
601 		__sigqueue_free(first);
602 	} else {
603 		/*
604 		 * Ok, it wasn't in the queue.  This must be
605 		 * a fast-pathed signal or we must have been
606 		 * out of queue space.  So zero out the info.
607 		 */
608 		clear_siginfo(info);
609 		info->si_signo = sig;
610 		info->si_errno = 0;
611 		info->si_code = SI_USER;
612 		info->si_pid = 0;
613 		info->si_uid = 0;
614 	}
615 }
616 
617 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
618 			kernel_siginfo_t *info, bool *resched_timer)
619 {
620 	int sig = next_signal(pending, mask);
621 
622 	if (sig)
623 		collect_signal(sig, pending, info, resched_timer);
624 	return sig;
625 }
626 
627 /*
628  * Dequeue a signal and return the element to the caller, which is
629  * expected to free it.
630  *
631  * All callers have to hold the siglock.
632  */
633 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
634 {
635 	bool resched_timer = false;
636 	int signr;
637 
638 	/* We only dequeue private signals from ourselves, we don't let
639 	 * signalfd steal them
640 	 */
641 	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
642 	if (!signr) {
643 		signr = __dequeue_signal(&tsk->signal->shared_pending,
644 					 mask, info, &resched_timer);
645 #ifdef CONFIG_POSIX_TIMERS
646 		/*
647 		 * itimer signal ?
648 		 *
649 		 * itimers are process shared and we restart periodic
650 		 * itimers in the signal delivery path to prevent DoS
651 		 * attacks in the high resolution timer case. This is
652 		 * compliant with the old way of self-restarting
653 		 * itimers, as the SIGALRM is a legacy signal and only
654 		 * queued once. Changing the restart behaviour to
655 		 * restart the timer in the signal dequeue path is
656 		 * reducing the timer noise on heavily loaded !highres
657 		 * systems too.
658 		 */
659 		if (unlikely(signr == SIGALRM)) {
660 			struct hrtimer *tmr = &tsk->signal->real_timer;
661 
662 			if (!hrtimer_is_queued(tmr) &&
663 			    tsk->signal->it_real_incr != 0) {
664 				hrtimer_forward(tmr, tmr->base->get_time(),
665 						tsk->signal->it_real_incr);
666 				hrtimer_restart(tmr);
667 			}
668 		}
669 #endif
670 	}
671 
672 	recalc_sigpending();
673 	if (!signr)
674 		return 0;
675 
676 	if (unlikely(sig_kernel_stop(signr))) {
677 		/*
678 		 * Set a marker that we have dequeued a stop signal.  Our
679 		 * caller might release the siglock and then the pending
680 		 * stop signal it is about to process is no longer in the
681 		 * pending bitmasks, but must still be cleared by a SIGCONT
682 		 * (and overruled by a SIGKILL).  So those cases clear this
683 		 * shared flag after we've set it.  Note that this flag may
684 		 * remain set after the signal we return is ignored or
685 		 * handled.  That doesn't matter because its only purpose
686 		 * is to alert stop-signal processing code when another
687 		 * processor has come along and cleared the flag.
688 		 */
689 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
690 	}
691 #ifdef CONFIG_POSIX_TIMERS
692 	if (resched_timer) {
693 		/*
694 		 * Release the siglock to ensure proper locking order
695 		 * of timer locks outside of siglocks.  Note, we leave
696 		 * irqs disabled here, since the posix-timers code is
697 		 * about to disable them again anyway.
698 		 */
699 		spin_unlock(&tsk->sighand->siglock);
700 		posixtimer_rearm(info);
701 		spin_lock(&tsk->sighand->siglock);
702 
703 		/* Don't expose the si_sys_private value to userspace */
704 		info->si_sys_private = 0;
705 	}
706 #endif
707 	return signr;
708 }
709 EXPORT_SYMBOL_GPL(dequeue_signal);
710 
711 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
712 {
713 	struct task_struct *tsk = current;
714 	struct sigpending *pending = &tsk->pending;
715 	struct sigqueue *q, *sync = NULL;
716 
717 	/*
718 	 * Might a synchronous signal be in the queue?
719 	 */
720 	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
721 		return 0;
722 
723 	/*
724 	 * Return the first synchronous signal in the queue.
725 	 */
726 	list_for_each_entry(q, &pending->list, list) {
727 		/* Synchronous signals have a positive si_code */
728 		if ((q->info.si_code > SI_USER) &&
729 		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
730 			sync = q;
731 			goto next;
732 		}
733 	}
734 	return 0;
735 next:
736 	/*
737 	 * Check if there is another siginfo for the same signal.
738 	 */
739 	list_for_each_entry_continue(q, &pending->list, list) {
740 		if (q->info.si_signo == sync->info.si_signo)
741 			goto still_pending;
742 	}
743 
744 	sigdelset(&pending->signal, sync->info.si_signo);
745 	recalc_sigpending();
746 still_pending:
747 	list_del_init(&sync->list);
748 	copy_siginfo(info, &sync->info);
749 	__sigqueue_free(sync);
750 	return info->si_signo;
751 }
752 
753 /*
754  * Tell a process that it has a new active signal.
755  *
756  * NOTE! we rely on the previous spin_lock to
757  * lock interrupts for us! We can only be called with
758  * "siglock" held, and the local interrupt must
759  * have been disabled when that got acquired!
760  *
761  * No need to set need_resched since signal event passing
762  * goes through ->blocked
763  */
764 void signal_wake_up_state(struct task_struct *t, unsigned int state)
765 {
766 	set_tsk_thread_flag(t, TIF_SIGPENDING);
767 	/*
768 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
769 	 * case. We don't check t->state here because there is a race with it
770  * executing on another processor and just now entering stopped state.
771 	 * By using wake_up_state, we ensure the process will wake up and
772 	 * handle its death signal.
773 	 */
774 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
775 		kick_process(t);
776 }
777 
778 /*
779  * Remove signals in mask from the pending set and queue.
780  * All matching sigqueue entries are freed.
781  *
782  * All callers must be holding the siglock.
783  */
784 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
785 {
786 	struct sigqueue *q, *n;
787 	sigset_t m;
788 
789 	sigandsets(&m, mask, &s->signal);
790 	if (sigisemptyset(&m))
791 		return;
792 
793 	sigandnsets(&s->signal, &s->signal, mask);
794 	list_for_each_entry_safe(q, n, &s->list, list) {
795 		if (sigismember(mask, q->info.si_signo)) {
796 			list_del_init(&q->list);
797 			__sigqueue_free(q);
798 		}
799 	}
800 }
801 
802 static inline int is_si_special(const struct kernel_siginfo *info)
803 {
804 	return info <= SEND_SIG_PRIV;
805 }
806 
807 static inline bool si_fromuser(const struct kernel_siginfo *info)
808 {
809 	return info == SEND_SIG_NOINFO ||
810 		(!is_si_special(info) && SI_FROMUSER(info));
811 }
812 
813 /*
814  * called with RCU read lock from check_kill_permission()
815  */
816 static bool kill_ok_by_cred(struct task_struct *t)
817 {
818 	const struct cred *cred = current_cred();
819 	const struct cred *tcred = __task_cred(t);
820 
821 	return uid_eq(cred->euid, tcred->suid) ||
822 	       uid_eq(cred->euid, tcred->uid) ||
823 	       uid_eq(cred->uid, tcred->suid) ||
824 	       uid_eq(cred->uid, tcred->uid) ||
825 	       ns_capable(tcred->user_ns, CAP_KILL);
826 }
827 
828 /*
829  * Bad permissions for sending the signal
830  * - the caller must hold the RCU read lock
831  */
832 static int check_kill_permission(int sig, struct kernel_siginfo *info,
833 				 struct task_struct *t)
834 {
835 	struct pid *sid;
836 	int error;
837 
838 	if (!valid_signal(sig))
839 		return -EINVAL;
840 
841 	if (!si_fromuser(info))
842 		return 0;
843 
844 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
845 	if (error)
846 		return error;
847 
848 	if (!same_thread_group(current, t) &&
849 	    !kill_ok_by_cred(t)) {
850 		switch (sig) {
851 		case SIGCONT:
852 			sid = task_session(t);
853 			/*
854 			 * We don't return the error if sid == NULL. The
855 			 * task was unhashed, the caller must notice this.
856 			 */
857 			if (!sid || sid == task_session(current))
858 				break;
859 			fallthrough;
860 		default:
861 			return -EPERM;
862 		}
863 	}
864 
865 	return security_task_kill(t, info, sig, NULL);
866 }
867 
868 /**
869  * ptrace_trap_notify - schedule trap to notify ptracer
870  * @t: tracee wanting to notify tracer
871  *
872  * This function schedules sticky ptrace trap which is cleared on the next
873  * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
874  * ptracer.
875  *
876  * If @t is running, STOP trap will be taken.  If trapped for STOP and
877  * ptracer is listening for events, tracee is woken up so that it can
878  * re-trap for the new event.  If trapped otherwise, STOP trap will be
879  * eventually taken without returning to userland after the existing traps
880  * are finished by PTRACE_CONT.
881  *
882  * CONTEXT:
883  * Must be called with @task->sighand->siglock held.
884  */
885 static void ptrace_trap_notify(struct task_struct *t)
886 {
887 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
888 	assert_spin_locked(&t->sighand->siglock);
889 
890 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
891 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
892 }
893 
894 /*
895  * Handle magic process-wide effects of stop/continue signals. Unlike
896  * the signal actions, these happen immediately at signal-generation
897  * time regardless of blocking, ignoring, or handling.  This does the
898  * actual continuing for SIGCONT, but not the actual stopping for stop
899  * signals. The process stop is done as a signal action for SIG_DFL.
900  *
901  * Returns true if the signal should be actually delivered, otherwise
902  * it should be dropped.
903  */
904 static bool prepare_signal(int sig, struct task_struct *p, bool force)
905 {
906 	struct signal_struct *signal = p->signal;
907 	struct task_struct *t;
908 	sigset_t flush;
909 
910 	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
911 		if (!(signal->flags & SIGNAL_GROUP_EXIT))
912 			return sig == SIGKILL;
913 		/*
914 		 * The process is in the middle of dying, nothing to do.
915 		 */
916 	} else if (sig_kernel_stop(sig)) {
917 		/*
918 		 * This is a stop signal.  Remove SIGCONT from all queues.
919 		 */
920 		siginitset(&flush, sigmask(SIGCONT));
921 		flush_sigqueue_mask(&flush, &signal->shared_pending);
922 		for_each_thread(p, t)
923 			flush_sigqueue_mask(&flush, &t->pending);
924 	} else if (sig == SIGCONT) {
925 		unsigned int why;
926 		/*
927 		 * Remove all stop signals from all queues, wake all threads.
928 		 */
929 		siginitset(&flush, SIG_KERNEL_STOP_MASK);
930 		flush_sigqueue_mask(&flush, &signal->shared_pending);
931 		for_each_thread(p, t) {
932 			flush_sigqueue_mask(&flush, &t->pending);
933 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
934 			if (likely(!(t->ptrace & PT_SEIZED)))
935 				wake_up_state(t, __TASK_STOPPED);
936 			else
937 				ptrace_trap_notify(t);
938 		}
939 
940 		/*
941 		 * Notify the parent with CLD_CONTINUED if we were stopped.
942 		 *
943 		 * If we were in the middle of a group stop, we pretend it
944 		 * was already finished, and then continued. Since SIGCHLD
945 		 * doesn't queue we report only CLD_STOPPED, as if the next
946 		 * CLD_CONTINUED was dropped.
947 		 */
948 		why = 0;
949 		if (signal->flags & SIGNAL_STOP_STOPPED)
950 			why |= SIGNAL_CLD_CONTINUED;
951 		else if (signal->group_stop_count)
952 			why |= SIGNAL_CLD_STOPPED;
953 
954 		if (why) {
955 			/*
956 			 * The first thread which returns from do_signal_stop()
957 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
958 			 * notify its parent. See get_signal().
959 			 */
960 			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
961 			signal->group_stop_count = 0;
962 			signal->group_exit_code = 0;
963 		}
964 	}
965 
966 	return !sig_ignored(p, sig, force);
967 }
968 
969 /*
970  * Test if P wants to take SIG.  After we've checked all threads with this,
971  * it's equivalent to finding no threads not blocking SIG.  Any threads not
972  * blocking SIG were ruled out because they are not running and already
973  * have pending signals.  Such threads will dequeue from the shared queue
974  * as soon as they're available, so putting the signal on the shared queue
975  * will be equivalent to sending it to one such thread.
976  */
977 static inline bool wants_signal(int sig, struct task_struct *p)
978 {
979 	if (sigismember(&p->blocked, sig))
980 		return false;
981 
982 	if (p->flags & PF_EXITING)
983 		return false;
984 
985 	if (sig == SIGKILL)
986 		return true;
987 
988 	if (task_is_stopped_or_traced(p))
989 		return false;
990 
991 	return task_curr(p) || !task_sigpending(p);
992 }
993 
994 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
995 {
996 	struct signal_struct *signal = p->signal;
997 	struct task_struct *t;
998 	bool wake;
999 
1000 	/*
1001 	 * Now find a thread we can wake up to take the signal off the queue.
1002 	 *
1003 	 * If the main thread wants the signal, it gets first crack.
1004 	 * Probably the least surprising to the average bear.
1005 	 */
1006 	if (wants_signal(sig, p))
1007 		t = p;
1008 	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1009 		/*
1010 		 * There is just one thread and it does not need to be woken.
1011 		 * It will dequeue unblocked signals before it runs again.
1012 		 */
1013 		return;
1014 	else {
1015 		/*
1016 		 * Otherwise try to find a suitable thread.
1017 		 */
1018 		t = signal->curr_target;
1019 		while (!wants_signal(sig, t)) {
1020 			t = next_thread(t);
1021 			if (t == signal->curr_target)
1022 				/*
1023 				 * No thread needs to be woken.
1024 				 * Any eligible threads will see
1025 				 * the signal in the queue soon.
1026 				 */
1027 				return;
1028 		}
1029 		signal->curr_target = t;
1030 	}
1031 
1032 	/*
1033 	 * Found a killable thread.  If the signal will be fatal,
1034 	 * then start taking the whole group down immediately.
1035 	 */
1036 	if (sig_fatal(p, sig) &&
1037 	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
1038 	    !sigismember(&t->real_blocked, sig) &&
1039 	    (sig == SIGKILL || !p->ptrace)) {
1040 		/*
1041 		 * This signal will be fatal to the whole group.
1042 		 */
1043 		if (!sig_kernel_coredump(sig)) {
1044 			/*
1045 			 * Start a group exit and wake everybody up.
1046 			 * This way we don't have other threads
1047 			 * running and doing things after a slower
1048 			 * thread has the fatal signal pending.
1049 			 */
1050 			signal->flags = SIGNAL_GROUP_EXIT;
1051 			signal->group_exit_code = sig;
1052 			signal->group_stop_count = 0;
1053 			t = p;
1054 			do {
1055 				trace_android_vh_exit_signal(t);
1056 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1057 				sigaddset(&t->pending.signal, SIGKILL);
1058 				wake = true;
1059 				trace_android_vh_exit_signal_whether_wake(t, &wake);
1060 				if (wake)
1061 					signal_wake_up(t, 1);
1062 			} while_each_thread(p, t);
1063 			return;
1064 		}
1065 	}
1066 
1067 	/*
1068 	 * The signal is already in the shared-pending queue.
1069 	 * Tell the chosen thread to wake up and dequeue it.
1070 	 */
1071 	signal_wake_up(t, sig == SIGKILL);
1072 	return;
1073 }
1074 
1075 static inline bool legacy_queue(struct sigpending *signals, int sig)
1076 {
1077 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1078 }
1079 
1080 static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1081 			enum pid_type type, bool force)
1082 {
1083 	struct sigpending *pending;
1084 	struct sigqueue *q;
1085 	int override_rlimit;
1086 	int ret = 0, result;
1087 
1088 	assert_spin_locked(&t->sighand->siglock);
1089 
1090 	result = TRACE_SIGNAL_IGNORED;
1091 	if (!prepare_signal(sig, t, force))
1092 		goto ret;
1093 
1094 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1095 	/*
1096 	 * Short-circuit ignored signals and support queuing
1097 	 * exactly one non-rt signal, so that we can get more
1098 	 * detailed information about the cause of the signal.
1099 	 */
1100 	result = TRACE_SIGNAL_ALREADY_PENDING;
1101 	if (legacy_queue(pending, sig))
1102 		goto ret;
1103 
1104 	result = TRACE_SIGNAL_DELIVERED;
1105 	/*
1106 	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1107 	 */
1108 	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1109 		goto out_set;
1110 
1111 	/*
1112 	 * Real-time signals must be queued if sent by sigqueue, or
1113 	 * some other real-time mechanism.  It is implementation
1114 	 * defined whether kill() does so.  We attempt to do so, on
1115 	 * the principle of least surprise, but since kill is not
1116 	 * allowed to fail with EAGAIN when low on memory we just
1117 	 * make sure at least one signal gets delivered and don't
1118 	 * pass on the info struct.
1119 	 */
1120 	if (sig < SIGRTMIN)
1121 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1122 	else
1123 		override_rlimit = 0;
1124 
1125 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1126 
1127 	if (q) {
1128 		list_add_tail(&q->list, &pending->list);
1129 		switch ((unsigned long) info) {
1130 		case (unsigned long) SEND_SIG_NOINFO:
1131 			clear_siginfo(&q->info);
1132 			q->info.si_signo = sig;
1133 			q->info.si_errno = 0;
1134 			q->info.si_code = SI_USER;
1135 			q->info.si_pid = task_tgid_nr_ns(current,
1136 							task_active_pid_ns(t));
1137 			rcu_read_lock();
1138 			q->info.si_uid =
1139 				from_kuid_munged(task_cred_xxx(t, user_ns),
1140 						 current_uid());
1141 			rcu_read_unlock();
1142 			break;
1143 		case (unsigned long) SEND_SIG_PRIV:
1144 			clear_siginfo(&q->info);
1145 			q->info.si_signo = sig;
1146 			q->info.si_errno = 0;
1147 			q->info.si_code = SI_KERNEL;
1148 			q->info.si_pid = 0;
1149 			q->info.si_uid = 0;
1150 			break;
1151 		default:
1152 			copy_siginfo(&q->info, info);
1153 			break;
1154 		}
1155 	} else if (!is_si_special(info) &&
1156 		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1157 		/*
1158 		 * Queue overflow, abort.  We may abort if the
1159 		 * signal was rt and sent by user using something
1160 		 * other than kill().
1161 		 */
1162 		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1163 		ret = -EAGAIN;
1164 		goto ret;
1165 	} else {
1166 		/*
1167 		 * This is a silent loss of information.  We still
1168 		 * send the signal, but the *info bits are lost.
1169 		 */
1170 		result = TRACE_SIGNAL_LOSE_INFO;
1171 	}
1172 
1173 out_set:
1174 	signalfd_notify(t, sig);
1175 	sigaddset(&pending->signal, sig);
1176 
1177 	/* Let multiprocess signals appear after on-going forks */
1178 	if (type > PIDTYPE_TGID) {
1179 		struct multiprocess_signals *delayed;
1180 		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1181 			sigset_t *signal = &delayed->signal;
1182 			/* Can't queue both a stop and a continue signal */
1183 			if (sig == SIGCONT)
1184 				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1185 			else if (sig_kernel_stop(sig))
1186 				sigdelset(signal, SIGCONT);
1187 			sigaddset(signal, sig);
1188 		}
1189 	}
1190 
1191 	complete_signal(sig, t, type);
1192 ret:
1193 	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1194 	return ret;
1195 }
1196 
1197 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1198 {
1199 	bool ret = false;
1200 	switch (siginfo_layout(info->si_signo, info->si_code)) {
1201 	case SIL_KILL:
1202 	case SIL_CHLD:
1203 	case SIL_RT:
1204 		ret = true;
1205 		break;
1206 	case SIL_TIMER:
1207 	case SIL_POLL:
1208 	case SIL_FAULT:
1209 	case SIL_FAULT_TRAPNO:
1210 	case SIL_FAULT_MCEERR:
1211 	case SIL_FAULT_BNDERR:
1212 	case SIL_FAULT_PKUERR:
1213 	case SIL_FAULT_PERF_EVENT:
1214 	case SIL_SYS:
1215 		ret = false;
1216 		break;
1217 	}
1218 	return ret;
1219 }
1220 
1221 static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1222 			enum pid_type type)
1223 {
1224 	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1225 	bool force = false;
1226 
1227 	if (info == SEND_SIG_NOINFO) {
1228 		/* Force if sent from an ancestor pid namespace */
1229 		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1230 	} else if (info == SEND_SIG_PRIV) {
1231 		/* Don't ignore kernel generated signals */
1232 		force = true;
1233 	} else if (has_si_pid_and_uid(info)) {
1234 		/* SIGKILL and SIGSTOP are special or have ids */
1235 		struct user_namespace *t_user_ns;
1236 
1237 		rcu_read_lock();
1238 		t_user_ns = task_cred_xxx(t, user_ns);
1239 		if (current_user_ns() != t_user_ns) {
1240 			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1241 			info->si_uid = from_kuid_munged(t_user_ns, uid);
1242 		}
1243 		rcu_read_unlock();
1244 
1245 		/* A kernel generated signal? */
1246 		force = (info->si_code == SI_KERNEL);
1247 
1248 		/* From an ancestor pid namespace? */
1249 		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1250 			info->si_pid = 0;
1251 			force = true;
1252 		}
1253 	}
1254 	return __send_signal(sig, info, t, type, force);
1255 }
1256 
1257 static void print_fatal_signal(int signr)
1258 {
1259 	struct pt_regs *regs = signal_pt_regs();
1260 	pr_info("potentially unexpected fatal signal %d.\n", signr);
1261 
1262 #if defined(__i386__) && !defined(__arch_um__)
1263 	pr_info("code at %08lx: ", regs->ip);
1264 	{
1265 		int i;
1266 		for (i = 0; i < 16; i++) {
1267 			unsigned char insn;
1268 
1269 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1270 				break;
1271 			pr_cont("%02x ", insn);
1272 		}
1273 	}
1274 	pr_cont("\n");
1275 #endif
1276 	preempt_disable();
1277 	show_regs(regs);
1278 	preempt_enable();
1279 }
1280 
1281 static int __init setup_print_fatal_signals(char *str)
1282 {
1283 	get_option (&str, &print_fatal_signals);
1284 
1285 	return 1;
1286 }
1287 
1288 __setup("print-fatal-signals=", setup_print_fatal_signals);
1289 
1290 int
1291 __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1292 {
1293 	return send_signal(sig, info, p, PIDTYPE_TGID);
1294 }
1295 
1296 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1297 			enum pid_type type)
1298 {
1299 	unsigned long flags;
1300 	int ret = -ESRCH;
1301 	trace_android_vh_do_send_sig_info(sig, current, p);
1302 	if (lock_task_sighand(p, &flags)) {
1303 		ret = send_signal(sig, info, p, type);
1304 		unlock_task_sighand(p, &flags);
1305 	}
1306 
1307 	return ret;
1308 }
1309 
1310 enum sig_handler {
1311 	HANDLER_CURRENT, /* If reachable use the current handler */
1312 	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1313 	HANDLER_EXIT,	 /* Only visible as the process exit code */
1314 };
1315 
1316 /*
1317  * Force a signal that the process can't ignore: if necessary
1318  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1319  *
1320  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1321  * since we do not want to have a signal handler that was blocked
1322  * be invoked when user space had explicitly blocked it.
1323  *
1324  * We don't want to have recursive SIGSEGV's etc, for example,
1325  * that is why we also clear SIGNAL_UNKILLABLE.
1326  */
1327 static int
1328 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1329 	enum sig_handler handler)
1330 {
1331 	unsigned long int flags;
1332 	int ret, blocked, ignored;
1333 	struct k_sigaction *action;
1334 	int sig = info->si_signo;
1335 
1336 	spin_lock_irqsave(&t->sighand->siglock, flags);
1337 	action = &t->sighand->action[sig-1];
1338 	ignored = action->sa.sa_handler == SIG_IGN;
1339 	blocked = sigismember(&t->blocked, sig);
1340 	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1341 		action->sa.sa_handler = SIG_DFL;
1342 		if (handler == HANDLER_EXIT)
1343 			action->sa.sa_flags |= SA_IMMUTABLE;
1344 		if (blocked) {
1345 			sigdelset(&t->blocked, sig);
1346 			recalc_sigpending_and_wake(t);
1347 		}
1348 	}
1349 	/*
1350 	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1351 	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1352 	 */
1353 	if (action->sa.sa_handler == SIG_DFL &&
1354 	    (!t->ptrace || (handler == HANDLER_EXIT)))
1355 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1356 	ret = send_signal(sig, info, t, PIDTYPE_PID);
1357 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1358 
1359 	return ret;
1360 }
1361 
1362 int force_sig_info(struct kernel_siginfo *info)
1363 {
1364 	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1365 }
1366 
1367 /*
1368  * Nuke all other threads in the group.
1369  */
1370 int zap_other_threads(struct task_struct *p)
1371 {
1372 	struct task_struct *t = p;
1373 	int count = 0;
1374 
1375 	p->signal->group_stop_count = 0;
1376 
1377 	while_each_thread(p, t) {
1378 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1379 		count++;
1380 
1381 		/* Don't bother with already dead threads */
1382 		if (t->exit_state)
1383 			continue;
1384 		sigaddset(&t->pending.signal, SIGKILL);
1385 		signal_wake_up(t, 1);
1386 	}
1387 
1388 	return count;
1389 }
1390 
1391 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1392 					   unsigned long *flags)
1393 {
1394 	struct sighand_struct *sighand;
1395 
1396 	rcu_read_lock();
1397 	for (;;) {
1398 		sighand = rcu_dereference(tsk->sighand);
1399 		if (unlikely(sighand == NULL))
1400 			break;
1401 
1402 		/*
1403 		 * This sighand can be already freed and even reused, but
1404 		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1405 		 * initializes ->siglock: this slab can't go away, it has
1406 		 * the same object type, ->siglock can't be reinitialized.
1407 		 *
1408 		 * We need to ensure that tsk->sighand is still the same
1409 		 * after we take the lock, we can race with de_thread() or
1410 		 * __exit_signal(). In the latter case the next iteration
1411 		 * must see ->sighand == NULL.
1412 		 */
1413 		spin_lock_irqsave(&sighand->siglock, *flags);
1414 		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1415 			break;
1416 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1417 	}
1418 	rcu_read_unlock();
1419 
1420 	return sighand;
1421 }
1422 
1423 #ifdef CONFIG_LOCKDEP
1424 void lockdep_assert_task_sighand_held(struct task_struct *task)
1425 {
1426 	struct sighand_struct *sighand;
1427 
1428 	rcu_read_lock();
1429 	sighand = rcu_dereference(task->sighand);
1430 	if (sighand)
1431 		lockdep_assert_held(&sighand->siglock);
1432 	else
1433 		WARN_ON_ONCE(1);
1434 	rcu_read_unlock();
1435 }
1436 #endif
1437 
1438 /*
1439  * send signal info to all the members of a group
1440  */
1441 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1442 			struct task_struct *p, enum pid_type type)
1443 {
1444 	int ret;
1445 
1446 	rcu_read_lock();
1447 	ret = check_kill_permission(sig, info, p);
1448 	rcu_read_unlock();
1449 
1450 	if (!ret && sig) {
1451 		ret = do_send_sig_info(sig, info, p, type);
1452 		if (!ret && sig == SIGKILL) {
1453 			bool reap = false;
1454 
1455 			trace_android_vh_process_killed(current, &reap);
1456 			trace_android_vh_killed_process(current, p, &reap);
1457 			if (reap)
1458 				add_to_oom_reaper(p);
1459 		}
1460 	}
1461 
1462 	return ret;
1463 }
1464 
1465 /*
1466  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1467  * control characters do (^C, ^Z etc)
1468  * - the caller must hold at least a readlock on tasklist_lock
1469  */
1470 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1471 {
1472 	struct task_struct *p = NULL;
1473 	int retval, success;
1474 
1475 	success = 0;
1476 	retval = -ESRCH;
1477 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1478 		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1479 		success |= !err;
1480 		retval = err;
1481 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1482 	return success ? 0 : retval;
1483 }
1484 
1485 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1486 {
1487 	int error = -ESRCH;
1488 	struct task_struct *p;
1489 
1490 	for (;;) {
1491 		rcu_read_lock();
1492 		p = pid_task(pid, PIDTYPE_PID);
1493 		if (p)
1494 			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1495 		rcu_read_unlock();
1496 		if (likely(!p || error != -ESRCH))
1497 			return error;
1498 
1499 		/*
1500 		 * The task was unhashed in between, try again.  If it
1501 		 * is dead, pid_task() will return NULL, if we race with
1502 		 * de_thread() it will find the new leader.
1503 		 */
1504 	}
1505 }
1506 
1507 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1508 {
1509 	int error;
1510 	rcu_read_lock();
1511 	error = kill_pid_info(sig, info, find_vpid(pid));
1512 	rcu_read_unlock();
1513 	return error;
1514 }
1515 
1516 static inline bool kill_as_cred_perm(const struct cred *cred,
1517 				     struct task_struct *target)
1518 {
1519 	const struct cred *pcred = __task_cred(target);
1520 
1521 	return uid_eq(cred->euid, pcred->suid) ||
1522 	       uid_eq(cred->euid, pcred->uid) ||
1523 	       uid_eq(cred->uid, pcred->suid) ||
1524 	       uid_eq(cred->uid, pcred->uid);
1525 }
1526 
1527 /*
1528  * The usb asyncio usage of siginfo is wrong.  The glibc support
1529  * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1530  * AKA after the generic fields:
1531  *	kernel_pid_t	si_pid;
1532  *	kernel_uid32_t	si_uid;
1533  *	sigval_t	si_value;
1534  *
1535  * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1536  * after the generic fields is:
1537  *	void __user 	*si_addr;
1538  *
1539  * This is a practical problem when there is a 64bit big endian kernel
1540  * and a 32bit userspace.  As the 32bit address will be encoded in the low
1541  * 32bits of the pointer.  Those low 32bits will be stored at a higher
1542  * address than they would appear in a 32 bit pointer.  So userspace will
1543  * not see the address it was expecting for its completions.
1544  *
1545  * There is nothing in the encoding that can allow
1546  * copy_siginfo_to_user32 to detect this confusion of formats, so
1547  * handle this by requiring the caller of kill_pid_usb_asyncio to
1548  * notice when this situation takes place and to store the 32bit
1549  * pointer in sival_int, instead of sival_addr of the sigval_t addr
1550  * parameter.
1551  */
1552 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1553 			 struct pid *pid, const struct cred *cred)
1554 {
1555 	struct kernel_siginfo info;
1556 	struct task_struct *p;
1557 	unsigned long flags;
1558 	int ret = -EINVAL;
1559 
1560 	if (!valid_signal(sig))
1561 		return ret;
1562 
1563 	clear_siginfo(&info);
1564 	info.si_signo = sig;
1565 	info.si_errno = errno;
1566 	info.si_code = SI_ASYNCIO;
1567 	*((sigval_t *)&info.si_pid) = addr;
1568 
1569 	rcu_read_lock();
1570 	p = pid_task(pid, PIDTYPE_PID);
1571 	if (!p) {
1572 		ret = -ESRCH;
1573 		goto out_unlock;
1574 	}
1575 	if (!kill_as_cred_perm(cred, p)) {
1576 		ret = -EPERM;
1577 		goto out_unlock;
1578 	}
1579 	ret = security_task_kill(p, &info, sig, cred);
1580 	if (ret)
1581 		goto out_unlock;
1582 
1583 	if (sig) {
1584 		if (lock_task_sighand(p, &flags)) {
1585 			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1586 			unlock_task_sighand(p, &flags);
1587 		} else
1588 			ret = -ESRCH;
1589 	}
1590 out_unlock:
1591 	rcu_read_unlock();
1592 	return ret;
1593 }
1594 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1595 
1596 /*
1597  * kill_something_info() interprets pid in interesting ways just like kill(2).
1598  *
1599  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1600  * is probably wrong.  Should make it like BSD or SYSV.
1601  */
1602 
1603 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1604 {
1605 	int ret;
1606 
1607 	if (pid > 0)
1608 		return kill_proc_info(sig, info, pid);
1609 
1610 	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1611 	if (pid == INT_MIN)
1612 		return -ESRCH;
1613 
1614 	read_lock(&tasklist_lock);
1615 	if (pid != -1) {
1616 		ret = __kill_pgrp_info(sig, info,
1617 				pid ? find_vpid(-pid) : task_pgrp(current));
1618 	} else {
1619 		int retval = 0, count = 0;
1620 		struct task_struct * p;
1621 
1622 		for_each_process(p) {
1623 			if (task_pid_vnr(p) > 1 &&
1624 					!same_thread_group(p, current)) {
1625 				int err = group_send_sig_info(sig, info, p,
1626 							      PIDTYPE_MAX);
1627 				++count;
1628 				if (err != -EPERM)
1629 					retval = err;
1630 			}
1631 		}
1632 		ret = count ? retval : -ESRCH;
1633 	}
1634 	read_unlock(&tasklist_lock);
1635 
1636 	return ret;
1637 }
1638 
1639 /*
1640  * These are for backward compatibility with the rest of the kernel source.
1641  */
1642 
1643 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1644 {
1645 	/*
1646 	 * Make sure legacy kernel users don't send in bad values
1647 	 * (normal paths check this in check_kill_permission).
1648 	 */
1649 	if (!valid_signal(sig))
1650 		return -EINVAL;
1651 
1652 	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1653 }
1654 EXPORT_SYMBOL(send_sig_info);
1655 
1656 #define __si_special(priv) \
1657 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1658 
1659 int
1660 send_sig(int sig, struct task_struct *p, int priv)
1661 {
1662 	return send_sig_info(sig, __si_special(priv), p);
1663 }
1664 EXPORT_SYMBOL(send_sig);
1665 
1666 void force_sig(int sig)
1667 {
1668 	struct kernel_siginfo info;
1669 
1670 	clear_siginfo(&info);
1671 	info.si_signo = sig;
1672 	info.si_errno = 0;
1673 	info.si_code = SI_KERNEL;
1674 	info.si_pid = 0;
1675 	info.si_uid = 0;
1676 	force_sig_info(&info);
1677 }
1678 EXPORT_SYMBOL(force_sig);
1679 
1680 void force_fatal_sig(int sig)
1681 {
1682 	struct kernel_siginfo info;
1683 
1684 	clear_siginfo(&info);
1685 	info.si_signo = sig;
1686 	info.si_errno = 0;
1687 	info.si_code = SI_KERNEL;
1688 	info.si_pid = 0;
1689 	info.si_uid = 0;
1690 	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1691 }
1692 
1693 void force_exit_sig(int sig)
1694 {
1695 	struct kernel_siginfo info;
1696 
1697 	clear_siginfo(&info);
1698 	info.si_signo = sig;
1699 	info.si_errno = 0;
1700 	info.si_code = SI_KERNEL;
1701 	info.si_pid = 0;
1702 	info.si_uid = 0;
1703 	force_sig_info_to_task(&info, current, HANDLER_EXIT);
1704 }
1705 
1706 /*
1707  * When things go south during signal handling, we
1708  * will force a SIGSEGV. And if the signal that caused
1709  * the problem was already a SIGSEGV, we'll want to
1710  * make sure we don't even try to deliver the signal..
1711  * make sure we don't even try to deliver the signal.
1712 void force_sigsegv(int sig)
1713 {
1714 	if (sig == SIGSEGV)
1715 		force_fatal_sig(SIGSEGV);
1716 	else
1717 		force_sig(SIGSEGV);
1718 }
1719 
1720 int force_sig_fault_to_task(int sig, int code, void __user *addr
1721 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1722 	, struct task_struct *t)
1723 {
1724 	struct kernel_siginfo info;
1725 
1726 	clear_siginfo(&info);
1727 	info.si_signo = sig;
1728 	info.si_errno = 0;
1729 	info.si_code  = code;
1730 	info.si_addr  = addr;
1731 #ifdef __ia64__
1732 	info.si_imm = imm;
1733 	info.si_flags = flags;
1734 	info.si_isr = isr;
1735 #endif
1736 	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1737 }
1738 
1739 int force_sig_fault(int sig, int code, void __user *addr
1740 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1741 {
1742 	return force_sig_fault_to_task(sig, code, addr
1743 				       ___ARCH_SI_IA64(imm, flags, isr), current);
1744 }
1745 
1746 int send_sig_fault(int sig, int code, void __user *addr
1747 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1748 	, struct task_struct *t)
1749 {
1750 	struct kernel_siginfo info;
1751 
1752 	clear_siginfo(&info);
1753 	info.si_signo = sig;
1754 	info.si_errno = 0;
1755 	info.si_code  = code;
1756 	info.si_addr  = addr;
1757 #ifdef __ia64__
1758 	info.si_imm = imm;
1759 	info.si_flags = flags;
1760 	info.si_isr = isr;
1761 #endif
1762 	return send_sig_info(info.si_signo, &info, t);
1763 }
1764 
1765 int force_sig_mceerr(int code, void __user *addr, short lsb)
1766 {
1767 	struct kernel_siginfo info;
1768 
1769 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1770 	clear_siginfo(&info);
1771 	info.si_signo = SIGBUS;
1772 	info.si_errno = 0;
1773 	info.si_code = code;
1774 	info.si_addr = addr;
1775 	info.si_addr_lsb = lsb;
1776 	return force_sig_info(&info);
1777 }
1778 
1779 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1780 {
1781 	struct kernel_siginfo info;
1782 
1783 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1784 	clear_siginfo(&info);
1785 	info.si_signo = SIGBUS;
1786 	info.si_errno = 0;
1787 	info.si_code = code;
1788 	info.si_addr = addr;
1789 	info.si_addr_lsb = lsb;
1790 	return send_sig_info(info.si_signo, &info, t);
1791 }
1792 EXPORT_SYMBOL(send_sig_mceerr);
1793 
1794 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1795 {
1796 	struct kernel_siginfo info;
1797 
1798 	clear_siginfo(&info);
1799 	info.si_signo = SIGSEGV;
1800 	info.si_errno = 0;
1801 	info.si_code  = SEGV_BNDERR;
1802 	info.si_addr  = addr;
1803 	info.si_lower = lower;
1804 	info.si_upper = upper;
1805 	return force_sig_info(&info);
1806 }
1807 
1808 #ifdef SEGV_PKUERR
1809 int force_sig_pkuerr(void __user *addr, u32 pkey)
1810 {
1811 	struct kernel_siginfo info;
1812 
1813 	clear_siginfo(&info);
1814 	info.si_signo = SIGSEGV;
1815 	info.si_errno = 0;
1816 	info.si_code  = SEGV_PKUERR;
1817 	info.si_addr  = addr;
1818 	info.si_pkey  = pkey;
1819 	return force_sig_info(&info);
1820 }
1821 #endif
1822 
1823 int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
1824 {
1825 	struct kernel_siginfo info;
1826 
1827 	clear_siginfo(&info);
1828 	info.si_signo     = SIGTRAP;
1829 	info.si_errno     = 0;
1830 	info.si_code      = TRAP_PERF;
1831 	info.si_addr      = addr;
1832 	info.si_perf_data = sig_data;
1833 	info.si_perf_type = type;
1834 
1835 	return force_sig_info(&info);
1836 }
1837 
1838 /**
1839  * force_sig_seccomp - signals the task to allow in-process syscall emulation
1840  * @syscall: syscall number to send to userland
1841  * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: if true, deliver with HANDLER_EXIT so the task exits (and may dump core)
1842  *
1843  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1844  */
1845 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1846 {
1847 	struct kernel_siginfo info;
1848 
1849 	clear_siginfo(&info);
1850 	info.si_signo = SIGSYS;
1851 	info.si_code = SYS_SECCOMP;
1852 	info.si_call_addr = (void __user *)KSTK_EIP(current);
1853 	info.si_errno = reason;
1854 	info.si_arch = syscall_get_arch(current);
1855 	info.si_syscall = syscall;
1856 	return force_sig_info_to_task(&info, current,
1857 		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1858 }
1859 
1860 /* For the crazy architectures that include trap information in
1861  * the errno field, instead of an actual errno value.
1862  */
1863 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1864 {
1865 	struct kernel_siginfo info;
1866 
1867 	clear_siginfo(&info);
1868 	info.si_signo = SIGTRAP;
1869 	info.si_errno = errno;
1870 	info.si_code  = TRAP_HWBKPT;
1871 	info.si_addr  = addr;
1872 	return force_sig_info(&info);
1873 }
1874 
1875 /* For the rare architectures that include trap information using
1876  * si_trapno.
1877  */
1878 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1879 {
1880 	struct kernel_siginfo info;
1881 
1882 	clear_siginfo(&info);
1883 	info.si_signo = sig;
1884 	info.si_errno = 0;
1885 	info.si_code  = code;
1886 	info.si_addr  = addr;
1887 	info.si_trapno = trapno;
1888 	return force_sig_info(&info);
1889 }
1890 
1891 /* For the rare architectures that include trap information using
1892  * si_trapno.
1893  */
1894 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1895 			  struct task_struct *t)
1896 {
1897 	struct kernel_siginfo info;
1898 
1899 	clear_siginfo(&info);
1900 	info.si_signo = sig;
1901 	info.si_errno = 0;
1902 	info.si_code  = code;
1903 	info.si_addr  = addr;
1904 	info.si_trapno = trapno;
1905 	return send_sig_info(info.si_signo, &info, t);
1906 }
1907 
1908 int kill_pgrp(struct pid *pid, int sig, int priv)
1909 {
1910 	int ret;
1911 
1912 	read_lock(&tasklist_lock);
1913 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1914 	read_unlock(&tasklist_lock);
1915 
1916 	return ret;
1917 }
1918 EXPORT_SYMBOL(kill_pgrp);
1919 
1920 int kill_pid(struct pid *pid, int sig, int priv)
1921 {
1922 	return kill_pid_info(sig, __si_special(priv), pid);
1923 }
1924 EXPORT_SYMBOL(kill_pid);
1925 
1926 /*
1927  * These functions support sending signals using preallocated sigqueue
1928  * structures.  This is needed "because realtime applications cannot
1929  * afford to lose notifications of asynchronous events, like timer
1930  * expirations or I/O completions".  In the case of POSIX Timers
1931  * we allocate the sigqueue structure from the timer_create.  If this
1932  * allocation fails we are able to report the failure to the application
1933  * with an EAGAIN error.
1934  */
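/*
 * Illustrative sketch (editor's note, not part of the original source):
 * the POSIX timer code uses this API roughly as
 *
 *	timer->sigq = sigqueue_alloc();		at timer_create(), may fail with EAGAIN
 *	...
 *	send_sigqueue(timer->sigq, pid, type);	at each timer expiry
 *	...
 *	sigqueue_free(timer->sigq);		at timer deletion
 *
 * so the queue entry for the notification is guaranteed to exist once
 * timer_create() has succeeded.
 */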
1935 struct sigqueue *sigqueue_alloc(void)
1936 {
1937 	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1938 }
1939 
1940 void sigqueue_free(struct sigqueue *q)
1941 {
1942 	unsigned long flags;
1943 	spinlock_t *lock = &current->sighand->siglock;
1944 
1945 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1946 	/*
1947 	 * We must hold ->siglock while testing q->list
1948 	 * to serialize with collect_signal() or with
1949 	 * __exit_signal()->flush_sigqueue().
1950 	 */
1951 	spin_lock_irqsave(lock, flags);
1952 	q->flags &= ~SIGQUEUE_PREALLOC;
1953 	/*
1954 	 * If it is queued it will be freed when dequeued,
1955 	 * like the "regular" sigqueue.
1956 	 */
1957 	if (!list_empty(&q->list))
1958 		q = NULL;
1959 	spin_unlock_irqrestore(lock, flags);
1960 
1961 	if (q)
1962 		__sigqueue_free(q);
1963 }
1964 
1965 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1966 {
1967 	int sig = q->info.si_signo;
1968 	struct sigpending *pending;
1969 	struct task_struct *t;
1970 	unsigned long flags;
1971 	int ret, result;
1972 
1973 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1974 
1975 	ret = -1;
1976 	rcu_read_lock();
1977 	t = pid_task(pid, type);
1978 	if (!t || !likely(lock_task_sighand(t, &flags)))
1979 		goto ret;
1980 
1981 	ret = 1; /* the signal is ignored */
1982 	result = TRACE_SIGNAL_IGNORED;
1983 	if (!prepare_signal(sig, t, false))
1984 		goto out;
1985 
1986 	ret = 0;
1987 	if (unlikely(!list_empty(&q->list))) {
1988 		/*
1989 		 * If an SI_TIMER entry is already queued, just increment
1990 		 * the overrun count.
1991 		 */
1992 		BUG_ON(q->info.si_code != SI_TIMER);
1993 		q->info.si_overrun++;
1994 		result = TRACE_SIGNAL_ALREADY_PENDING;
1995 		goto out;
1996 	}
1997 	q->info.si_overrun = 0;
1998 
1999 	signalfd_notify(t, sig);
2000 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2001 	list_add_tail(&q->list, &pending->list);
2002 	sigaddset(&pending->signal, sig);
2003 	complete_signal(sig, t, type);
2004 	result = TRACE_SIGNAL_DELIVERED;
2005 out:
2006 	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2007 	unlock_task_sighand(t, &flags);
2008 ret:
2009 	rcu_read_unlock();
2010 	return ret;
2011 }
2012 
2013 static void do_notify_pidfd(struct task_struct *task)
2014 {
2015 	struct pid *pid;
2016 
2017 	WARN_ON(task->exit_state == 0);
2018 	pid = task_pid(task);
2019 	wake_up_all(&pid->wait_pidfd);
2020 }
2021 
2022 /*
2023  * Let a parent know about the death of a child.
2024  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2025  *
2026  * Returns true if our parent ignored us and so we've switched to
2027  * self-reaping.
2028  */
2029 bool do_notify_parent(struct task_struct *tsk, int sig)
2030 {
2031 	struct kernel_siginfo info;
2032 	unsigned long flags;
2033 	struct sighand_struct *psig;
2034 	bool autoreap = false;
2035 	u64 utime, stime;
2036 
2037 	WARN_ON_ONCE(sig == -1);
2038 
2039 	/* do_notify_parent_cldstop should have been called instead.  */
2040 	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2041 
2042 	WARN_ON_ONCE(!tsk->ptrace &&
2043 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2044 
2045 	/* Wake up all pidfd waiters */
2046 	do_notify_pidfd(tsk);
2047 
2048 	if (sig != SIGCHLD) {
2049 		/*
2050 		 * This is only possible if parent == real_parent.
2051 		 * Check if it has changed security domain.
2052 		 */
2053 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2054 			sig = SIGCHLD;
2055 	}
2056 
2057 	clear_siginfo(&info);
2058 	info.si_signo = sig;
2059 	info.si_errno = 0;
2060 	/*
2061 	 * We are under tasklist_lock here so our parent is tied to
2062 	 * us and cannot change.
2063 	 *
2064 	 * task_active_pid_ns will always return the same pid namespace
2065 	 * until a task passes through release_task.
2066 	 *
2067 	 * write_lock() currently calls preempt_disable() which is the
2068 	 * same as rcu_read_lock(), but according to Oleg, this is not
2069 	 * correct to rely on this
2070 	 */
2071 	rcu_read_lock();
2072 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2073 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2074 				       task_uid(tsk));
2075 	rcu_read_unlock();
2076 
2077 	task_cputime(tsk, &utime, &stime);
2078 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2079 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2080 
2081 	info.si_status = tsk->exit_code & 0x7f;
2082 	if (tsk->exit_code & 0x80)
2083 		info.si_code = CLD_DUMPED;
2084 	else if (tsk->exit_code & 0x7f)
2085 		info.si_code = CLD_KILLED;
2086 	else {
2087 		info.si_code = CLD_EXITED;
2088 		info.si_status = tsk->exit_code >> 8;
2089 	}
2090 
2091 	psig = tsk->parent->sighand;
2092 	spin_lock_irqsave(&psig->siglock, flags);
2093 	if (!tsk->ptrace && sig == SIGCHLD &&
2094 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2095 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2096 		/*
2097 		 * We are exiting and our parent doesn't care.  POSIX.1
2098 		 * defines special semantics for setting SIGCHLD to SIG_IGN
2099 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2100 		 * automatically and not left for our parent's wait4 call.
2101 		 * Rather than having the parent do it as a magic kind of
2102 		 * signal handler, we just set this to tell do_exit that we
2103 		 * can be cleaned up without becoming a zombie.  Note that
2104 		 * we still call __wake_up_parent in this case, because a
2105 		 * blocked sys_wait4 might now return -ECHILD.
2106 		 *
2107 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2108 		 * is implementation-defined: we do (if you don't want
2109 		 * it, just use SIG_IGN instead).
2110 		 */
2111 		autoreap = true;
2112 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2113 			sig = 0;
2114 	}
2115 	/*
2116 	 * Send with __send_signal as si_pid and si_uid are in the
2117 	 * parent's namespaces.
2118 	 */
2119 	if (valid_signal(sig) && sig)
2120 		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2121 	__wake_up_parent(tsk, tsk->parent);
2122 	spin_unlock_irqrestore(&psig->siglock, flags);
2123 
2124 	return autoreap;
2125 }
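
/*
 * Illustrative examples (editor's note, not part of the original source):
 * a task that called exit(3) has tsk->exit_code == 0x0300, so the parent
 * sees CLD_EXITED with si_status == 3; a task killed by SIGSEGV with a
 * core dump has exit_code == (SIGSEGV | 0x80), so the parent sees
 * CLD_DUMPED with si_status == SIGSEGV.
 */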
2126 
2127 /**
2128  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2129  * @tsk: task reporting the state change
2130  * @for_ptracer: the notification is for ptracer
2131  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2132  *
2133  * Notify @tsk's parent that the stopped/continued state has changed.  If
2134  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2135  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2136  *
2137  * CONTEXT:
2138  * Must be called with tasklist_lock at least read locked.
2139  */
2140 static void do_notify_parent_cldstop(struct task_struct *tsk,
2141 				     bool for_ptracer, int why)
2142 {
2143 	struct kernel_siginfo info;
2144 	unsigned long flags;
2145 	struct task_struct *parent;
2146 	struct sighand_struct *sighand;
2147 	u64 utime, stime;
2148 
2149 	if (for_ptracer) {
2150 		parent = tsk->parent;
2151 	} else {
2152 		tsk = tsk->group_leader;
2153 		parent = tsk->real_parent;
2154 	}
2155 
2156 	clear_siginfo(&info);
2157 	info.si_signo = SIGCHLD;
2158 	info.si_errno = 0;
2159 	/*
2160 	 * see comment in do_notify_parent() about the following 4 lines
2161 	 */
2162 	rcu_read_lock();
2163 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2164 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2165 	rcu_read_unlock();
2166 
2167 	task_cputime(tsk, &utime, &stime);
2168 	info.si_utime = nsec_to_clock_t(utime);
2169 	info.si_stime = nsec_to_clock_t(stime);
2170 
2171  	info.si_code = why;
2172  	switch (why) {
2173  	case CLD_CONTINUED:
2174  		info.si_status = SIGCONT;
2175  		break;
2176  	case CLD_STOPPED:
2177  		info.si_status = tsk->signal->group_exit_code & 0x7f;
2178  		break;
2179  	case CLD_TRAPPED:
2180  		info.si_status = tsk->exit_code & 0x7f;
2181  		break;
2182  	default:
2183  		BUG();
2184  	}
2185 
2186 	sighand = parent->sighand;
2187 	spin_lock_irqsave(&sighand->siglock, flags);
2188 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2189 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2190 		__group_send_sig_info(SIGCHLD, &info, parent);
2191 	/*
2192 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2193 	 */
2194 	__wake_up_parent(tsk, parent);
2195 	spin_unlock_irqrestore(&sighand->siglock, flags);
2196 }
2197 
2198 static inline bool may_ptrace_stop(void)
2199 {
2200 	if (!likely(current->ptrace))
2201 		return false;
2202 	/*
2203 	 * Are we in the middle of do_coredump?
2204 	 * If so and our tracer is also part of the coredump stopping
2205 	 * is a deadlock situation, and pointless because our tracer
2206 	 * is dead so don't allow us to stop.
2207 	 * If SIGKILL was already sent before the caller unlocked
2208 	 * ->siglock we must see ->core_state != NULL. Otherwise it
2209 	 * is safe to enter schedule().
2210 	 *
2211 	 * This is almost outdated: a task with a pending SIGKILL can't
2212 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2213 	 * after SIGKILL was already dequeued.
2214 	 */
2215 	if (unlikely(current->mm->core_state) &&
2216 	    unlikely(current->mm == current->parent->mm))
2217 		return false;
2218 
2219 	return true;
2220 }
2221 
2222 
2223 /*
2224  * This must be called with current->sighand->siglock held.
2225  *
2226  * This should be the path for all ptrace stops.
2227  * We always set current->last_siginfo while stopped here.
2228  * That makes it a way to test a stopped process for
2229  * being ptrace-stopped vs being job-control-stopped.
2230  *
2231  * If we actually decide not to stop at all because the tracer
2232  * is gone, we keep current->exit_code unless clear_code.
2233  */
2234 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2235 	__releases(&current->sighand->siglock)
2236 	__acquires(&current->sighand->siglock)
2237 {
2238 	bool gstop_done = false;
2239 
2240 	if (arch_ptrace_stop_needed(exit_code, info)) {
2241 		/*
2242 		 * The arch code has something special to do before a
2243 		 * ptrace stop.  This is allowed to block, e.g. for faults
2244 		 * on user stack pages.  We can't keep the siglock while
2245 		 * calling arch_ptrace_stop, so we must release it now.
2246 		 * To preserve proper semantics, we must do this before
2247 		 * any signal bookkeeping like checking group_stop_count.
2248 		 */
2249 		spin_unlock_irq(&current->sighand->siglock);
2250 		arch_ptrace_stop(exit_code, info);
2251 		spin_lock_irq(&current->sighand->siglock);
2252 	}
2253 
2254 	/*
2255 	 * schedule() will not sleep if there is a pending signal that
2256 	 * can awaken the task.
2257 	 */
2258 	set_special_state(TASK_TRACED);
2259 
2260 	/*
2261 	 * We're committing to trapping.  TRACED should be visible before
2262 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2263 	 * Also, transition to TRACED and updates to ->jobctl should be
2264 	 * atomic with respect to siglock and should be done after the arch
2265 	 * hook as siglock is released and regrabbed across it.
2266 	 *
2267 	 *     TRACER				    TRACEE
2268 	 *
2269 	 *     ptrace_attach()
2270 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2271 	 *     do_wait()
2272 	 *       set_current_state()                smp_wmb();
2273 	 *       ptrace_do_wait()
2274 	 *         wait_task_stopped()
2275 	 *           task_stopped_code()
2276 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2277 	 */
2278 	smp_wmb();
2279 
2280 	current->last_siginfo = info;
2281 	current->exit_code = exit_code;
2282 
2283 	/*
2284 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2285 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2286 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2287 	 * could be clear now.  We act as if SIGCONT is received after
2288 	 * TASK_TRACED is entered - ignore it.
2289 	 */
2290 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2291 		gstop_done = task_participate_group_stop(current);
2292 
2293 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2294 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2295 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2296 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2297 
2298 	/* entering a trap, clear TRAPPING */
2299 	task_clear_jobctl_trapping(current);
2300 
2301 	spin_unlock_irq(&current->sighand->siglock);
2302 	read_lock(&tasklist_lock);
2303 	if (may_ptrace_stop()) {
2304 		/*
2305 		 * Notify parents of the stop.
2306 		 *
2307 		 * While ptraced, there are two parents - the ptracer and
2308 		 * the real_parent of the group_leader.  The ptracer should
2309 		 * know about every stop while the real parent is only
2310 		 * interested in the completion of group stop.  The states
2311 		 * for the two don't interact with each other.  Notify
2312 		 * separately unless they're gonna be duplicates.
2313 		 */
2314 		do_notify_parent_cldstop(current, true, why);
2315 		if (gstop_done && ptrace_reparented(current))
2316 			do_notify_parent_cldstop(current, false, why);
2317 
2318 		/*
2319 		 * Don't want to allow preemption here, because
2320 		 * sys_ptrace() needs this task to be inactive.
2321 		 *
2322 		 * XXX: implement read_unlock_no_resched().
2323 		 */
2324 		preempt_disable();
2325 		read_unlock(&tasklist_lock);
2326 		cgroup_enter_frozen();
2327 		preempt_enable_no_resched();
2328 		freezable_schedule();
2329 		cgroup_leave_frozen(true);
2330 	} else {
2331 		/*
2332 		 * By the time we got the lock, our tracer went away.
2333 		 * Don't drop the lock yet, another tracer may come.
2334 		 *
2335 		 * If @gstop_done, the ptracer went away between group stop
2336 		 * completion and here.  During detach, it would have set
2337 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2338 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2339 		 * the real parent of the group stop completion is enough.
2340 		 */
2341 		if (gstop_done)
2342 			do_notify_parent_cldstop(current, false, why);
2343 
2344 		/* tasklist protects us from ptrace_freeze_traced() */
2345 		__set_current_state(TASK_RUNNING);
2346 		if (clear_code)
2347 			current->exit_code = 0;
2348 		read_unlock(&tasklist_lock);
2349 	}
2350 
2351 	/*
2352 	 * We are back.  Now reacquire the siglock before touching
2353 	 * last_siginfo, so that we are sure to have synchronized with
2354 	 * any signal-sending on another CPU that wants to examine it.
2355 	 */
2356 	spin_lock_irq(&current->sighand->siglock);
2357 	current->last_siginfo = NULL;
2358 
2359 	/* LISTENING can be set only during STOP traps, clear it */
2360 	current->jobctl &= ~JOBCTL_LISTENING;
2361 
2362 	/*
2363 	 * Queued signals ignored us while we were stopped for tracing.
2364 	 * So check for any that we should take before resuming user mode.
2365 	 * This sets TIF_SIGPENDING, but never clears it.
2366 	 */
2367 	recalc_sigpending_tsk(current);
2368 }
2369 
2370 static void ptrace_do_notify(int signr, int exit_code, int why)
2371 {
2372 	kernel_siginfo_t info;
2373 
2374 	clear_siginfo(&info);
2375 	info.si_signo = signr;
2376 	info.si_code = exit_code;
2377 	info.si_pid = task_pid_vnr(current);
2378 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2379 
2380 	/* Let the debugger run.  */
2381 	ptrace_stop(exit_code, why, 1, &info);
2382 }
2383 
2384 void ptrace_notify(int exit_code)
2385 {
2386 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2387 	if (unlikely(current->task_works))
2388 		task_work_run();
2389 
2390 	spin_lock_irq(&current->sighand->siglock);
2391 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2392 	spin_unlock_irq(&current->sighand->siglock);
2393 }
2394 
2395 /**
2396  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2397  * @signr: signr causing group stop if initiating
2398  *
2399  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2400  * and participate in it.  If already set, participate in the existing
2401  * group stop.  If participated in a group stop (and thus slept), %true is
2402  * returned with siglock released.
2403  *
2404  * If ptraced, this function doesn't handle stop itself.  Instead,
2405  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2406  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2407  * places afterwards.
2408  *
2409  * CONTEXT:
2410  * Must be called with @current->sighand->siglock held, which is released
2411  * on %true return.
2412  *
2413  * RETURNS:
2414  * %false if group stop is already cancelled or ptrace trap is scheduled.
2415  * %true if participated in group stop.
2416  */
2417 static bool do_signal_stop(int signr)
2418 	__releases(&current->sighand->siglock)
2419 {
2420 	struct signal_struct *sig = current->signal;
2421 
2422 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2423 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2424 		struct task_struct *t;
2425 
2426 		/* signr will be recorded in task->jobctl for retries */
2427 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2428 
2429 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2430 		    unlikely(signal_group_exit(sig)))
2431 			return false;
2432 		/*
2433 		 * There is no group stop already in progress.  We must
2434 		 * initiate one now.
2435 		 *
2436 		 * While ptraced, a task may be resumed while group stop is
2437 		 * still in effect and then receive a stop signal and
2438 		 * initiate another group stop.  This deviates from the
2439 		 * usual behavior as two consecutive stop signals can't
2440 		 * cause two group stops when !ptraced.  That is why we
2441 		 * also check !task_is_stopped(t) below.
2442 		 *
2443 		 * The condition can be distinguished by testing whether
2444 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2445 		 * group_exit_code in such case.
2446 		 *
2447 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2448 		 * an intervening stop signal is required to cause two
2449 		 * continued events regardless of ptrace.
2450 		 */
2451 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2452 			sig->group_exit_code = signr;
2453 
2454 		sig->group_stop_count = 0;
2455 
2456 		if (task_set_jobctl_pending(current, signr | gstop))
2457 			sig->group_stop_count++;
2458 
2459 		t = current;
2460 		while_each_thread(current, t) {
2461 			/*
2462 			 * Setting state to TASK_STOPPED for a group
2463 			 * stop is always done with the siglock held,
2464 			 * so this check has no races.
2465 			 */
2466 			if (!task_is_stopped(t) &&
2467 			    task_set_jobctl_pending(t, signr | gstop)) {
2468 				sig->group_stop_count++;
2469 				if (likely(!(t->ptrace & PT_SEIZED)))
2470 					signal_wake_up(t, 0);
2471 				else
2472 					ptrace_trap_notify(t);
2473 			}
2474 		}
2475 	}
2476 
2477 	if (likely(!current->ptrace)) {
2478 		int notify = 0;
2479 
2480 		/*
2481 		 * If there are no other threads in the group, or if there
2482 		 * is a group stop in progress and we are the last to stop,
2483 		 * report to the parent.
2484 		 */
2485 		if (task_participate_group_stop(current))
2486 			notify = CLD_STOPPED;
2487 
2488 		set_special_state(TASK_STOPPED);
2489 		spin_unlock_irq(&current->sighand->siglock);
2490 
2491 		/*
2492 		 * Notify the parent of the group stop completion.  Because
2493 		 * we're not holding either the siglock or tasklist_lock
2494 		 * here, ptracer may attach in between; however, this is for
2495 		 * group stop and should always be delivered to the real
2496 		 * parent of the group leader.  The new ptracer will get
2497 		 * its notification when this task transitions into
2498 		 * TASK_TRACED.
2499 		 */
2500 		if (notify) {
2501 			read_lock(&tasklist_lock);
2502 			do_notify_parent_cldstop(current, false, notify);
2503 			read_unlock(&tasklist_lock);
2504 		}
2505 
2506 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2507 		cgroup_enter_frozen();
2508 		freezable_schedule();
2509 		return true;
2510 	} else {
2511 		/*
2512 		 * While ptraced, group stop is handled by STOP trap.
2513 		 * Schedule it and let the caller deal with it.
2514 		 */
2515 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2516 		return false;
2517 	}
2518 }
2519 
2520 /**
2521  * do_jobctl_trap - take care of ptrace jobctl traps
2522  *
2523  * When PT_SEIZED, it's used for both group stop and explicit
2524  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2525  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2526  * the stop signal; otherwise, %SIGTRAP.
2527  *
2528  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2529  * number as exit_code and no siginfo.
2530  *
2531  * CONTEXT:
2532  * Must be called with @current->sighand->siglock held, which may be
2533  * released and re-acquired before returning with intervening sleep.
2534  */
2535 static void do_jobctl_trap(void)
2536 {
2537 	struct signal_struct *signal = current->signal;
2538 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2539 
2540 	if (current->ptrace & PT_SEIZED) {
2541 		if (!signal->group_stop_count &&
2542 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2543 			signr = SIGTRAP;
2544 		WARN_ON_ONCE(!signr);
2545 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2546 				 CLD_STOPPED);
2547 	} else {
2548 		WARN_ON_ONCE(!signr);
2549 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2550 		current->exit_code = 0;
2551 	}
2552 }
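
/*
 * Illustrative example (editor's note, not part of the original source):
 * for a PT_SEIZED tracee trapping for a SIGTSTP-initiated group stop,
 * the exit_code passed above is SIGTSTP | (PTRACE_EVENT_STOP << 8),
 * which is roughly what the tracer sees in the upper bytes of its
 * waitpid() status for a group-stop under PTRACE_SEIZE (see ptrace(2)).
 */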
2553 
2554 /**
2555  * do_freezer_trap - handle the freezer jobctl trap
2556  *
2557  * Puts the task into the frozen state, unless the task is about to quit;
2558  * in that case it drops JOBCTL_TRAP_FREEZE.
2559  *
2560  * CONTEXT:
2561  * Must be called with @current->sighand->siglock held,
2562  * which is always released before returning.
2563  */
2564 static void do_freezer_trap(void)
2565 	__releases(&current->sighand->siglock)
2566 {
2567 	/*
2568 	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2569 	 * let's make another loop to give it a chance to be handled.
2570 	 * In any case, we'll return back.
2571 	 */
2572 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2573 	     JOBCTL_TRAP_FREEZE) {
2574 		spin_unlock_irq(&current->sighand->siglock);
2575 		return;
2576 	}
2577 
2578 	/*
2579 	 * Now we're sure that there is no pending fatal signal and no
2580 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2581 	 * immediately (if there is a non-fatal signal pending), and
2582 	 * put the task into sleep.
2583 	 */
2584 	__set_current_state(TASK_INTERRUPTIBLE);
2585 	clear_thread_flag(TIF_SIGPENDING);
2586 	spin_unlock_irq(&current->sighand->siglock);
2587 	cgroup_enter_frozen();
2588 	freezable_schedule();
2589 }
2590 
2591 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2592 {
2593 	/*
2594 	 * We do not check sig_kernel_stop(signr) but set this marker
2595 	 * unconditionally because we do not know whether debugger will
2596 	 * change signr. This flag has no meaning unless we are going
2597 	 * to stop after return from ptrace_stop(). In this case it will
2598 	 * be checked in do_signal_stop(), we should only stop if it was
2599 	 * not cleared by SIGCONT while we were sleeping. See also the
2600 	 * comment in dequeue_signal().
2601 	 */
2602 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2603 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2604 
2605 	/* We're back.  Did the debugger cancel the sig?  */
2606 	signr = current->exit_code;
2607 	if (signr == 0)
2608 		return signr;
2609 
2610 	current->exit_code = 0;
2611 
2612 	/*
2613 	 * Update the siginfo structure if the signal has
2614 	 * changed.  If the debugger wanted something
2615 	 * specific in the siginfo structure then it should
2616 	 * have updated *info via PTRACE_SETSIGINFO.
2617 	 */
2618 	if (signr != info->si_signo) {
2619 		clear_siginfo(info);
2620 		info->si_signo = signr;
2621 		info->si_errno = 0;
2622 		info->si_code = SI_USER;
2623 		rcu_read_lock();
2624 		info->si_pid = task_pid_vnr(current->parent);
2625 		info->si_uid = from_kuid_munged(current_user_ns(),
2626 						task_uid(current->parent));
2627 		rcu_read_unlock();
2628 	}
2629 
2630 	/* If the (new) signal is now blocked, requeue it.  */
2631 	if (sigismember(&current->blocked, signr)) {
2632 		send_signal(signr, info, current, PIDTYPE_PID);
2633 		signr = 0;
2634 	}
2635 
2636 	return signr;
2637 }
2638 
2639 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2640 {
2641 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2642 	case SIL_FAULT:
2643 	case SIL_FAULT_TRAPNO:
2644 	case SIL_FAULT_MCEERR:
2645 	case SIL_FAULT_BNDERR:
2646 	case SIL_FAULT_PKUERR:
2647 	case SIL_FAULT_PERF_EVENT:
2648 		ksig->info.si_addr = arch_untagged_si_addr(
2649 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2650 		break;
2651 	case SIL_KILL:
2652 	case SIL_TIMER:
2653 	case SIL_POLL:
2654 	case SIL_CHLD:
2655 	case SIL_RT:
2656 	case SIL_SYS:
2657 		break;
2658 	}
2659 }
2660 
2661 bool get_signal(struct ksignal *ksig)
2662 {
2663 	struct sighand_struct *sighand = current->sighand;
2664 	struct signal_struct *signal = current->signal;
2665 	int signr;
2666 
2667 	if (unlikely(current->task_works))
2668 		task_work_run();
2669 
2670 	/*
2671 	 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2672 	 * that the arch handlers don't all have to do it. If we get here
2673 	 * without TIF_SIGPENDING, just exit after running signal work.
2674 	 */
2675 	if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2676 		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2677 			tracehook_notify_signal();
2678 		if (!task_sigpending(current))
2679 			return false;
2680 	}
2681 
2682 	if (unlikely(uprobe_deny_signal()))
2683 		return false;
2684 
2685 	/*
2686 	 * Do this once, we can't return to user-mode if freezing() == T.
2687 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2688 	 * thus do not need another check after return.
2689 	 */
2690 	try_to_freeze();
2691 
2692 relock:
2693 	spin_lock_irq(&sighand->siglock);
2694 
2695 	/*
2696 	 * Every stopped thread goes here after wakeup. Check to see if
2697 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2698 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2699 	 */
2700 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2701 		int why;
2702 
2703 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2704 			why = CLD_CONTINUED;
2705 		else
2706 			why = CLD_STOPPED;
2707 
2708 		signal->flags &= ~SIGNAL_CLD_MASK;
2709 
2710 		spin_unlock_irq(&sighand->siglock);
2711 
2712 		/*
2713 		 * Notify the parent that we're continuing.  This event is
2714 		 * always per-process and doesn't make a whole lot of sense
2715 		 * for ptracers, who shouldn't consume the state via
2716 		 * wait(2) either, but, for backward compatibility, notify
2717 		 * the ptracer of the group leader too unless it's gonna be
2718 		 * a duplicate.
2719 		 */
2720 		read_lock(&tasklist_lock);
2721 		do_notify_parent_cldstop(current, false, why);
2722 
2723 		if (ptrace_reparented(current->group_leader))
2724 			do_notify_parent_cldstop(current->group_leader,
2725 						true, why);
2726 		read_unlock(&tasklist_lock);
2727 
2728 		goto relock;
2729 	}
2730 
2731 	for (;;) {
2732 		struct k_sigaction *ka;
2733 
2734 		/* Has this task already been marked for death? */
2735 		if (signal_group_exit(signal)) {
2736 			ksig->info.si_signo = signr = SIGKILL;
2737 			sigdelset(&current->pending.signal, SIGKILL);
2738 			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2739 				&sighand->action[SIGKILL - 1]);
2740 			recalc_sigpending();
2741 			goto fatal;
2742 		}
2743 
2744 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2745 		    do_signal_stop(0))
2746 			goto relock;
2747 
2748 		if (unlikely(current->jobctl &
2749 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2750 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2751 				do_jobctl_trap();
2752 				spin_unlock_irq(&sighand->siglock);
2753 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2754 				do_freezer_trap();
2755 
2756 			goto relock;
2757 		}
2758 
2759 		/*
2760 		 * If the task is leaving the frozen state, let's update
2761 		 * cgroup counters and reset the frozen bit.
2762 		 */
2763 		if (unlikely(cgroup_task_frozen(current))) {
2764 			spin_unlock_irq(&sighand->siglock);
2765 			cgroup_leave_frozen(false);
2766 			goto relock;
2767 		}
2768 
2769 		/*
2770 		 * Signals generated by the execution of an instruction
2771 		 * need to be delivered before any other pending signals
2772 		 * so that the instruction pointer in the signal stack
2773 		 * frame points to the faulting instruction.
2774 		 */
2775 		signr = dequeue_synchronous_signal(&ksig->info);
2776 		if (!signr)
2777 			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2778 
2779 		if (!signr)
2780 			break; /* will return 0 */
2781 
2782 		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2783 		    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
2784 			signr = ptrace_signal(signr, &ksig->info);
2785 			if (!signr)
2786 				continue;
2787 		}
2788 
2789 		ka = &sighand->action[signr-1];
2790 
2791 		/* Trace actually delivered signals. */
2792 		trace_signal_deliver(signr, &ksig->info, ka);
2793 
2794 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2795 			continue;
2796 		if (ka->sa.sa_handler != SIG_DFL) {
2797 			/* Run the handler.  */
2798 			ksig->ka = *ka;
2799 
2800 			if (ka->sa.sa_flags & SA_ONESHOT)
2801 				ka->sa.sa_handler = SIG_DFL;
2802 
2803 			break; /* will return non-zero "signr" value */
2804 		}
2805 
2806 		/*
2807 		 * Now we are doing the default action for this signal.
2808 		 */
2809 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2810 			continue;
2811 
2812 		/*
2813 		 * Global init gets no signals it doesn't want.
2814 		 * Container-init gets no signals it doesn't want from same
2815 		 * container.
2816 		 *
2817 		 * Note that if global/container-init sees a sig_kernel_only()
2818 		 * signal here, the signal must have been generated internally
2819 		 * or must have come from an ancestor namespace. In either
2820 		 * case, the signal cannot be dropped.
2821 		 */
2822 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2823 				!sig_kernel_only(signr))
2824 			continue;
2825 
2826 		if (sig_kernel_stop(signr)) {
2827 			/*
2828 			 * The default action is to stop all threads in
2829 			 * the thread group.  The job control signals
2830 			 * do nothing in an orphaned pgrp, but SIGSTOP
2831 			 * always works.  Note that siglock needs to be
2832 			 * dropped during the call to is_orphaned_pgrp()
2833 			 * because of lock ordering with tasklist_lock.
2834 			 * This allows an intervening SIGCONT to be posted.
2835 			 * We need to check for that and bail out if necessary.
2836 			 */
2837 			if (signr != SIGSTOP) {
2838 				spin_unlock_irq(&sighand->siglock);
2839 
2840 				/* signals can be posted during this window */
2841 
2842 				if (is_current_pgrp_orphaned())
2843 					goto relock;
2844 
2845 				spin_lock_irq(&sighand->siglock);
2846 			}
2847 
2848 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2849 				/* It released the siglock.  */
2850 				goto relock;
2851 			}
2852 
2853 			/*
2854 			 * We didn't actually stop, due to a race
2855 			 * with SIGCONT or something like that.
2856 			 */
2857 			continue;
2858 		}
2859 
2860 	fatal:
2861 		spin_unlock_irq(&sighand->siglock);
2862 		if (unlikely(cgroup_task_frozen(current)))
2863 			cgroup_leave_frozen(true);
2864 
2865 		/*
2866 		 * Anything else is fatal, maybe with a core dump.
2867 		 */
2868 		current->flags |= PF_SIGNALED;
2869 
2870 		if (sig_kernel_coredump(signr)) {
2871 			if (print_fatal_signals)
2872 				print_fatal_signal(ksig->info.si_signo);
2873 			proc_coredump_connector(current);
2874 			/*
2875 			 * If it was able to dump core, this kills all
2876 			 * other threads in the group and synchronizes with
2877 			 * their demise.  If we lost the race with another
2878 			 * thread getting here, it set group_exit_code
2879 			 * first and our do_group_exit call below will use
2880 			 * that value and ignore the one we pass it.
2881 			 */
2882 			do_coredump(&ksig->info);
2883 		}
2884 
2885 		/*
2886 		 * PF_IO_WORKER threads will catch and exit on fatal signals
2887 		 * themselves. They have cleanup that must be performed, so
2888 		 * we cannot call do_exit() on their behalf.
2889 		 */
2890 		if (current->flags & PF_IO_WORKER)
2891 			goto out;
2892 
2893 		/*
2894 		 * Death signals, no core dump.
2895 		 */
2896 		do_group_exit(ksig->info.si_signo);
2897 		/* NOTREACHED */
2898 	}
2899 	spin_unlock_irq(&sighand->siglock);
2900 out:
2901 	ksig->sig = signr;
2902 
2903 	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2904 		hide_si_addr_tag_bits(ksig);
2905 
2906 	return ksig->sig > 0;
2907 }
2908 
2909 /**
2910  * signal_delivered - report that a signal was successfully delivered
2911  * @ksig:		kernel signal struct
2912  * @stepping:		nonzero if debugger single-step or block-step in use
2913  *
2914  * This function should be called when a signal has successfully been
2915  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2916  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2917  * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2918  */
2919 static void signal_delivered(struct ksignal *ksig, int stepping)
2920 {
2921 	sigset_t blocked;
2922 
2923 	/* A signal was successfully delivered, and the
2924 	   saved sigmask was stored on the signal frame,
2925 	   and will be restored by sigreturn.  So we can
2926 	   simply clear the restore sigmask flag.  */
2927 	clear_restore_sigmask();
2928 
2929 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2930 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2931 		sigaddset(&blocked, ksig->sig);
2932 	set_current_blocked(&blocked);
2933 	if (current->sas_ss_flags & SS_AUTODISARM)
2934 		sas_ss_reset(current);
2935 	tracehook_signal_handler(stepping);
2936 }
2937 
2938 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2939 {
2940 	if (failed)
2941 		force_sigsegv(ksig->sig);
2942 	else
2943 		signal_delivered(ksig, stepping);
2944 }
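
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * an architecture's handle_signal() typically finishes frame setup with
 * something like
 *
 *	ret = setup_rt_frame(ksig, regs);	(arch-specific helper, name varies)
 *	signal_setup_done(ret, ksig, stepping);
 *
 * so a failed frame setup falls back to force_sigsegv() above, while a
 * successful one blocks the handled signal via signal_delivered().
 */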
2945 
2946 /*
2947  * It could be that complete_signal() picked us to notify about the
2948  * group-wide signal. Other threads should be notified now to take
2949  * the shared signals in @which since we will not.
2950  */
2951 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2952 {
2953 	sigset_t retarget;
2954 	struct task_struct *t;
2955 
2956 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2957 	if (sigisemptyset(&retarget))
2958 		return;
2959 
2960 	t = tsk;
2961 	while_each_thread(tsk, t) {
2962 		if (t->flags & PF_EXITING)
2963 			continue;
2964 
2965 		if (!has_pending_signals(&retarget, &t->blocked))
2966 			continue;
2967 		/* Remove the signals this thread can handle. */
2968 		sigandsets(&retarget, &retarget, &t->blocked);
2969 
2970 		if (!task_sigpending(t))
2971 			signal_wake_up(t, 0);
2972 
2973 		if (sigisemptyset(&retarget))
2974 			break;
2975 	}
2976 }
2977 
2978 void exit_signals(struct task_struct *tsk)
2979 {
2980 	int group_stop = 0;
2981 	sigset_t unblocked;
2982 
2983 	/*
2984 	 * @tsk is about to have PF_EXITING set - lock out users which
2985 	 * expect stable threadgroup.
2986 	 */
2987 	cgroup_threadgroup_change_begin(tsk);
2988 
2989 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2990 		tsk->flags |= PF_EXITING;
2991 		cgroup_threadgroup_change_end(tsk);
2992 		return;
2993 	}
2994 
2995 	spin_lock_irq(&tsk->sighand->siglock);
2996 	/*
2997 	 * From now this task is not visible for group-wide signals,
2998 	 * see wants_signal(), do_signal_stop().
2999 	 */
3000 	tsk->flags |= PF_EXITING;
3001 
3002 	cgroup_threadgroup_change_end(tsk);
3003 
3004 	if (!task_sigpending(tsk))
3005 		goto out;
3006 
3007 	unblocked = tsk->blocked;
3008 	signotset(&unblocked);
3009 	retarget_shared_pending(tsk, &unblocked);
3010 
3011 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3012 	    task_participate_group_stop(tsk))
3013 		group_stop = CLD_STOPPED;
3014 out:
3015 	spin_unlock_irq(&tsk->sighand->siglock);
3016 
3017 	/*
3018 	 * If group stop has completed, deliver the notification.  This
3019 	 * should always go to the real parent of the group leader.
3020 	 */
3021 	if (unlikely(group_stop)) {
3022 		read_lock(&tasklist_lock);
3023 		do_notify_parent_cldstop(tsk, false, group_stop);
3024 		read_unlock(&tasklist_lock);
3025 	}
3026 }
3027 
3028 /*
3029  * System call entry points.
3030  */
3031 
3032 /**
3033  *  sys_restart_syscall - restart a system call
3034  */
3035 SYSCALL_DEFINE0(restart_syscall)
3036 {
3037 	struct restart_block *restart = &current->restart_block;
3038 	return restart->fn(restart);
3039 }
3040 
3041 long do_no_restart_syscall(struct restart_block *param)
3042 {
3043 	return -EINTR;
3044 }
3045 
3046 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3047 {
3048 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3049 		sigset_t newblocked;
3050 		/* A set of now blocked but previously unblocked signals. */
3051 		sigandnsets(&newblocked, newset, &current->blocked);
3052 		retarget_shared_pending(tsk, &newblocked);
3053 	}
3054 	tsk->blocked = *newset;
3055 	recalc_sigpending();
3056 }
3057 
3058 /**
3059  * set_current_blocked - change current->blocked mask
3060  * @newset: new mask
3061  *
3062  * It is wrong to change ->blocked directly; this helper should be used
3063  * to ensure the process can't miss a shared signal we are going to block.
3064  */
3065 void set_current_blocked(sigset_t *newset)
3066 {
3067 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3068 	__set_current_blocked(newset);
3069 }
3070 
3071 void __set_current_blocked(const sigset_t *newset)
3072 {
3073 	struct task_struct *tsk = current;
3074 
3075 	/*
3076 	 * In case the signal mask hasn't changed, there is nothing we need
3077 	 * to do. The current->blocked shouldn't be modified by other task.
3078 	 */
3079 	if (sigequalsets(&tsk->blocked, newset))
3080 		return;
3081 
3082 	spin_lock_irq(&tsk->sighand->siglock);
3083 	__set_task_blocked(tsk, newset);
3084 	spin_unlock_irq(&tsk->sighand->siglock);
3085 }
3086 
3087 /*
3088  * This is also useful for kernel threads that want to temporarily
3089  * (or permanently) block certain signals.
3090  *
3091  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3092  * interface happily blocks "unblockable" signals like SIGKILL
3093  * and friends.
3094  */
3095 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3096 {
3097 	struct task_struct *tsk = current;
3098 	sigset_t newset;
3099 
3100 	/* Lockless, only current can change ->blocked, never from irq */
3101 	if (oldset)
3102 		*oldset = tsk->blocked;
3103 
3104 	switch (how) {
3105 	case SIG_BLOCK:
3106 		sigorsets(&newset, &tsk->blocked, set);
3107 		break;
3108 	case SIG_UNBLOCK:
3109 		sigandnsets(&newset, &tsk->blocked, set);
3110 		break;
3111 	case SIG_SETMASK:
3112 		newset = *set;
3113 		break;
3114 	default:
3115 		return -EINVAL;
3116 	}
3117 
3118 	__set_current_blocked(&newset);
3119 	return 0;
3120 }
3121 EXPORT_SYMBOL(sigprocmask);
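
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * a kernel thread that wants to block every asynchronous signal could do
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 *
 * and, unlike the userspace syscall, this really does block SIGKILL and
 * SIGSTOP as noted above.
 */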
3122 
3123 /*
3124  * The API helps set app-provided sigmasks.
3125  *
3126  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3127  * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3128  *
3129  * Note that it does set_restore_sigmask() in advance, so it must be always
3130  * paired with restore_saved_sigmask_unless() before return from syscall.
3131  */
3132 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3133 {
3134 	sigset_t kmask;
3135 
3136 	if (!umask)
3137 		return 0;
3138 	if (sigsetsize != sizeof(sigset_t))
3139 		return -EINVAL;
3140 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3141 		return -EFAULT;
3142 
3143 	set_restore_sigmask();
3144 	current->saved_sigmask = current->blocked;
3145 	set_current_blocked(&kmask);
3146 
3147 	return 0;
3148 }
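
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * a ppoll-style syscall would pair this helper with
 * restore_saved_sigmask_unless(), roughly
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_the_wait(...);		(hypothetical placeholder for the real work)
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *
 * so the temporary mask stays installed only while a caught signal still
 * has to be delivered with it in place.
 */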
3149 
3150 #ifdef CONFIG_COMPAT
3151 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3152 			    size_t sigsetsize)
3153 {
3154 	sigset_t kmask;
3155 
3156 	if (!umask)
3157 		return 0;
3158 	if (sigsetsize != sizeof(compat_sigset_t))
3159 		return -EINVAL;
3160 	if (get_compat_sigset(&kmask, umask))
3161 		return -EFAULT;
3162 
3163 	set_restore_sigmask();
3164 	current->saved_sigmask = current->blocked;
3165 	set_current_blocked(&kmask);
3166 
3167 	return 0;
3168 }
3169 #endif
3170 
3171 /**
3172  *  sys_rt_sigprocmask - change the list of currently blocked signals
3173  *  @how: whether to add, remove, or set signals
3174  *  @nset: new set of signals to block/unblock/set according to @how, or NULL
3175  *  @oset: previous value of signal mask if non-null
3176  *  @sigsetsize: size of sigset_t type
3177  */
3178 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3179 		sigset_t __user *, oset, size_t, sigsetsize)
3180 {
3181 	sigset_t old_set, new_set;
3182 	int error;
3183 
3184 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3185 	if (sigsetsize != sizeof(sigset_t))
3186 		return -EINVAL;
3187 
3188 	old_set = current->blocked;
3189 
3190 	if (nset) {
3191 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3192 			return -EFAULT;
3193 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3194 
3195 		error = sigprocmask(how, &new_set, NULL);
3196 		if (error)
3197 			return error;
3198 	}
3199 
3200 	if (oset) {
3201 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3202 			return -EFAULT;
3203 	}
3204 
3205 	return 0;
3206 }
3207 
3208 #ifdef CONFIG_COMPAT
3209 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3210 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3211 {
3212 	sigset_t old_set = current->blocked;
3213 
3214 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3215 	if (sigsetsize != sizeof(sigset_t))
3216 		return -EINVAL;
3217 
3218 	if (nset) {
3219 		sigset_t new_set;
3220 		int error;
3221 		if (get_compat_sigset(&new_set, nset))
3222 			return -EFAULT;
3223 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3224 
3225 		error = sigprocmask(how, &new_set, NULL);
3226 		if (error)
3227 			return error;
3228 	}
3229 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3230 }
3231 #endif
3232 
3233 static void do_sigpending(sigset_t *set)
3234 {
3235 	spin_lock_irq(&current->sighand->siglock);
3236 	sigorsets(set, &current->pending.signal,
3237 		  &current->signal->shared_pending.signal);
3238 	spin_unlock_irq(&current->sighand->siglock);
3239 
3240 	/* Outside the lock because only this thread touches it.  */
3241 	sigandsets(set, &current->blocked, set);
3242 }
3243 
3244 /**
3245  *  sys_rt_sigpending - examine a pending signal that has been raised
3246  *			while blocked
3247  *  @uset: stores pending signals
3248  *  @sigsetsize: size of sigset_t type or larger
3249  */
3250 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3251 {
3252 	sigset_t set;
3253 
3254 	if (sigsetsize > sizeof(*uset))
3255 		return -EINVAL;
3256 
3257 	do_sigpending(&set);
3258 
3259 	if (copy_to_user(uset, &set, sigsetsize))
3260 		return -EFAULT;
3261 
3262 	return 0;
3263 }
3264 
3265 #ifdef CONFIG_COMPAT
3266 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3267 		compat_size_t, sigsetsize)
3268 {
3269 	sigset_t set;
3270 
3271 	if (sigsetsize > sizeof(*uset))
3272 		return -EINVAL;
3273 
3274 	do_sigpending(&set);
3275 
3276 	return put_compat_sigset(uset, &set, sigsetsize);
3277 }
3278 #endif
3279 
3280 static const struct {
3281 	unsigned char limit, layout;
3282 } sig_sicodes[] = {
3283 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3284 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3285 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3286 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3287 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3288 #if defined(SIGEMT)
3289 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3290 #endif
3291 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3292 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3293 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3294 };
3295 
3296 static bool known_siginfo_layout(unsigned sig, int si_code)
3297 {
3298 	if (si_code == SI_KERNEL)
3299 		return true;
3300 	else if ((si_code > SI_USER)) {
3301 		if (sig_specific_sicodes(sig)) {
3302 			if (si_code <= sig_sicodes[sig].limit)
3303 				return true;
3304 		}
3305 		else if (si_code <= NSIGPOLL)
3306 			return true;
3307 	}
3308 	else if (si_code >= SI_DETHREAD)
3309 		return true;
3310 	else if (si_code == SI_ASYNCNL)
3311 		return true;
3312 	return false;
3313 }
3314 
3315 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3316 {
3317 	enum siginfo_layout layout = SIL_KILL;
3318 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3319 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3320 		    (si_code <= sig_sicodes[sig].limit)) {
3321 			layout = sig_sicodes[sig].layout;
3322 			/* Handle the exceptions */
3323 			if ((sig == SIGBUS) &&
3324 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3325 				layout = SIL_FAULT_MCEERR;
3326 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3327 				layout = SIL_FAULT_BNDERR;
3328 #ifdef SEGV_PKUERR
3329 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3330 				layout = SIL_FAULT_PKUERR;
3331 #endif
3332 			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3333 				layout = SIL_FAULT_PERF_EVENT;
3334 			else if (IS_ENABLED(CONFIG_SPARC) &&
3335 				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3336 				layout = SIL_FAULT_TRAPNO;
3337 			else if (IS_ENABLED(CONFIG_ALPHA) &&
3338 				 ((sig == SIGFPE) ||
3339 				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3340 				layout = SIL_FAULT_TRAPNO;
3341 		}
3342 		else if (si_code <= NSIGPOLL)
3343 			layout = SIL_POLL;
3344 	} else {
3345 		if (si_code == SI_TIMER)
3346 			layout = SIL_TIMER;
3347 		else if (si_code == SI_SIGIO)
3348 			layout = SIL_POLL;
3349 		else if (si_code < 0)
3350 			layout = SIL_RT;
3351 	}
3352 	return layout;
3353 }
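
/*
 * Illustrative examples (editor's note, not part of the original source):
 * siginfo_layout(SIGSEGV, SEGV_MAPERR) yields SIL_FAULT,
 * siginfo_layout(SIGCHLD, CLD_EXITED) yields SIL_CHLD, and
 * siginfo_layout(SIGUSR1, SI_QUEUE) yields SIL_RT since SI_QUEUE < 0.
 */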
3354 
3355 static inline char __user *si_expansion(const siginfo_t __user *info)
3356 {
3357 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3358 }
3359 
3360 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3361 {
3362 	char __user *expansion = si_expansion(to);
3363 	if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3364 		return -EFAULT;
3365 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3366 		return -EFAULT;
3367 	return 0;
3368 }
3369 
3370 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3371 				       const siginfo_t __user *from)
3372 {
3373 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3374 		char __user *expansion = si_expansion(from);
3375 		char buf[SI_EXPANSION_SIZE];
3376 		int i;
3377 		/*
3378 		 * An unknown si_code might need more than
3379 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3380 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3381 		 * will return this data to userspace exactly.
3382 		 */
3383 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3384 			return -EFAULT;
3385 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3386 			if (buf[i] != 0)
3387 				return -E2BIG;
3388 		}
3389 	}
3390 	return 0;
3391 }
3392 
3393 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3394 				    const siginfo_t __user *from)
3395 {
3396 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3397 		return -EFAULT;
3398 	to->si_signo = signo;
3399 	return post_copy_siginfo_from_user(to, from);
3400 }
3401 
3402 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3403 {
3404 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3405 		return -EFAULT;
3406 	return post_copy_siginfo_from_user(to, from);
3407 }
3408 
3409 #ifdef CONFIG_COMPAT
3410 /**
3411  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3412  * @to: compat siginfo destination
3413  * @from: kernel siginfo source
3414  *
3415  * Note: This function does not work properly for SIGCHLD on x32, but
3416  * fortunately it doesn't have to.  The only valid callers of this function are
3417  * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3418  * The latter does not care because SIGCHLD will never cause a coredump.
3419  */
3420 void copy_siginfo_to_external32(struct compat_siginfo *to,
3421 		const struct kernel_siginfo *from)
3422 {
3423 	memset(to, 0, sizeof(*to));
3424 
3425 	to->si_signo = from->si_signo;
3426 	to->si_errno = from->si_errno;
3427 	to->si_code  = from->si_code;
3428 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3429 	case SIL_KILL:
3430 		to->si_pid = from->si_pid;
3431 		to->si_uid = from->si_uid;
3432 		break;
3433 	case SIL_TIMER:
3434 		to->si_tid     = from->si_tid;
3435 		to->si_overrun = from->si_overrun;
3436 		to->si_int     = from->si_int;
3437 		break;
3438 	case SIL_POLL:
3439 		to->si_band = from->si_band;
3440 		to->si_fd   = from->si_fd;
3441 		break;
3442 	case SIL_FAULT:
3443 		to->si_addr = ptr_to_compat(from->si_addr);
3444 		break;
3445 	case SIL_FAULT_TRAPNO:
3446 		to->si_addr = ptr_to_compat(from->si_addr);
3447 		to->si_trapno = from->si_trapno;
3448 		break;
3449 	case SIL_FAULT_MCEERR:
3450 		to->si_addr = ptr_to_compat(from->si_addr);
3451 		to->si_addr_lsb = from->si_addr_lsb;
3452 		break;
3453 	case SIL_FAULT_BNDERR:
3454 		to->si_addr = ptr_to_compat(from->si_addr);
3455 		to->si_lower = ptr_to_compat(from->si_lower);
3456 		to->si_upper = ptr_to_compat(from->si_upper);
3457 		break;
3458 	case SIL_FAULT_PKUERR:
3459 		to->si_addr = ptr_to_compat(from->si_addr);
3460 		to->si_pkey = from->si_pkey;
3461 		break;
3462 	case SIL_FAULT_PERF_EVENT:
3463 		to->si_addr = ptr_to_compat(from->si_addr);
3464 		to->si_perf_data = from->si_perf_data;
3465 		to->si_perf_type = from->si_perf_type;
3466 		break;
3467 	case SIL_CHLD:
3468 		to->si_pid = from->si_pid;
3469 		to->si_uid = from->si_uid;
3470 		to->si_status = from->si_status;
3471 		to->si_utime = from->si_utime;
3472 		to->si_stime = from->si_stime;
3473 		break;
3474 	case SIL_RT:
3475 		to->si_pid = from->si_pid;
3476 		to->si_uid = from->si_uid;
3477 		to->si_int = from->si_int;
3478 		break;
3479 	case SIL_SYS:
3480 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3481 		to->si_syscall   = from->si_syscall;
3482 		to->si_arch      = from->si_arch;
3483 		break;
3484 	}
3485 }
3486 
3487 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3488 			   const struct kernel_siginfo *from)
3489 {
3490 	struct compat_siginfo new;
3491 
3492 	copy_siginfo_to_external32(&new, from);
3493 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3494 		return -EFAULT;
3495 	return 0;
3496 }
3497 
3498 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3499 					 const struct compat_siginfo *from)
3500 {
3501 	clear_siginfo(to);
3502 	to->si_signo = from->si_signo;
3503 	to->si_errno = from->si_errno;
3504 	to->si_code  = from->si_code;
3505 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3506 	case SIL_KILL:
3507 		to->si_pid = from->si_pid;
3508 		to->si_uid = from->si_uid;
3509 		break;
3510 	case SIL_TIMER:
3511 		to->si_tid     = from->si_tid;
3512 		to->si_overrun = from->si_overrun;
3513 		to->si_int     = from->si_int;
3514 		break;
3515 	case SIL_POLL:
3516 		to->si_band = from->si_band;
3517 		to->si_fd   = from->si_fd;
3518 		break;
3519 	case SIL_FAULT:
3520 		to->si_addr = compat_ptr(from->si_addr);
3521 		break;
3522 	case SIL_FAULT_TRAPNO:
3523 		to->si_addr = compat_ptr(from->si_addr);
3524 		to->si_trapno = from->si_trapno;
3525 		break;
3526 	case SIL_FAULT_MCEERR:
3527 		to->si_addr = compat_ptr(from->si_addr);
3528 		to->si_addr_lsb = from->si_addr_lsb;
3529 		break;
3530 	case SIL_FAULT_BNDERR:
3531 		to->si_addr = compat_ptr(from->si_addr);
3532 		to->si_lower = compat_ptr(from->si_lower);
3533 		to->si_upper = compat_ptr(from->si_upper);
3534 		break;
3535 	case SIL_FAULT_PKUERR:
3536 		to->si_addr = compat_ptr(from->si_addr);
3537 		to->si_pkey = from->si_pkey;
3538 		break;
3539 	case SIL_FAULT_PERF_EVENT:
3540 		to->si_addr = compat_ptr(from->si_addr);
3541 		to->si_perf_data = from->si_perf_data;
3542 		to->si_perf_type = from->si_perf_type;
3543 		break;
3544 	case SIL_CHLD:
3545 		to->si_pid    = from->si_pid;
3546 		to->si_uid    = from->si_uid;
3547 		to->si_status = from->si_status;
3548 #ifdef CONFIG_X86_X32_ABI
3549 		if (in_x32_syscall()) {
3550 			to->si_utime = from->_sifields._sigchld_x32._utime;
3551 			to->si_stime = from->_sifields._sigchld_x32._stime;
3552 		} else
3553 #endif
3554 		{
3555 			to->si_utime = from->si_utime;
3556 			to->si_stime = from->si_stime;
3557 		}
3558 		break;
3559 	case SIL_RT:
3560 		to->si_pid = from->si_pid;
3561 		to->si_uid = from->si_uid;
3562 		to->si_int = from->si_int;
3563 		break;
3564 	case SIL_SYS:
3565 		to->si_call_addr = compat_ptr(from->si_call_addr);
3566 		to->si_syscall   = from->si_syscall;
3567 		to->si_arch      = from->si_arch;
3568 		break;
3569 	}
3570 	return 0;
3571 }
3572 
3573 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3574 				      const struct compat_siginfo __user *ufrom)
3575 {
3576 	struct compat_siginfo from;
3577 
3578 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3579 		return -EFAULT;
3580 
3581 	from.si_signo = signo;
3582 	return post_copy_siginfo_from_user32(to, &from);
3583 }
3584 
3585 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3586 			     const struct compat_siginfo __user *ufrom)
3587 {
3588 	struct compat_siginfo from;
3589 
3590 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3591 		return -EFAULT;
3592 
3593 	return post_copy_siginfo_from_user32(to, &from);
3594 }
3595 #endif /* CONFIG_COMPAT */
3596 
3597 /**
3598  *  do_sigtimedwait - wait for queued signals specified in @which
3599  *  @which: queued signals to wait for
3600  *  @info: if non-null, the signal's siginfo is returned here
3601  *  @ts: upper bound on process time suspension
3602  */
3603 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3604 		    const struct timespec64 *ts)
3605 {
3606 	ktime_t *to = NULL, timeout = KTIME_MAX;
3607 	struct task_struct *tsk = current;
3608 	sigset_t mask = *which;
3609 	int sig, ret = 0;
3610 
3611 	if (ts) {
3612 		if (!timespec64_valid(ts))
3613 			return -EINVAL;
3614 		timeout = timespec64_to_ktime(*ts);
3615 		to = &timeout;
3616 	}
3617 
3618 	/*
3619 	 * Invert the set of allowed signals to get those we want to block.
3620 	 */
3621 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3622 	signotset(&mask);
3623 
3624 	spin_lock_irq(&tsk->sighand->siglock);
3625 	sig = dequeue_signal(tsk, &mask, info);
3626 	if (!sig && timeout) {
3627 		/*
3628 		 * None ready, so temporarily unblock the signals we're
3629 		 * interested in while we sleep, so that we'll be awakened
3630 		 * when they arrive. Unblocking is always fine, we can avoid
3631 		 * set_current_blocked().
3632 		 */
3633 		tsk->real_blocked = tsk->blocked;
3634 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3635 		recalc_sigpending();
3636 		spin_unlock_irq(&tsk->sighand->siglock);
3637 
3638 		__set_current_state(TASK_INTERRUPTIBLE);
3639 		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3640 							 HRTIMER_MODE_REL);
3641 		spin_lock_irq(&tsk->sighand->siglock);
3642 		__set_task_blocked(tsk, &tsk->real_blocked);
3643 		sigemptyset(&tsk->real_blocked);
3644 		sig = dequeue_signal(tsk, &mask, info);
3645 	}
3646 	spin_unlock_irq(&tsk->sighand->siglock);
3647 
3648 	if (sig)
3649 		return sig;
3650 	return ret ? -EINTR : -EAGAIN;
3651 }
3652 
3653 /**
3654  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3655  *			in @uthese
3656  *  @uthese: queued signals to wait for
3657  *  @uinfo: if non-null, the signal's siginfo is returned here
3658  *  @uts: upper bound on process time suspension
3659  *  @sigsetsize: size of sigset_t type
3660  */
3661 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3662 		siginfo_t __user *, uinfo,
3663 		const struct __kernel_timespec __user *, uts,
3664 		size_t, sigsetsize)
3665 {
3666 	sigset_t these;
3667 	struct timespec64 ts;
3668 	kernel_siginfo_t info;
3669 	int ret;
3670 
3671 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3672 	if (sigsetsize != sizeof(sigset_t))
3673 		return -EINVAL;
3674 
3675 	if (copy_from_user(&these, uthese, sizeof(these)))
3676 		return -EFAULT;
3677 
3678 	if (uts) {
3679 		if (get_timespec64(&ts, uts))
3680 			return -EFAULT;
3681 	}
3682 
3683 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3684 
3685 	if (ret > 0 && uinfo) {
3686 		if (copy_siginfo_to_user(uinfo, &info))
3687 			ret = -EFAULT;
3688 	}
3689 
3690 	return ret;
3691 }
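/*
 * A minimal user-space sketch (illustrative, not kernel code) of how this
 * syscall is normally reached via the sigtimedwait(3) wrapper: the signal
 * must already be blocked so it stays queued, then the call waits for it
 * with a timeout.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// block first, or it is delivered
 *
 *		if (sigtimedwait(&set, &info, &ts) < 0)
 *			perror("sigtimedwait");	// EAGAIN on timeout, EINTR if interrupted
 *		else
 *			printf("got signal %d from pid %d\n",
 *			       info.si_signo, (int)info.si_pid);
 *		return 0;
 *	}
 */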
3692 
3693 #ifdef CONFIG_COMPAT_32BIT_TIME
3694 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3695 		siginfo_t __user *, uinfo,
3696 		const struct old_timespec32 __user *, uts,
3697 		size_t, sigsetsize)
3698 {
3699 	sigset_t these;
3700 	struct timespec64 ts;
3701 	kernel_siginfo_t info;
3702 	int ret;
3703 
3704 	if (sigsetsize != sizeof(sigset_t))
3705 		return -EINVAL;
3706 
3707 	if (copy_from_user(&these, uthese, sizeof(these)))
3708 		return -EFAULT;
3709 
3710 	if (uts) {
3711 		if (get_old_timespec32(&ts, uts))
3712 			return -EFAULT;
3713 	}
3714 
3715 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3716 
3717 	if (ret > 0 && uinfo) {
3718 		if (copy_siginfo_to_user(uinfo, &info))
3719 			ret = -EFAULT;
3720 	}
3721 
3722 	return ret;
3723 }
3724 #endif
3725 
3726 #ifdef CONFIG_COMPAT
3727 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3728 		struct compat_siginfo __user *, uinfo,
3729 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3730 {
3731 	sigset_t s;
3732 	struct timespec64 t;
3733 	kernel_siginfo_t info;
3734 	long ret;
3735 
3736 	if (sigsetsize != sizeof(sigset_t))
3737 		return -EINVAL;
3738 
3739 	if (get_compat_sigset(&s, uthese))
3740 		return -EFAULT;
3741 
3742 	if (uts) {
3743 		if (get_timespec64(&t, uts))
3744 			return -EFAULT;
3745 	}
3746 
3747 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3748 
3749 	if (ret > 0 && uinfo) {
3750 		if (copy_siginfo_to_user32(uinfo, &info))
3751 			ret = -EFAULT;
3752 	}
3753 
3754 	return ret;
3755 }
3756 
3757 #ifdef CONFIG_COMPAT_32BIT_TIME
3758 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3759 		struct compat_siginfo __user *, uinfo,
3760 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3761 {
3762 	sigset_t s;
3763 	struct timespec64 t;
3764 	kernel_siginfo_t info;
3765 	long ret;
3766 
3767 	if (sigsetsize != sizeof(sigset_t))
3768 		return -EINVAL;
3769 
3770 	if (get_compat_sigset(&s, uthese))
3771 		return -EFAULT;
3772 
3773 	if (uts) {
3774 		if (get_old_timespec32(&t, uts))
3775 			return -EFAULT;
3776 	}
3777 
3778 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3779 
3780 	if (ret > 0 && uinfo) {
3781 		if (copy_siginfo_to_user32(uinfo, &info))
3782 			ret = -EFAULT;
3783 	}
3784 
3785 	return ret;
3786 }
3787 #endif
3788 #endif
3789 
3790 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3791 {
3792 	clear_siginfo(info);
3793 	info->si_signo = sig;
3794 	info->si_errno = 0;
3795 	info->si_code = SI_USER;
3796 	info->si_pid = task_tgid_vnr(current);
3797 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3798 }
3799 
3800 /**
3801  *  sys_kill - send a signal to a process
3802  *  @pid: the PID of the process
3803  *  @sig: signal to be sent
3804  */
3805 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3806 {
3807 	struct kernel_siginfo info;
3808 
3809 	prepare_kill_siginfo(sig, &info);
3810 
3811 	return kill_something_info(sig, &info, pid);
3812 }
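/*
 * Illustrative user-space view of the pid conventions that
 * kill_something_info() implements (roughly, per kill(2)):
 *
 *	kill(1234, SIGTERM);	// the single process with PID 1234
 *	kill(0, SIGTERM);	// every process in the caller's process group
 *	kill(-5678, SIGTERM);	// every process in process group 5678
 *	kill(-1, SIGTERM);	// everything the caller may signal, except init
 *				// and the caller's own thread group
 *	kill(1234, 0);		// no signal delivered; existence/permission probe
 */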
3813 
3814 /*
3815  * Verify that the signaler and signalee either are in the same pid namespace
3816  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3817  * namespace.
3818  */
3819 static bool access_pidfd_pidns(struct pid *pid)
3820 {
3821 	struct pid_namespace *active = task_active_pid_ns(current);
3822 	struct pid_namespace *p = ns_of_pid(pid);
3823 
3824 	for (;;) {
3825 		if (!p)
3826 			return false;
3827 		if (p == active)
3828 			break;
3829 		p = p->parent;
3830 	}
3831 
3832 	return true;
3833 }
3834 
3835 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3836 		siginfo_t __user *info)
3837 {
3838 #ifdef CONFIG_COMPAT
3839 	/*
3840 	 * Avoid hooking up compat syscalls and instead handle necessary
3841 	 * conversions here. Note, this is a stop-gap measure and should not be
3842 	 * considered a generic solution.
3843 	 */
3844 	if (in_compat_syscall())
3845 		return copy_siginfo_from_user32(
3846 			kinfo, (struct compat_siginfo __user *)info);
3847 #endif
3848 	return copy_siginfo_from_user(kinfo, info);
3849 }
3850 
3851 static struct pid *pidfd_to_pid(const struct file *file)
3852 {
3853 	struct pid *pid;
3854 
3855 	pid = pidfd_pid(file);
3856 	if (!IS_ERR(pid))
3857 		return pid;
3858 
3859 	return tgid_pidfd_to_pid(file);
3860 }
3861 
3862 /**
3863  * sys_pidfd_send_signal - Signal a process through a pidfd
3864  * @pidfd:  file descriptor of the process
3865  * @sig:    signal to send
3866  * @info:   signal info
3867  * @flags:  future flags
3868  *
3869  * The syscall currently only signals via PIDTYPE_PID which covers
3870  * kill(<positive-pid>, <signal>). It does not signal threads or process
3871  * groups.
3872  * In order to extend the syscall to threads and process groups the @flags
3873  * argument should be used. In essence, the @flags argument will determine
3874  * what is signaled and not the file descriptor itself. In other words,
3875  * grouping is a property of the flags argument, not of the file
3876  * descriptor.
3877  *
3878  * Return: 0 on success, negative errno on failure
3879  */
3880 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3881 		siginfo_t __user *, info, unsigned int, flags)
3882 {
3883 	int ret;
3884 	struct fd f;
3885 	struct pid *pid;
3886 	kernel_siginfo_t kinfo;
3887 
3888 	/* Enforce flags be set to 0 until we add an extension. */
3889 	if (flags)
3890 		return -EINVAL;
3891 
3892 	f = fdget(pidfd);
3893 	if (!f.file)
3894 		return -EBADF;
3895 
3896 	/* Is this a pidfd? */
3897 	pid = pidfd_to_pid(f.file);
3898 	if (IS_ERR(pid)) {
3899 		ret = PTR_ERR(pid);
3900 		goto err;
3901 	}
3902 
3903 	ret = -EINVAL;
3904 	if (!access_pidfd_pidns(pid))
3905 		goto err;
3906 
3907 	if (info) {
3908 		ret = copy_siginfo_from_user_any(&kinfo, info);
3909 		if (unlikely(ret))
3910 			goto err;
3911 
3912 		ret = -EINVAL;
3913 		if (unlikely(sig != kinfo.si_signo))
3914 			goto err;
3915 
3916 		/* Only allow sending arbitrary signals to yourself. */
3917 		ret = -EPERM;
3918 		if ((task_pid(current) != pid) &&
3919 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3920 			goto err;
3921 	} else {
3922 		prepare_kill_siginfo(sig, &kinfo);
3923 	}
3924 
3925 	ret = kill_pid_info(sig, &kinfo, pid);
3926 
3927 err:
3928 	fdput(f);
3929 	return ret;
3930 }
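/*
 * Hedged user-space sketch (not kernel code): obtain a pidfd for a process
 * and signal it through the descriptor, so the signal cannot land on a
 * recycled PID. Raw syscall() is used in case libc lacks wrappers.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int send_sigterm(pid_t pid)
 *	{
 *		int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *		int ret;
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// info == NULL behaves like kill(pid, SIGTERM); flags must be 0
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */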
3931 
3932 static int
3933 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3934 {
3935 	struct task_struct *p;
3936 	int error = -ESRCH;
3937 
3938 	rcu_read_lock();
3939 	p = find_task_by_vpid(pid);
3940 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3941 		error = check_kill_permission(sig, info, p);
3942 		/*
3943 		 * The null signal is a permissions and process existence
3944 		 * probe.  No signal is actually delivered.
3945 		 */
3946 		if (!error && sig) {
3947 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3948 			/*
3949 			 * If lock_task_sighand() failed we pretend the task
3950 			 * dies after receiving the signal. The window is tiny,
3951 			 * and the signal is private anyway.
3952 			 */
3953 			if (unlikely(error == -ESRCH))
3954 				error = 0;
3955 		}
3956 	}
3957 	rcu_read_unlock();
3958 
3959 	return error;
3960 }
3961 
3962 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3963 {
3964 	struct kernel_siginfo info;
3965 
3966 	clear_siginfo(&info);
3967 	info.si_signo = sig;
3968 	info.si_errno = 0;
3969 	info.si_code = SI_TKILL;
3970 	info.si_pid = task_tgid_vnr(current);
3971 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3972 
3973 	return do_send_specific(tgid, pid, sig, &info);
3974 }
3975 
3976 /**
3977  *  sys_tgkill - send signal to one specific thread
3978  *  @tgid: the thread group ID of the thread
3979  *  @pid: the PID of the thread
3980  *  @sig: signal to be sent
3981  *
3982  *  This syscall also checks @tgid and returns -ESRCH even if the PID
3983  *  exists but no longer belongs to the target thread group. This
3984  *  method solves the problem of threads exiting and PIDs getting reused.
3985  */
3986 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3987 {
3988 	/* This is only valid for single tasks */
3989 	if (pid <= 0 || tgid <= 0)
3990 		return -EINVAL;
3991 
3992 	return do_tkill(tgid, pid, sig);
3993 }
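/*
 * Illustrative user-space use (not kernel code): direct a signal at one
 * specific thread of the calling process by thread group id and tid.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = syscall(SYS_gettid);
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);	// delivered to this thread only
 */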
3994 
3995 /**
3996  *  sys_tkill - send signal to one specific task
3997  *  @pid: the PID of the task
3998  *  @sig: signal to be sent
3999  *
4000  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
4001  */
4002 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4003 {
4004 	/* This is only valid for single tasks */
4005 	if (pid <= 0)
4006 		return -EINVAL;
4007 
4008 	return do_tkill(0, pid, sig);
4009 }
4010 
4011 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4012 {
4013 	/* Not even root can pretend to send signals from the kernel.
4014 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4015 	 */
4016 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4017 	    (task_pid_vnr(current) != pid))
4018 		return -EPERM;
4019 
4020 	/* POSIX.1b doesn't mention process groups.  */
4021 	return kill_proc_info(sig, info, pid);
4022 }
4023 
4024 /**
4025  *  sys_rt_sigqueueinfo - queue a signal and data to a process
4026  *  @pid: the PID of the target process
4027  *  @sig: signal to be sent
4028  *  @uinfo: signal info to be sent
4029  */
4030 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4031 		siginfo_t __user *, uinfo)
4032 {
4033 	kernel_siginfo_t info;
4034 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4035 	if (unlikely(ret))
4036 		return ret;
4037 	return do_rt_sigqueueinfo(pid, sig, &info);
4038 }
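/*
 * The usual user-space route to this syscall is sigqueue(3), which builds
 * the siginfo with si_code == SI_QUEUE and the caller's pid/uid. A hedged
 * sketch, where target_pid is a placeholder for a real PID:
 *
 *	#include <signal.h>
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	sigqueue(target_pid, SIGRTMIN, val);	// queues the value with the signal
 */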
4039 
4040 #ifdef CONFIG_COMPAT
4041 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4042 			compat_pid_t, pid,
4043 			int, sig,
4044 			struct compat_siginfo __user *, uinfo)
4045 {
4046 	kernel_siginfo_t info;
4047 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4048 	if (unlikely(ret))
4049 		return ret;
4050 	return do_rt_sigqueueinfo(pid, sig, &info);
4051 }
4052 #endif
4053 
4054 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4055 {
4056 	/* This is only valid for single tasks */
4057 	if (pid <= 0 || tgid <= 0)
4058 		return -EINVAL;
4059 
4060 	/* Not even root can pretend to send signals from the kernel.
4061 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4062 	 */
4063 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4064 	    (task_pid_vnr(current) != pid))
4065 		return -EPERM;
4066 
4067 	return do_send_specific(tgid, pid, sig, info);
4068 }
4069 
4070 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4071 		siginfo_t __user *, uinfo)
4072 {
4073 	kernel_siginfo_t info;
4074 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4075 	if (unlikely(ret))
4076 		return ret;
4077 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4078 }
4079 
4080 #ifdef CONFIG_COMPAT
4081 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4082 			compat_pid_t, tgid,
4083 			compat_pid_t, pid,
4084 			int, sig,
4085 			struct compat_siginfo __user *, uinfo)
4086 {
4087 	kernel_siginfo_t info;
4088 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4089 	if (unlikely(ret))
4090 		return ret;
4091 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4092 }
4093 #endif
4094 
4095 /*
4096  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4097  */
4098 void kernel_sigaction(int sig, __sighandler_t action)
4099 {
4100 	spin_lock_irq(&current->sighand->siglock);
4101 	current->sighand->action[sig - 1].sa.sa_handler = action;
4102 	if (action == SIG_IGN) {
4103 		sigset_t mask;
4104 
4105 		sigemptyset(&mask);
4106 		sigaddset(&mask, sig);
4107 
4108 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4109 		flush_sigqueue_mask(&mask, &current->pending);
4110 		recalc_sigpending();
4111 	}
4112 	spin_unlock_irq(&current->sighand->siglock);
4113 }
4114 EXPORT_SYMBOL(kernel_sigaction);
4115 
4116 void __weak sigaction_compat_abi(struct k_sigaction *act,
4117 		struct k_sigaction *oact)
4118 {
4119 }
4120 
4121 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4122 {
4123 	struct task_struct *p = current, *t;
4124 	struct k_sigaction *k;
4125 	sigset_t mask;
4126 
4127 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4128 		return -EINVAL;
4129 
4130 	k = &p->sighand->action[sig-1];
4131 
4132 	spin_lock_irq(&p->sighand->siglock);
4133 	if (k->sa.sa_flags & SA_IMMUTABLE) {
4134 		spin_unlock_irq(&p->sighand->siglock);
4135 		return -EINVAL;
4136 	}
4137 	if (oact)
4138 		*oact = *k;
4139 
4140 	/*
4141 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4142 	 * e.g. by having an architecture use the bit in their uapi.
4143 	 */
4144 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4145 
4146 	/*
4147 	 * Clear unknown flag bits in order to allow userspace to detect missing
4148 	 * support for flag bits and to allow the kernel to use non-uapi bits
4149 	 * internally.
4150 	 */
4151 	if (act)
4152 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4153 	if (oact)
4154 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4155 
4156 	sigaction_compat_abi(act, oact);
4157 
4158 	if (act) {
4159 		sigdelsetmask(&act->sa.sa_mask,
4160 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4161 		*k = *act;
4162 		/*
4163 		 * POSIX 3.3.1.3:
4164 		 *  "Setting a signal action to SIG_IGN for a signal that is
4165 		 *   pending shall cause the pending signal to be discarded,
4166 		 *   whether or not it is blocked."
4167 		 *
4168 		 *  "Setting a signal action to SIG_DFL for a signal that is
4169 		 *   pending and whose default action is to ignore the signal
4170 		 *   (for example, SIGCHLD), shall cause the pending signal to
4171 		 *   be discarded, whether or not it is blocked"
4172 		 */
4173 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4174 			sigemptyset(&mask);
4175 			sigaddset(&mask, sig);
4176 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4177 			for_each_thread(p, t)
4178 				flush_sigqueue_mask(&mask, &t->pending);
4179 		}
4180 	}
4181 
4182 	spin_unlock_irq(&p->sighand->siglock);
4183 	return 0;
4184 }
4185 
4186 static int
4187 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4188 		size_t min_ss_size)
4189 {
4190 	struct task_struct *t = current;
4191 
4192 	if (oss) {
4193 		memset(oss, 0, sizeof(stack_t));
4194 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4195 		oss->ss_size = t->sas_ss_size;
4196 		oss->ss_flags = sas_ss_flags(sp) |
4197 			(current->sas_ss_flags & SS_FLAG_BITS);
4198 	}
4199 
4200 	if (ss) {
4201 		void __user *ss_sp = ss->ss_sp;
4202 		size_t ss_size = ss->ss_size;
4203 		unsigned ss_flags = ss->ss_flags;
4204 		int ss_mode;
4205 
4206 		if (unlikely(on_sig_stack(sp)))
4207 			return -EPERM;
4208 
4209 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4210 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4211 				ss_mode != 0))
4212 			return -EINVAL;
4213 
4214 		if (ss_mode == SS_DISABLE) {
4215 			ss_size = 0;
4216 			ss_sp = NULL;
4217 		} else {
4218 			if (unlikely(ss_size < min_ss_size))
4219 				return -ENOMEM;
4220 		}
4221 
4222 		t->sas_ss_sp = (unsigned long) ss_sp;
4223 		t->sas_ss_size = ss_size;
4224 		t->sas_ss_flags = ss_flags;
4225 	}
4226 	return 0;
4227 }
4228 
4229 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4230 {
4231 	stack_t new, old;
4232 	int err;
4233 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4234 		return -EFAULT;
4235 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4236 			      current_user_stack_pointer(),
4237 			      MINSIGSTKSZ);
4238 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4239 		err = -EFAULT;
4240 	return err;
4241 }
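/*
 * Typical user-space pairing (illustrative; "handler" is an assumed
 * SA_SIGINFO handler): install an alternate stack so a SIGSEGV handler can
 * still run after the main stack overflows. SA_ONSTACK on the subsequent
 * sigaction() is what makes signal delivery switch to this stack.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp    = malloc(SIGSTKSZ),
 *		.ss_size  = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_sigaction = handler,
 *		.sa_flags     = SA_ONSTACK | SA_SIGINFO,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */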
4242 
4243 int restore_altstack(const stack_t __user *uss)
4244 {
4245 	stack_t new;
4246 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4247 		return -EFAULT;
4248 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4249 			     MINSIGSTKSZ);
4250 	/* squash all but EFAULT for now */
4251 	return 0;
4252 }
4253 
4254 int __save_altstack(stack_t __user *uss, unsigned long sp)
4255 {
4256 	struct task_struct *t = current;
4257 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4258 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4259 		__put_user(t->sas_ss_size, &uss->ss_size);
4260 	return err;
4261 }
4262 
4263 #ifdef CONFIG_COMPAT
4264 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4265 				 compat_stack_t __user *uoss_ptr)
4266 {
4267 	stack_t uss, uoss;
4268 	int ret;
4269 
4270 	if (uss_ptr) {
4271 		compat_stack_t uss32;
4272 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4273 			return -EFAULT;
4274 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4275 		uss.ss_flags = uss32.ss_flags;
4276 		uss.ss_size = uss32.ss_size;
4277 	}
4278 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4279 			     compat_user_stack_pointer(),
4280 			     COMPAT_MINSIGSTKSZ);
4281 	if (ret >= 0 && uoss_ptr)  {
4282 		compat_stack_t old;
4283 		memset(&old, 0, sizeof(old));
4284 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4285 		old.ss_flags = uoss.ss_flags;
4286 		old.ss_size = uoss.ss_size;
4287 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4288 			ret = -EFAULT;
4289 	}
4290 	return ret;
4291 }
4292 
4293 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4294 			const compat_stack_t __user *, uss_ptr,
4295 			compat_stack_t __user *, uoss_ptr)
4296 {
4297 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4298 }
4299 
4300 int compat_restore_altstack(const compat_stack_t __user *uss)
4301 {
4302 	int err = do_compat_sigaltstack(uss, NULL);
4303 	/* squash all but -EFAULT for now */
4304 	return err == -EFAULT ? err : 0;
4305 }
4306 
4307 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4308 {
4309 	int err;
4310 	struct task_struct *t = current;
4311 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4312 			 &uss->ss_sp) |
4313 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4314 		__put_user(t->sas_ss_size, &uss->ss_size);
4315 	return err;
4316 }
4317 #endif
4318 
4319 #ifdef __ARCH_WANT_SYS_SIGPENDING
4320 
4321 /**
4322  *  sys_sigpending - examine pending signals
4323  *  @uset: where the mask of pending signals is returned
4324  */
4325 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4326 {
4327 	sigset_t set;
4328 
4329 	if (sizeof(old_sigset_t) > sizeof(*uset))
4330 		return -EINVAL;
4331 
4332 	do_sigpending(&set);
4333 
4334 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4335 		return -EFAULT;
4336 
4337 	return 0;
4338 }
4339 
4340 #ifdef CONFIG_COMPAT
4341 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4342 {
4343 	sigset_t set;
4344 
4345 	do_sigpending(&set);
4346 
4347 	return put_user(set.sig[0], set32);
4348 }
4349 #endif
4350 
4351 #endif
4352 
4353 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4354 /**
4355  *  sys_sigprocmask - examine and change blocked signals
4356  *  @how: whether to add, remove, or set signals
4357  *  @nset: signals to add or remove (if non-null)
4358  *  @oset: previous value of signal mask if non-null
4359  *
4360  * Some platforms have their own version with special arguments;
4361  * others support only sys_rt_sigprocmask.
4362  */
4363 
4364 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4365 		old_sigset_t __user *, oset)
4366 {
4367 	old_sigset_t old_set, new_set;
4368 	sigset_t new_blocked;
4369 
4370 	old_set = current->blocked.sig[0];
4371 
4372 	if (nset) {
4373 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4374 			return -EFAULT;
4375 
4376 		new_blocked = current->blocked;
4377 
4378 		switch (how) {
4379 		case SIG_BLOCK:
4380 			sigaddsetmask(&new_blocked, new_set);
4381 			break;
4382 		case SIG_UNBLOCK:
4383 			sigdelsetmask(&new_blocked, new_set);
4384 			break;
4385 		case SIG_SETMASK:
4386 			new_blocked.sig[0] = new_set;
4387 			break;
4388 		default:
4389 			return -EINVAL;
4390 		}
4391 
4392 		set_current_blocked(&new_blocked);
4393 	}
4394 
4395 	if (oset) {
4396 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4397 			return -EFAULT;
4398 	}
4399 
4400 	return 0;
4401 }
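/*
 * User-space sketch of the three @how modes (illustrative only):
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *
 *	sigprocmask(SIG_BLOCK, &set, &old);	// add SIGINT to the blocked mask
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	// remove it again
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore the saved mask wholesale
 */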
4402 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4403 
4404 #ifndef CONFIG_ODD_RT_SIGACTION
4405 /**
4406  *  sys_rt_sigaction - alter an action taken by a process
4407  *  @sig: signal whose action is to be changed
4408  *  @act: new sigaction
4409  *  @oact: used to save the previous sigaction
4410  *  @sigsetsize: size of sigset_t type
4411  */
4412 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4413 		const struct sigaction __user *, act,
4414 		struct sigaction __user *, oact,
4415 		size_t, sigsetsize)
4416 {
4417 	struct k_sigaction new_sa, old_sa;
4418 	int ret;
4419 
4420 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4421 	if (sigsetsize != sizeof(sigset_t))
4422 		return -EINVAL;
4423 
4424 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4425 		return -EFAULT;
4426 
4427 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4428 	if (ret)
4429 		return ret;
4430 
4431 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4432 		return -EFAULT;
4433 
4434 	return 0;
4435 }
4436 #ifdef CONFIG_COMPAT
4437 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4438 		const struct compat_sigaction __user *, act,
4439 		struct compat_sigaction __user *, oact,
4440 		compat_size_t, sigsetsize)
4441 {
4442 	struct k_sigaction new_ka, old_ka;
4443 #ifdef __ARCH_HAS_SA_RESTORER
4444 	compat_uptr_t restorer;
4445 #endif
4446 	int ret;
4447 
4448 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4449 	if (sigsetsize != sizeof(compat_sigset_t))
4450 		return -EINVAL;
4451 
4452 	if (act) {
4453 		compat_uptr_t handler;
4454 		ret = get_user(handler, &act->sa_handler);
4455 		new_ka.sa.sa_handler = compat_ptr(handler);
4456 #ifdef __ARCH_HAS_SA_RESTORER
4457 		ret |= get_user(restorer, &act->sa_restorer);
4458 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4459 #endif
4460 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4461 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4462 		if (ret)
4463 			return -EFAULT;
4464 	}
4465 
4466 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4467 	if (!ret && oact) {
4468 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4469 			       &oact->sa_handler);
4470 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4471 					 sizeof(oact->sa_mask));
4472 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4473 #ifdef __ARCH_HAS_SA_RESTORER
4474 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4475 				&oact->sa_restorer);
4476 #endif
4477 	}
4478 	return ret;
4479 }
4480 #endif
4481 #endif /* !CONFIG_ODD_RT_SIGACTION */
4482 
4483 #ifdef CONFIG_OLD_SIGACTION
4484 SYSCALL_DEFINE3(sigaction, int, sig,
4485 		const struct old_sigaction __user *, act,
4486 	        struct old_sigaction __user *, oact)
4487 {
4488 	struct k_sigaction new_ka, old_ka;
4489 	int ret;
4490 
4491 	if (act) {
4492 		old_sigset_t mask;
4493 		if (!access_ok(act, sizeof(*act)) ||
4494 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4495 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4496 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4497 		    __get_user(mask, &act->sa_mask))
4498 			return -EFAULT;
4499 #ifdef __ARCH_HAS_KA_RESTORER
4500 		new_ka.ka_restorer = NULL;
4501 #endif
4502 		siginitset(&new_ka.sa.sa_mask, mask);
4503 	}
4504 
4505 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4506 
4507 	if (!ret && oact) {
4508 		if (!access_ok(oact, sizeof(*oact)) ||
4509 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4510 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4511 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4512 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4513 			return -EFAULT;
4514 	}
4515 
4516 	return ret;
4517 }
4518 #endif
4519 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4520 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4521 		const struct compat_old_sigaction __user *, act,
4522 	        struct compat_old_sigaction __user *, oact)
4523 {
4524 	struct k_sigaction new_ka, old_ka;
4525 	int ret;
4526 	compat_old_sigset_t mask;
4527 	compat_uptr_t handler, restorer;
4528 
4529 	if (act) {
4530 		if (!access_ok(act, sizeof(*act)) ||
4531 		    __get_user(handler, &act->sa_handler) ||
4532 		    __get_user(restorer, &act->sa_restorer) ||
4533 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4534 		    __get_user(mask, &act->sa_mask))
4535 			return -EFAULT;
4536 
4537 #ifdef __ARCH_HAS_KA_RESTORER
4538 		new_ka.ka_restorer = NULL;
4539 #endif
4540 		new_ka.sa.sa_handler = compat_ptr(handler);
4541 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4542 		siginitset(&new_ka.sa.sa_mask, mask);
4543 	}
4544 
4545 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4546 
4547 	if (!ret && oact) {
4548 		if (!access_ok(oact, sizeof(*oact)) ||
4549 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4550 			       &oact->sa_handler) ||
4551 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4552 			       &oact->sa_restorer) ||
4553 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4554 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4555 			return -EFAULT;
4556 	}
4557 	return ret;
4558 }
4559 #endif
4560 
4561 #ifdef CONFIG_SGETMASK_SYSCALL
4562 
4563 /*
4564  * For backwards compatibility.  Functionality superseded by sigprocmask.
4565  */
4566 SYSCALL_DEFINE0(sgetmask)
4567 {
4568 	/* SMP safe */
4569 	return current->blocked.sig[0];
4570 }
4571 
4572 SYSCALL_DEFINE1(ssetmask, int, newmask)
4573 {
4574 	int old = current->blocked.sig[0];
4575 	sigset_t newset;
4576 
4577 	siginitset(&newset, newmask);
4578 	set_current_blocked(&newset);
4579 
4580 	return old;
4581 }
4582 #endif /* CONFIG_SGETMASK_SYSCALL */
4583 
4584 #ifdef __ARCH_WANT_SYS_SIGNAL
4585 /*
4586  * For backwards compatibility.  Functionality superseded by sigaction.
4587  */
4588 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4589 {
4590 	struct k_sigaction new_sa, old_sa;
4591 	int ret;
4592 
4593 	new_sa.sa.sa_handler = handler;
4594 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4595 	sigemptyset(&new_sa.sa.sa_mask);
4596 
4597 	ret = do_sigaction(sig, &new_sa, &old_sa);
4598 
4599 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4600 }
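/*
 * Informal note on the flags used above: SA_ONESHOT (a.k.a. SA_RESETHAND)
 * resets the disposition to SIG_DFL once the handler fires, and SA_NOMASK
 * (a.k.a. SA_NODEFER) leaves the signal unblocked while the handler runs --
 * the historical System V signal() semantics this compatibility call keeps.
 */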
4601 #endif /* __ARCH_WANT_SYS_SIGNAL */
4602 
4603 #ifdef __ARCH_WANT_SYS_PAUSE
4604 
4605 SYSCALL_DEFINE0(pause)
4606 {
4607 	while (!signal_pending(current)) {
4608 		__set_current_state(TASK_INTERRUPTIBLE);
4609 		schedule();
4610 	}
4611 	return -ERESTARTNOHAND;
4612 }
4613 
4614 #endif
4615 
4616 static int sigsuspend(sigset_t *set)
4617 {
4618 	current->saved_sigmask = current->blocked;
4619 	set_current_blocked(set);
4620 
4621 	while (!signal_pending(current)) {
4622 		__set_current_state(TASK_INTERRUPTIBLE);
4623 		schedule();
4624 	}
4625 	set_restore_sigmask();
4626 	return -ERESTARTNOHAND;
4627 }
4628 
4629 /**
4630  *  sys_rt_sigsuspend - atomically replace the signal mask with the
4631  *	@unewset value until a signal is received
4632  *  @unewset: new signal mask value
4633  *  @sigsetsize: size of sigset_t type
4634  */
4635 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4636 {
4637 	sigset_t newset;
4638 
4639 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4640 	if (sigsetsize != sizeof(sigset_t))
4641 		return -EINVAL;
4642 
4643 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4644 		return -EFAULT;
4645 	return sigsuspend(&newset);
4646 }
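/*
 * Classic race-free wait pattern served by this call (user-space sketch;
 * child_exited is an assumed flag set by a SIGCHLD handler):
 *
 *	#include <signal.h>
 *
 *	sigset_t block, orig;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &orig);
 *
 *	while (!child_exited)
 *		sigsuspend(&orig);	// returns -1/EINTR after the handler runs
 *
 *	sigprocmask(SIG_SETMASK, &orig, NULL);
 */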
4647 
4648 #ifdef CONFIG_COMPAT
4649 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4650 {
4651 	sigset_t newset;
4652 
4653 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4654 	if (sigsetsize != sizeof(sigset_t))
4655 		return -EINVAL;
4656 
4657 	if (get_compat_sigset(&newset, unewset))
4658 		return -EFAULT;
4659 	return sigsuspend(&newset);
4660 }
4661 #endif
4662 
4663 #ifdef CONFIG_OLD_SIGSUSPEND
4664 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4665 {
4666 	sigset_t blocked;
4667 	siginitset(&blocked, mask);
4668 	return sigsuspend(&blocked);
4669 }
4670 #endif
4671 #ifdef CONFIG_OLD_SIGSUSPEND3
4672 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4673 {
4674 	sigset_t blocked;
4675 	siginitset(&blocked, mask);
4676 	return sigsuspend(&blocked);
4677 }
4678 #endif
4679 
4680 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4681 {
4682 	return NULL;
4683 }
4684 
4685 static inline void siginfo_buildtime_checks(void)
4686 {
4687 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4688 
4689 	/* Verify the offsets in the two siginfos match */
4690 #define CHECK_OFFSET(field) \
4691 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4692 
4693 	/* kill */
4694 	CHECK_OFFSET(si_pid);
4695 	CHECK_OFFSET(si_uid);
4696 
4697 	/* timer */
4698 	CHECK_OFFSET(si_tid);
4699 	CHECK_OFFSET(si_overrun);
4700 	CHECK_OFFSET(si_value);
4701 
4702 	/* rt */
4703 	CHECK_OFFSET(si_pid);
4704 	CHECK_OFFSET(si_uid);
4705 	CHECK_OFFSET(si_value);
4706 
4707 	/* sigchld */
4708 	CHECK_OFFSET(si_pid);
4709 	CHECK_OFFSET(si_uid);
4710 	CHECK_OFFSET(si_status);
4711 	CHECK_OFFSET(si_utime);
4712 	CHECK_OFFSET(si_stime);
4713 
4714 	/* sigfault */
4715 	CHECK_OFFSET(si_addr);
4716 	CHECK_OFFSET(si_trapno);
4717 	CHECK_OFFSET(si_addr_lsb);
4718 	CHECK_OFFSET(si_lower);
4719 	CHECK_OFFSET(si_upper);
4720 	CHECK_OFFSET(si_pkey);
4721 	CHECK_OFFSET(si_perf_data);
4722 	CHECK_OFFSET(si_perf_type);
4723 
4724 	/* sigpoll */
4725 	CHECK_OFFSET(si_band);
4726 	CHECK_OFFSET(si_fd);
4727 
4728 	/* sigsys */
4729 	CHECK_OFFSET(si_call_addr);
4730 	CHECK_OFFSET(si_syscall);
4731 	CHECK_OFFSET(si_arch);
4732 #undef CHECK_OFFSET
4733 
4734 	/* usb asyncio */
4735 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4736 		     offsetof(struct siginfo, si_addr));
4737 	if (sizeof(int) == sizeof(void __user *)) {
4738 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4739 			     sizeof(void __user *));
4740 	} else {
4741 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4742 			      sizeof_field(struct siginfo, si_uid)) !=
4743 			     sizeof(void __user *));
4744 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4745 			     offsetof(struct siginfo, si_uid));
4746 	}
4747 #ifdef CONFIG_COMPAT
4748 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4749 		     offsetof(struct compat_siginfo, si_addr));
4750 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4751 		     sizeof(compat_uptr_t));
4752 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4753 		     sizeof_field(struct siginfo, si_pid));
4754 #endif
4755 }
4756 
4757 void __init signals_init(void)
4758 {
4759 	siginfo_buildtime_checks();
4760 
4761 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4762 }
4763 
4764 #ifdef CONFIG_KGDB_KDB
4765 #include <linux/kdb.h>
4766 /*
4767  * kdb_send_sig - Allows kdb to send signals without exposing
4768  * signal internals.  This function checks if the required locks are
4769  * available before calling the main signal code, to avoid kdb
4770  * deadlocks.
4771  */
4772 void kdb_send_sig(struct task_struct *t, int sig)
4773 {
4774 	static struct task_struct *kdb_prev_t;
4775 	int new_t, ret;
4776 	if (!spin_trylock(&t->sighand->siglock)) {
4777 		kdb_printf("Can't do kill command now.\n"
4778 			   "The sigmask lock is held somewhere else in "
4779 			   "kernel, try again later\n");
4780 		return;
4781 	}
4782 	new_t = kdb_prev_t != t;
4783 	kdb_prev_t = t;
4784 	if (!task_is_running(t) && new_t) {
4785 		spin_unlock(&t->sighand->siglock);
4786 		kdb_printf("Process is not RUNNING, sending a signal from "
4787 			   "kdb risks deadlock\n"
4788 			   "on the run queue locks. "
4789 			   "The signal has _not_ been sent.\n"
4790 			   "Reissue the kill command if you want to risk "
4791 			   "the deadlock.\n");
4792 		return;
4793 	}
4794 	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4795 	spin_unlock(&t->sighand->siglock);
4796 	if (ret)
4797 		kdb_printf("Fail to deliver Signal %d to process %d.\n",
4798 			   sig, t->pid);
4799 	else
4800 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4801 }
4802 #endif	/* CONFIG_KGDB_KDB */
4803