/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel-generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
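
/*
 * Worked example (illustrative only): on an arch where _NSIG == 64 and
 * _NSIG_BPW == 64, _NSIG_WORDS == 1 and sigmask(sig) == 1 << (sig - 1).
 * With SIGINT (2) and SIGTERM (15) pending, signal->sig[0] has bits 1
 * and 14 set; if SIGINT is blocked, blocked->sig[0] has bit 1 set, so
 * sig[0] &~ blocked[0] leaves only bit 14 and the function returns 1.
 * The switch merely unrolls the generic loop for the common word counts.
 */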

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * clear it themselves.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
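
/*
 * Worked example (illustrative only): ffz(~x) is the index of the lowest
 * set bit of x, and bit n-1 represents signal n.  If SIGSEGV (11) and
 * SIGTERM (15) are both pending and unblocked, x has bits 10 and 14 set;
 * x & SYNCHRONOUS_MASK keeps only bit 10, so ffz(~x) + 1 == 11 and the
 * synchronous SIGSEGV is dequeued ahead of the asynchronous SIGTERM.
 */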

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask was set, %false if it was a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
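
/*
 * Illustrative userspace sketch (not part of this file): the per-user
 * sigpending accounting above is what makes sigqueue(3) eventually fail
 * with EAGAIN.  Assuming SIGRTMIN is blocked so entries accumulate:
 *
 *	union sigval v = { .sival_int = 0 };
 *	while (sigqueue(getpid(), SIGRTMIN, v) == 0)
 *		;				// queues one entry per call
 *	// errno == EAGAIN once RLIMIT_SIGPENDING entries are queued
 *
 * Legacy signals sent by kill() take the override_rlimit path in
 * __send_signal() instead, and fall back to dropping the siginfo rather
 * than failing.
 */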

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
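
/*
 * Illustrative caller sketch (assumed, simplified from the usual
 * get_signal()-style loop; not a verbatim copy of any caller):
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	for (;;) {
 *		signr = dequeue_signal(current, &current->blocked, &info);
 *		if (!signr)
 *			break;			// nothing deliverable left
 *		// ... act on signr/info ...
 *	}
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * The siglock must be held on entry; dequeue_signal() may drop and
 * retake it internally for the posix-timer rescheduling case above.
 */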

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupts must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
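
/*
 * Worked example (illustrative only): a shell with uid == euid == 1000
 * may signal a setuid-root program it launched, because that process
 * keeps real uid 1000 and cred->uid matches tcred->uid in
 * kill_ok_by_cred(); signalling an unrelated uid-0 daemon fails with
 * -EPERM unless the caller has CAP_KILL.  The SIGCONT carve-out above
 * additionally lets a job-control shell continue stopped jobs in its
 * own session regardless of credentials.
 */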

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap
 * will eventually be taken without returning to userland after the
 * existing traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
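
/*
 * Worked example (illustrative only): two SIGINTs sent to a task that
 * has SIGINT blocked coalesce - the second __send_signal() sees
 * legacy_queue() return true and bails out, so at most one legacy
 * siginfo is kept pending.  Two sigqueue()d SIGRTMIN signals both
 * queue, since sig >= SIGRTMIN makes legacy_queue() false.
 */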

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
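
/*
 * Illustrative summary of the special info sentinels handled above
 * (derived from the switch, not a new mechanism):
 *
 *	SEND_SIG_NOINFO -> si_code = SI_USER, si_pid/si_uid taken from
 *		current, i.e. it looks like kill(2)
 *	SEND_SIG_PRIV   -> si_code = SI_KERNEL, si_pid = si_uid = 0
 *	SEND_SIG_FORCED -> no sigqueue entry at all, only the pending
 *		bit is set (the SIGKILL/SIGSTOP fast path)
 */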

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want a signal handler that was blocked to
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGVs etc., for example;
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
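
/*
 * Illustrative caller sketch (hypothetical arch fault handler; the
 * name fault_address is assumed for the example):
 *
 *	siginfo_t info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.si_signo = SIGSEGV;
 *	info.si_code  = SEGV_MAPERR;
 *	info.si_addr  = (void __user *)fault_address;
 *	force_sig_info(SIGSEGV, &info, current);
 *
 * Even if the task blocked or ignored SIGSEGV, the SIG_DFL reset above
 * ensures the fault cannot be silently swallowed.
 */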

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
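
/*
 * Illustrative usage sketch (this mirrors do_send_sig_info() above):
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		// ->sighand is pinned and ->siglock is held here
 *		unlock_task_sighand(p, &flags);
 *	} else {
 *		// p has passed __exit_signal(); treat it as -ESRCH
 *	}
 *
 * lock_task_sighand() is the inline wrapper around this function; a
 * NULL return is the only way to learn that @tsk is past exit.
 */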

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
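
/*
 * Worked mapping (illustrative, mirroring kill(2)):
 *
 *	pid >  0   -> exactly that process
 *	pid == 0   -> every process in the caller's process group
 *	pid == -1  -> every process the caller may signal, except init
 *		      (task_pid_vnr(p) > 1) and the caller's own group
 *	pid < -1   -> every process in process group -pid
 *
 * So kill(0, SIGTERM) and kill(-getpgrp(), SIGTERM) from the same
 * process signal the same set of processes.
 */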

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure in timer_create().  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
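
/*
 * Illustrative lifecycle sketch (assumed, following the posix-timers
 * usage this interface was written for):
 *
 *	q = sigqueue_alloc();		// timer_create(): may fail -> EAGAIN
 *	...
 *	send_sigqueue(q, tsk, group);	// timer expiry: never -EAGAIN; if
 *					// still queued, bumps si_overrun
 *	...
 *	sigqueue_free(q);		// timer_delete(): freed now, or when
 *					// the queued entry is dequeued
 */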

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
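
/*
 * Worked example (illustrative only) of the exit_code decoding above:
 *
 *	exit(3)               -> exit_code 0x0300 -> CLD_EXITED, si_status 3
 *	killed by SIGTERM     -> exit_code 0x000f -> CLD_KILLED, si_status 15
 *	SIGSEGV + core dump   -> exit_code 0x008b -> CLD_DUMPED, si_status 11
 *
 * i.e. the low 7 bits carry the fatal signal number, bit 7 flags a core
 * dump, and a normal exit status lives in bits 8-15.
 */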
1731 
1732 /**
1733  * do_notify_parent_cldstop - notify parent of stopped/continued state change
1734  * @tsk: task reporting the state change
1735  * @for_ptracer: the notification is for ptracer
1736  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1737  *
1738  * Notify @tsk's parent that the stopped/continued state has changed.  If
1739  * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1740  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1741  *
1742  * CONTEXT:
1743  * Must be called with tasklist_lock at least read locked.
1744  */
do_notify_parent_cldstop(struct task_struct * tsk,bool for_ptracer,int why)1745 static void do_notify_parent_cldstop(struct task_struct *tsk,
1746 				     bool for_ptracer, int why)
1747 {
1748 	struct siginfo info;
1749 	unsigned long flags;
1750 	struct task_struct *parent;
1751 	struct sighand_struct *sighand;
1752 	cputime_t utime, stime;
1753 
1754 	if (for_ptracer) {
1755 		parent = tsk->parent;
1756 	} else {
1757 		tsk = tsk->group_leader;
1758 		parent = tsk->real_parent;
1759 	}
1760 
1761 	info.si_signo = SIGCHLD;
1762 	info.si_errno = 0;
1763 	/*
1764 	 * see comment in do_notify_parent() about the following 4 lines
1765 	 */
1766 	rcu_read_lock();
1767 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1768 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1769 	rcu_read_unlock();
1770 
1771 	task_cputime(tsk, &utime, &stime);
1772 	info.si_utime = cputime_to_clock_t(utime);
1773 	info.si_stime = cputime_to_clock_t(stime);
1774 
1775 	info.si_code = why;
1776 	switch (why) {
1777 	case CLD_CONTINUED:
1778 		info.si_status = SIGCONT;
1779 		break;
1780 	case CLD_STOPPED:
1781 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1782 		break;
1783 	case CLD_TRAPPED:
1784 		info.si_status = tsk->exit_code & 0x7f;
1785 		break;
1786 	default:
1787 		BUG();
1788 	}
1789 
1790 	sighand = parent->sighand;
1791 	spin_lock_irqsave(&sighand->siglock, flags);
1792 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1793 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1794 		__group_send_sig_info(SIGCHLD, &info, parent);
1795 	/*
1796 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1797 	 */
1798 	__wake_up_parent(tsk, parent);
1799 	spin_unlock_irqrestore(&sighand->siglock, flags);
1800 }
1801 
1802 static inline int may_ptrace_stop(void)
1803 {
1804 	if (!likely(current->ptrace))
1805 		return 0;
1806 	/*
1807 	 * Are we in the middle of do_coredump?
1808 	 * If so, and our tracer is also part of the coredump, stopping
1809 	 * is a deadlock situation and pointless because our tracer
1810 	 * is dead, so don't allow us to stop.
1811 	 * If SIGKILL was already sent before the caller unlocked
1812 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1813 	 * is safe to enter schedule().
1814 	 *
1815 	 * This is almost outdated: a task with a pending SIGKILL can't
1816 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1817 	 * after SIGKILL was already dequeued.
1818 	 */
1819 	if (unlikely(current->mm->core_state) &&
1820 	    unlikely(current->mm == current->parent->mm))
1821 		return 0;
1822 
1823 	return 1;
1824 }
1825 
1826 /*
1827  * This must be called with current->sighand->siglock held.
1828  *
1829  * This should be the path for all ptrace stops.
1830  * We always set current->last_siginfo while stopped here.
1831  * That makes it a way to test a stopped process for
1832  * being ptrace-stopped vs being job-control-stopped.
1833  *
1834  * If we actually decide not to stop at all because the tracer
1835  * is gone, we keep current->exit_code unless clear_code.
1836  */
1837 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1838 	__releases(&current->sighand->siglock)
1839 	__acquires(&current->sighand->siglock)
1840 {
1841 	bool gstop_done = false;
1842 
1843 	if (arch_ptrace_stop_needed(exit_code, info)) {
1844 		/*
1845 		 * The arch code has something special to do before a
1846 		 * ptrace stop.  This is allowed to block, e.g. for faults
1847 		 * on user stack pages.  We can't keep the siglock while
1848 		 * calling arch_ptrace_stop, so we must release it now.
1849 		 * To preserve proper semantics, we must do this before
1850 		 * any signal bookkeeping like checking group_stop_count.
1851 		 */
1852 		spin_unlock_irq(&current->sighand->siglock);
1853 		arch_ptrace_stop(exit_code, info);
1854 		spin_lock_irq(&current->sighand->siglock);
1855 	}
1856 
1857 	/*
1858 	 * We're committing to trapping.  TRACED should be visible before
1859 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1860 	 * Also, transition to TRACED and updates to ->jobctl should be
1861 	 * atomic with respect to siglock and should be done after the arch
1862 	 * hook as siglock is released and regrabbed across it.
1863 	 * schedule() will not sleep if there is a pending signal that
1864 	 * can awaken the task.
1865 	 */
1866 	set_current_state(TASK_TRACED);
1867 
1868 	current->last_siginfo = info;
1869 	current->exit_code = exit_code;
1870 
1871 	/*
1872 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
1873 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1874 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
1875 	 * could be clear now.  We act as if SIGCONT is received after
1876 	 * TASK_TRACED is entered - ignore it.
1877 	 */
1878 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1879 		gstop_done = task_participate_group_stop(current);
1880 
1881 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1882 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1883 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1884 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1885 
1886 	/* entering a trap, clear TRAPPING */
1887 	task_clear_jobctl_trapping(current);
1888 
1889 	spin_unlock_irq(&current->sighand->siglock);
1890 	read_lock(&tasklist_lock);
1891 	if (may_ptrace_stop()) {
1892 		/*
1893 		 * Notify parents of the stop.
1894 		 *
1895 		 * While ptraced, there are two parents - the ptracer and
1896 		 * the real_parent of the group_leader.  The ptracer should
1897 		 * know about every stop while the real parent is only
1898 		 * interested in the completion of group stop.  The states
1899 		 * for the two don't interact with each other.  Notify
1900 		 * separately unless they're gonna be duplicates.
1901 		 */
1902 		do_notify_parent_cldstop(current, true, why);
1903 		if (gstop_done && ptrace_reparented(current))
1904 			do_notify_parent_cldstop(current, false, why);
1905 
1906 		/*
1907 		 * Don't want to allow preemption here, because
1908 		 * sys_ptrace() needs this task to be inactive.
1909 		 *
1910 		 * XXX: implement read_unlock_no_resched().
1911 		 */
1912 		preempt_disable();
1913 		read_unlock(&tasklist_lock);
1914 		preempt_enable_no_resched();
1915 		freezable_schedule();
1916 	} else {
1917 		/*
1918 		 * By the time we got the lock, our tracer went away.
1919 		 * Don't drop the lock yet, another tracer may come.
1920 		 *
1921 		 * If @gstop_done, the ptracer went away between group stop
1922 		 * completion and here.  During detach, it would have set
1923 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
1924 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
1925 		 * the real parent of the group stop completion is enough.
1926 		 */
1927 		if (gstop_done)
1928 			do_notify_parent_cldstop(current, false, why);
1929 
1930 		/* tasklist protects us from ptrace_freeze_traced() */
1931 		__set_current_state(TASK_RUNNING);
1932 		if (clear_code)
1933 			current->exit_code = 0;
1934 		read_unlock(&tasklist_lock);
1935 	}
1936 
1937 	/*
1938 	 * We are back.  Now reacquire the siglock before touching
1939 	 * last_siginfo, so that we are sure to have synchronized with
1940 	 * any signal-sending on another CPU that wants to examine it.
1941 	 */
1942 	spin_lock_irq(&current->sighand->siglock);
1943 	current->last_siginfo = NULL;
1944 
1945 	/* LISTENING can be set only during STOP traps, clear it */
1946 	current->jobctl &= ~JOBCTL_LISTENING;
1947 
1948 	/*
1949 	 * Queued signals ignored us while we were stopped for tracing.
1950 	 * So check for any that we should take before resuming user mode.
1951 	 * This sets TIF_SIGPENDING, but never clears it.
1952 	 */
1953 	recalc_sigpending_tsk(current);
1954 }
1955 
1956 static void ptrace_do_notify(int signr, int exit_code, int why)
1957 {
1958 	siginfo_t info;
1959 
1960 	memset(&info, 0, sizeof info);
1961 	info.si_signo = signr;
1962 	info.si_code = exit_code;
1963 	info.si_pid = task_pid_vnr(current);
1964 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1965 
1966 	/* Let the debugger run.  */
1967 	ptrace_stop(exit_code, why, 1, &info);
1968 }
1969 
1970 void ptrace_notify(int exit_code)
1971 {
1972 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1973 	if (unlikely(current->task_works))
1974 		task_work_run();
1975 
1976 	spin_lock_irq(&current->sighand->siglock);
1977 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1978 	spin_unlock_irq(&current->sighand->siglock);
1979 }
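
/*
 * Illustrative userspace sketch (hypothetical helper, not part of this
 * file's build): a tracer sees the exit_code passed to ptrace_notify()
 * in its waitpid() status, with the PTRACE_EVENT_* number sitting above
 * the SIGTRAP bits:
 *
 *	#include <signal.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	static int is_event_stop(int status, int event)
 *	{
 *		return WIFSTOPPED(status) &&
 *		       status >> 8 == (SIGTRAP | event << 8);
 *	}
 *
 * e.g. is_event_stop(status, PTRACE_EVENT_EXEC) matches the trap that
 * ptrace_event() reports through ptrace_notify() after execve().
 */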
1980 
1981 /**
1982  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1983  * @signr: signr causing group stop if initiating
1984  *
1985  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1986  * and participate in it.  If already set, participate in the existing
1987  * group stop.  If participated in a group stop (and thus slept), %true is
1988  * returned with siglock released.
1989  *
1990  * If ptraced, this function doesn't handle stop itself.  Instead,
1991  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1992  * untouched.  The caller must ensure that INTERRUPT trap handling takes
1993  * places afterwards.
1994  * place afterwards.
1995  * CONTEXT:
1996  * Must be called with @current->sighand->siglock held, which is released
1997  * on %true return.
1998  *
1999  * RETURNS:
2000  * %false if group stop is already cancelled or ptrace trap is scheduled.
2001  * %true if participated in group stop.
2002  */
2003 static bool do_signal_stop(int signr)
2004 	__releases(&current->sighand->siglock)
2005 {
2006 	struct signal_struct *sig = current->signal;
2007 
2008 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2009 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2010 		struct task_struct *t;
2011 
2012 		/* signr will be recorded in task->jobctl for retries */
2013 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2014 
2015 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2016 		    unlikely(signal_group_exit(sig)))
2017 			return false;
2018 		/*
2019 		 * There is no group stop already in progress.  We must
2020 		 * initiate one now.
2021 		 *
2022 		 * While ptraced, a task may be resumed while group stop is
2023 		 * still in effect and then receive a stop signal and
2024 		 * initiate another group stop.  This deviates from the
2025 		 * usual behavior as two consecutive stop signals can't
2026 		 * cause two group stops when !ptraced.  That is why we
2027 		 * also check !task_is_stopped(t) below.
2028 		 *
2029 		 * The condition can be distinguished by testing whether
2030 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2031 		 * group_exit_code in such case.
2032 		 *
2033 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2034 		 * an intervening stop signal is required to cause two
2035 		 * continued events regardless of ptrace.
2036 		 */
2037 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2038 			sig->group_exit_code = signr;
2039 
2040 		sig->group_stop_count = 0;
2041 
2042 		if (task_set_jobctl_pending(current, signr | gstop))
2043 			sig->group_stop_count++;
2044 
2045 		t = current;
2046 		while_each_thread(current, t) {
2047 			/*
2048 			 * Setting state to TASK_STOPPED for a group
2049 			 * stop is always done with the siglock held,
2050 			 * so this check has no races.
2051 			 */
2052 			if (!task_is_stopped(t) &&
2053 			    task_set_jobctl_pending(t, signr | gstop)) {
2054 				sig->group_stop_count++;
2055 				if (likely(!(t->ptrace & PT_SEIZED)))
2056 					signal_wake_up(t, 0);
2057 				else
2058 					ptrace_trap_notify(t);
2059 			}
2060 		}
2061 	}
2062 
2063 	if (likely(!current->ptrace)) {
2064 		int notify = 0;
2065 
2066 		/*
2067 		 * If there are no other threads in the group, or if there
2068 		 * is a group stop in progress and we are the last to stop,
2069 		 * report to the parent.
2070 		 */
2071 		if (task_participate_group_stop(current))
2072 			notify = CLD_STOPPED;
2073 
2074 		__set_current_state(TASK_STOPPED);
2075 		spin_unlock_irq(&current->sighand->siglock);
2076 
2077 		/*
2078 		 * Notify the parent of the group stop completion.  Because
2079 		 * we're not holding either the siglock or tasklist_lock
2080 		 * here, a ptracer may attach in between; however, this is for
2081 		 * group stop and should always be delivered to the real
2082 		 * parent of the group leader.  The new ptracer will get
2083 		 * its notification when this task transitions into
2084 		 * TASK_TRACED.
2085 		 */
2086 		if (notify) {
2087 			read_lock(&tasklist_lock);
2088 			do_notify_parent_cldstop(current, false, notify);
2089 			read_unlock(&tasklist_lock);
2090 		}
2091 
2092 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2093 		freezable_schedule();
2094 		return true;
2095 	} else {
2096 		/*
2097 		 * While ptraced, group stop is handled by STOP trap.
2098 		 * Schedule it and let the caller deal with it.
2099 		 */
2100 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2101 		return false;
2102 	}
2103 }
2104 
2105 /**
2106  * do_jobctl_trap - take care of ptrace jobctl traps
2107  *
2108  * When PT_SEIZED, it's used for both group stop and explicit
2109  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2110  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2111  * the stop signal; otherwise, %SIGTRAP.
2112  *
2113  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2114  * number as exit_code and no siginfo.
2115  *
2116  * CONTEXT:
2117  * Must be called with @current->sighand->siglock held, which may be
2118  * released and re-acquired before returning with intervening sleep.
2119  */
2120 static void do_jobctl_trap(void)
2121 {
2122 	struct signal_struct *signal = current->signal;
2123 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2124 
2125 	if (current->ptrace & PT_SEIZED) {
2126 		if (!signal->group_stop_count &&
2127 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2128 			signr = SIGTRAP;
2129 		WARN_ON_ONCE(!signr);
2130 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2131 				 CLD_STOPPED);
2132 	} else {
2133 		WARN_ON_ONCE(!signr);
2134 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2135 		current->exit_code = 0;
2136 	}
2137 }
2138 
2139 static int ptrace_signal(int signr, siginfo_t *info)
2140 {
2141 	ptrace_signal_deliver();
2142 	/*
2143 	 * We do not check sig_kernel_stop(signr) but set this marker
2144 	 * unconditionally because we do not know whether debugger will
2145 	 * change signr. This flag has no meaning unless we are going
2146 	 * to stop after return from ptrace_stop(). In this case it will
2147 	 * be checked in do_signal_stop(), we should only stop if it was
2148 	 * not cleared by SIGCONT while we were sleeping. See also the
2149 	 * comment in dequeue_signal().
2150 	 */
2151 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2152 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2153 
2154 	/* We're back.  Did the debugger cancel the sig?  */
2155 	signr = current->exit_code;
2156 	if (signr == 0)
2157 		return signr;
2158 
2159 	current->exit_code = 0;
2160 
2161 	/*
2162 	 * Update the siginfo structure if the signal has
2163 	 * changed.  If the debugger wanted something
2164 	 * specific in the siginfo structure then it should
2165 	 * have updated *info via PTRACE_SETSIGINFO.
2166 	 */
2167 	if (signr != info->si_signo) {
2168 		info->si_signo = signr;
2169 		info->si_errno = 0;
2170 		info->si_code = SI_USER;
2171 		rcu_read_lock();
2172 		info->si_pid = task_pid_vnr(current->parent);
2173 		info->si_uid = from_kuid_munged(current_user_ns(),
2174 						task_uid(current->parent));
2175 		rcu_read_unlock();
2176 	}
2177 
2178 	/* If the (new) signal is now blocked, requeue it.  */
2179 	if (sigismember(&current->blocked, signr)) {
2180 		specific_send_sig_info(signr, info, current);
2181 		signr = 0;
2182 	}
2183 
2184 	return signr;
2185 }
2186 
2187 int get_signal(struct ksignal *ksig)
2188 {
2189 	struct sighand_struct *sighand = current->sighand;
2190 	struct signal_struct *signal = current->signal;
2191 	int signr;
2192 
2193 	if (unlikely(current->task_works))
2194 		task_work_run();
2195 
2196 	if (unlikely(uprobe_deny_signal()))
2197 		return 0;
2198 
2199 	/*
2200 	 * Do this once, we can't return to user-mode if freezing() == T.
2201 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2202 	 * thus do not need another check after return.
2203 	 */
2204 	try_to_freeze();
2205 
2206 relock:
2207 	spin_lock_irq(&sighand->siglock);
2208 	/*
2209 	 * Every stopped thread goes here after wakeup. Check to see if
2210 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2211 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2212 	 */
2213 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2214 		int why;
2215 
2216 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2217 			why = CLD_CONTINUED;
2218 		else
2219 			why = CLD_STOPPED;
2220 
2221 		signal->flags &= ~SIGNAL_CLD_MASK;
2222 
2223 		spin_unlock_irq(&sighand->siglock);
2224 
2225 		/*
2226 		 * Notify the parent that we're continuing.  This event is
2227 		 * always per-process and doesn't make a whole lot of sense
2228 		 * for ptracers, who shouldn't consume the state via
2229 		 * wait(2) either, but, for backward compatibility, notify
2230 		 * the ptracer of the group leader too unless it's gonna be
2231 		 * a duplicate.
2232 		 */
2233 		read_lock(&tasklist_lock);
2234 		do_notify_parent_cldstop(current, false, why);
2235 
2236 		if (ptrace_reparented(current->group_leader))
2237 			do_notify_parent_cldstop(current->group_leader,
2238 						true, why);
2239 		read_unlock(&tasklist_lock);
2240 
2241 		goto relock;
2242 	}
2243 
2244 	/* Has this task already been marked for death? */
2245 	if (signal_group_exit(signal)) {
2246 		ksig->info.si_signo = signr = SIGKILL;
2247 		sigdelset(&current->pending.signal, SIGKILL);
2248 		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2249 				&sighand->action[SIGKILL - 1]);
2250 		recalc_sigpending();
2251 		goto fatal;
2252 	}
2253 
2254 	for (;;) {
2255 		struct k_sigaction *ka;
2256 
2257 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2258 		    do_signal_stop(0))
2259 			goto relock;
2260 
2261 		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2262 			do_jobctl_trap();
2263 			spin_unlock_irq(&sighand->siglock);
2264 			goto relock;
2265 		}
2266 
2267 		/*
2268 		 * Signals generated by the execution of an instruction
2269 		 * need to be delivered before any other pending signals
2270 		 * so that the instruction pointer in the signal stack
2271 		 * frame points to the faulting instruction.
2272 		 */
2273 		signr = dequeue_synchronous_signal(&ksig->info);
2274 		if (!signr)
2275 			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2276 
2277 		if (!signr)
2278 			break; /* will return 0 */
2279 
2280 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2281 			signr = ptrace_signal(signr, &ksig->info);
2282 			if (!signr)
2283 				continue;
2284 		}
2285 
2286 		ka = &sighand->action[signr-1];
2287 
2288 		/* Trace actually delivered signals. */
2289 		trace_signal_deliver(signr, &ksig->info, ka);
2290 
2291 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2292 			continue;
2293 		if (ka->sa.sa_handler != SIG_DFL) {
2294 			/* Run the handler.  */
2295 			ksig->ka = *ka;
2296 
2297 			if (ka->sa.sa_flags & SA_ONESHOT)
2298 				ka->sa.sa_handler = SIG_DFL;
2299 
2300 			break; /* will return non-zero "signr" value */
2301 		}
2302 
2303 		/*
2304 		 * Now we are doing the default action for this signal.
2305 		 */
2306 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2307 			continue;
2308 
2309 		/*
2310 		 * Global init gets no signals it doesn't want.
2311 		 * Container-init gets no signals it doesn't want from the same
2312 		 * container.
2313 		 *
2314 		 * Note that if global/container-init sees a sig_kernel_only()
2315 		 * signal here, the signal must have been generated internally
2316 		 * or must have come from an ancestor namespace. In either
2317 		 * case, the signal cannot be dropped.
2318 		 */
2319 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2320 				!sig_kernel_only(signr))
2321 			continue;
2322 
2323 		if (sig_kernel_stop(signr)) {
2324 			/*
2325 			 * The default action is to stop all threads in
2326 			 * the thread group.  The job control signals
2327 			 * do nothing in an orphaned pgrp, but SIGSTOP
2328 			 * always works.  Note that siglock needs to be
2329 			 * dropped during the call to is_orphaned_pgrp()
2330 			 * because of lock ordering with tasklist_lock.
2331 			 * This allows an intervening SIGCONT to be posted.
2332 			 * We need to check for that and bail out if necessary.
2333 			 */
2334 			if (signr != SIGSTOP) {
2335 				spin_unlock_irq(&sighand->siglock);
2336 
2337 				/* signals can be posted during this window */
2338 
2339 				if (is_current_pgrp_orphaned())
2340 					goto relock;
2341 
2342 				spin_lock_irq(&sighand->siglock);
2343 			}
2344 
2345 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2346 				/* It released the siglock.  */
2347 				goto relock;
2348 			}
2349 
2350 			/*
2351 			 * We didn't actually stop, due to a race
2352 			 * with SIGCONT or something like that.
2353 			 */
2354 			continue;
2355 		}
2356 
2357 	fatal:
2358 		spin_unlock_irq(&sighand->siglock);
2359 
2360 		/*
2361 		 * Anything else is fatal, maybe with a core dump.
2362 		 */
2363 		current->flags |= PF_SIGNALED;
2364 
2365 		if (sig_kernel_coredump(signr)) {
2366 			if (print_fatal_signals)
2367 				print_fatal_signal(ksig->info.si_signo);
2368 			proc_coredump_connector(current);
2369 			/*
2370 			 * If it was able to dump core, this kills all
2371 			 * other threads in the group and synchronizes with
2372 			 * their demise.  If we lost the race with another
2373 			 * thread getting here, it set group_exit_code
2374 			 * first and our do_group_exit call below will use
2375 			 * that value and ignore the one we pass it.
2376 			 */
2377 			do_coredump(&ksig->info);
2378 		}
2379 
2380 		/*
2381 		 * Death signals, no core dump.
2382 		 */
2383 		do_group_exit(ksig->info.si_signo);
2384 		/* NOTREACHED */
2385 	}
2386 	spin_unlock_irq(&sighand->siglock);
2387 
2388 	ksig->sig = signr;
2389 	return ksig->sig > 0;
2390 }
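
/*
 * Illustrative userspace sketch (hypothetical program, not part of this
 * file's build): the SA_ONESHOT reset in the delivery loop above is
 * what userspace requests with SA_RESETHAND -- the handler runs once,
 * then the disposition reverts to SIG_DFL:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void once(int sig)
 *	{
 *		write(1, "first\n", 6);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa = { .sa_handler = once,
 *					.sa_flags = SA_RESETHAND };
 *
 *		sigaction(SIGINT, &sa, NULL);
 *		raise(SIGINT);	// runs once()
 *		raise(SIGINT);	// SIG_DFL now: terminates the process
 *		return 0;
 *	}
 */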
2391 
2392 /**
2393  * signal_delivered - finish bookkeeping after a signal is delivered
2394  * @ksig:		kernel signal struct
2395  * @stepping:		nonzero if debugger single-step or block-step in use
2396  *
2397  * This function should be called when a signal has successfully been
2398  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2399  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2400  * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2401  */
2402 static void signal_delivered(struct ksignal *ksig, int stepping)
2403 {
2404 	sigset_t blocked;
2405 
2406 	/* A signal was successfully delivered, and the
2407 	   saved sigmask was stored on the signal frame,
2408 	   and will be restored by sigreturn.  So we can
2409 	   simply clear the restore sigmask flag.  */
2410 	clear_restore_sigmask();
2411 
2412 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2413 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2414 		sigaddset(&blocked, ksig->sig);
2415 	set_current_blocked(&blocked);
2416 	tracehook_signal_handler(stepping);
2417 }
2418 
2419 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2420 {
2421 	if (failed)
2422 		force_sigsegv(ksig->sig, current);
2423 	else
2424 		signal_delivered(ksig, stepping);
2425 }
2426 
2427 /*
2428  * It could be that complete_signal() picked us to notify about the
2429  * group-wide signal. Other threads should be notified now to take
2430  * the shared signals in @which since we will not.
2431  */
2432 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2433 {
2434 	sigset_t retarget;
2435 	struct task_struct *t;
2436 
2437 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2438 	if (sigisemptyset(&retarget))
2439 		return;
2440 
2441 	t = tsk;
2442 	while_each_thread(tsk, t) {
2443 		if (t->flags & PF_EXITING)
2444 			continue;
2445 
2446 		if (!has_pending_signals(&retarget, &t->blocked))
2447 			continue;
2448 		/* Remove the signals this thread can handle. */
2449 		sigandsets(&retarget, &retarget, &t->blocked);
2450 
2451 		if (!signal_pending(t))
2452 			signal_wake_up(t, 0);
2453 
2454 		if (sigisemptyset(&retarget))
2455 			break;
2456 	}
2457 }
2458 
2459 void exit_signals(struct task_struct *tsk)
2460 {
2461 	int group_stop = 0;
2462 	sigset_t unblocked;
2463 
2464 	/*
2465 	 * @tsk is about to have PF_EXITING set - lock out users which
2466 	 * expect stable threadgroup.
2467 	 */
2468 	threadgroup_change_begin(tsk);
2469 
2470 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2471 		tsk->flags |= PF_EXITING;
2472 		threadgroup_change_end(tsk);
2473 		return;
2474 	}
2475 
2476 	spin_lock_irq(&tsk->sighand->siglock);
2477 	/*
2478 	 * From now this task is not visible for group-wide signals,
2479 	 * see wants_signal(), do_signal_stop().
2480 	 */
2481 	tsk->flags |= PF_EXITING;
2482 
2483 	threadgroup_change_end(tsk);
2484 
2485 	if (!signal_pending(tsk))
2486 		goto out;
2487 
2488 	unblocked = tsk->blocked;
2489 	signotset(&unblocked);
2490 	retarget_shared_pending(tsk, &unblocked);
2491 
2492 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2493 	    task_participate_group_stop(tsk))
2494 		group_stop = CLD_STOPPED;
2495 out:
2496 	spin_unlock_irq(&tsk->sighand->siglock);
2497 
2498 	/*
2499 	 * If group stop has completed, deliver the notification.  This
2500 	 * should always go to the real parent of the group leader.
2501 	 */
2502 	if (unlikely(group_stop)) {
2503 		read_lock(&tasklist_lock);
2504 		do_notify_parent_cldstop(tsk, false, group_stop);
2505 		read_unlock(&tasklist_lock);
2506 	}
2507 }
2508 
2509 EXPORT_SYMBOL(recalc_sigpending);
2510 EXPORT_SYMBOL_GPL(dequeue_signal);
2511 EXPORT_SYMBOL(flush_signals);
2512 EXPORT_SYMBOL(force_sig);
2513 EXPORT_SYMBOL(send_sig);
2514 EXPORT_SYMBOL(send_sig_info);
2515 EXPORT_SYMBOL(sigprocmask);
2516 
2517 /*
2518  * System call entry points.
2519  */
2520 
2521 /**
2522  *  sys_restart_syscall - restart a system call
2523  */
2524 SYSCALL_DEFINE0(restart_syscall)
2525 {
2526 	struct restart_block *restart = &current->restart_block;
2527 	return restart->fn(restart);
2528 }
2529 
2530 long do_no_restart_syscall(struct restart_block *param)
2531 {
2532 	return -EINTR;
2533 }
2534 
2535 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2536 {
2537 	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2538 		sigset_t newblocked;
2539 		/* A set of now blocked but previously unblocked signals. */
2540 		sigandnsets(&newblocked, newset, &current->blocked);
2541 		retarget_shared_pending(tsk, &newblocked);
2542 	}
2543 	tsk->blocked = *newset;
2544 	recalc_sigpending();
2545 }
2546 
2547 /**
2548  * set_current_blocked - change current->blocked mask
2549  * @newset: new mask
2550  *
2551  * It is wrong to change ->blocked directly, this helper should be used
2552  * to ensure the process can't miss a shared signal we are going to block.
2553  */
2554 void set_current_blocked(sigset_t *newset)
2555 {
2556 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2557 	__set_current_blocked(newset);
2558 }
2559 
2560 void __set_current_blocked(const sigset_t *newset)
2561 {
2562 	struct task_struct *tsk = current;
2563 
2564 	/*
2565 	 * In case the signal mask hasn't changed, there is nothing we need
2566 	 * to do. The current->blocked shouldn't be modified by other task.
2567 	 */
2568 	if (sigequalsets(&tsk->blocked, newset))
2569 		return;
2570 
2571 	spin_lock_irq(&tsk->sighand->siglock);
2572 	__set_task_blocked(tsk, newset);
2573 	spin_unlock_irq(&tsk->sighand->siglock);
2574 }
2575 
2576 /*
2577  * This is also useful for kernel threads that want to temporarily
2578  * (or permanently) block certain signals.
2579  *
2580  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2581  * interface happily blocks "unblockable" signals like SIGKILL
2582  * and friends.
2583  */
2584 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2585 {
2586 	struct task_struct *tsk = current;
2587 	sigset_t newset;
2588 
2589 	/* Lockless, only current can change ->blocked, never from irq */
2590 	if (oldset)
2591 		*oldset = tsk->blocked;
2592 
2593 	switch (how) {
2594 	case SIG_BLOCK:
2595 		sigorsets(&newset, &tsk->blocked, set);
2596 		break;
2597 	case SIG_UNBLOCK:
2598 		sigandnsets(&newset, &tsk->blocked, set);
2599 		break;
2600 	case SIG_SETMASK:
2601 		newset = *set;
2602 		break;
2603 	default:
2604 		return -EINVAL;
2605 	}
2606 
2607 	__set_current_blocked(&newset);
2608 	return 0;
2609 }
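
/*
 * Illustrative in-kernel sketch (hypothetical kthread helper, not code
 * from this file): unlike the syscall path below, this interface will
 * happily block even SIGKILL, which only makes sense for kernel
 * threads:
 *
 *	static void block_all_signals_for_kthread(void)
 *	{
 *		sigset_t all;
 *
 *		sigfillset(&all);
 *		sigprocmask(SIG_SETMASK, &all, NULL);
 *	}
 */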
2610 
2611 /**
2612  *  sys_rt_sigprocmask - change the list of currently blocked signals
2613  *  @how: whether to add, remove, or set signals
2614  *  @nset: signals to add, remove, or set (if non-null)
2615  *  @oset: previous value of signal mask if non-null
2616  *  @sigsetsize: size of sigset_t type
2617  */
2618 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2619 		sigset_t __user *, oset, size_t, sigsetsize)
2620 {
2621 	sigset_t old_set, new_set;
2622 	int error;
2623 
2624 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2625 	if (sigsetsize != sizeof(sigset_t))
2626 		return -EINVAL;
2627 
2628 	old_set = current->blocked;
2629 
2630 	if (nset) {
2631 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2632 			return -EFAULT;
2633 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2634 
2635 		error = sigprocmask(how, &new_set, NULL);
2636 		if (error)
2637 			return error;
2638 	}
2639 
2640 	if (oset) {
2641 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2642 			return -EFAULT;
2643 	}
2644 
2645 	return 0;
2646 }
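
/*
 * Illustrative userspace sketch (hypothetical helper, not part of this
 * file's build): the libc sigprocmask(3) wrapper enters the kernel
 * through rt_sigprocmask above; note that SIGKILL/SIGSTOP are silently
 * dropped from the new set rather than rejected:
 *
 *	#include <signal.h>
 *
 *	int block_sigint(sigset_t *old)
 *	{
 *		sigset_t set;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		return sigprocmask(SIG_BLOCK, &set, old);
 *	}
 */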
2647 
2648 #ifdef CONFIG_COMPAT
2649 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2650 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2651 {
2652 #ifdef __BIG_ENDIAN
2653 	sigset_t old_set = current->blocked;
2654 
2655 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2656 	if (sigsetsize != sizeof(sigset_t))
2657 		return -EINVAL;
2658 
2659 	if (nset) {
2660 		compat_sigset_t new32;
2661 		sigset_t new_set;
2662 		int error;
2663 		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2664 			return -EFAULT;
2665 
2666 		sigset_from_compat(&new_set, &new32);
2667 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2668 
2669 		error = sigprocmask(how, &new_set, NULL);
2670 		if (error)
2671 			return error;
2672 	}
2673 	if (oset) {
2674 		compat_sigset_t old32;
2675 		sigset_to_compat(&old32, &old_set);
2676 		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2677 			return -EFAULT;
2678 	}
2679 	return 0;
2680 #else
2681 	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2682 				  (sigset_t __user *)oset, sigsetsize);
2683 #endif
2684 }
2685 #endif
2686 
2687 static int do_sigpending(void *set, unsigned long sigsetsize)
2688 {
2689 	if (sigsetsize > sizeof(sigset_t))
2690 		return -EINVAL;
2691 
2692 	spin_lock_irq(&current->sighand->siglock);
2693 	sigorsets(set, &current->pending.signal,
2694 		  &current->signal->shared_pending.signal);
2695 	spin_unlock_irq(&current->sighand->siglock);
2696 
2697 	/* Outside the lock because only this thread touches it.  */
2698 	sigandsets(set, &current->blocked, set);
2699 	return 0;
2700 }
2701 
2702 /**
2703  *  sys_rt_sigpending - examine a pending signal that has been raised
2704  *			while blocked
2705  *  @uset: stores pending signals
2706  *  @sigsetsize: size of sigset_t type, at most sizeof(sigset_t)
2707  */
2708 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2709 {
2710 	sigset_t set;
2711 	int err = do_sigpending(&set, sigsetsize);
2712 	if (!err && copy_to_user(uset, &set, sigsetsize))
2713 		err = -EFAULT;
2714 	return err;
2715 }
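
/*
 * Illustrative userspace sketch (hypothetical helper, not part of this
 * file's build): sigpending(2) reports signals raised while blocked,
 * i.e. the (private | shared) & blocked set built in do_sigpending():
 *
 *	#include <signal.h>
 *
 *	int sigint_is_pending(void)
 *	{
 *		sigset_t pend;
 *
 *		if (sigpending(&pend))
 *			return -1;
 *		return sigismember(&pend, SIGINT);
 *	}
 */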
2716 
2717 #ifdef CONFIG_COMPAT
2718 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2719 		compat_size_t, sigsetsize)
2720 {
2721 #ifdef __BIG_ENDIAN
2722 	sigset_t set;
2723 	int err = do_sigpending(&set, sigsetsize);
2724 	if (!err) {
2725 		compat_sigset_t set32;
2726 		sigset_to_compat(&set32, &set);
2727 		/* we can get here only if sigsetsize <= sizeof(set) */
2728 		if (copy_to_user(uset, &set32, sigsetsize))
2729 			err = -EFAULT;
2730 	}
2731 	return err;
2732 #else
2733 	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2734 #endif
2735 }
2736 #endif
2737 
2738 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2739 
2740 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2741 {
2742 	int err;
2743 
2744 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2745 		return -EFAULT;
2746 	if (from->si_code < 0)
2747 		return __copy_to_user(to, from, sizeof(siginfo_t))
2748 			? -EFAULT : 0;
2749 	/*
2750 	 * If you change siginfo_t structure, please be sure
2751 	 * this code is fixed accordingly.
2752 	 * Please remember to update the signalfd_copyinfo() function
2753 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2754 	 * It should never copy any pad contained in the structure
2755 	 * to avoid security leaks, but must copy the generic
2756 	 * 3 ints plus the relevant union member.
2757 	 */
2758 	err = __put_user(from->si_signo, &to->si_signo);
2759 	err |= __put_user(from->si_errno, &to->si_errno);
2760 	err |= __put_user((short)from->si_code, &to->si_code);
2761 	switch (from->si_code & __SI_MASK) {
2762 	case __SI_KILL:
2763 		err |= __put_user(from->si_pid, &to->si_pid);
2764 		err |= __put_user(from->si_uid, &to->si_uid);
2765 		break;
2766 	case __SI_TIMER:
2767 		 err |= __put_user(from->si_tid, &to->si_tid);
2768 		 err |= __put_user(from->si_overrun, &to->si_overrun);
2769 		 err |= __put_user(from->si_ptr, &to->si_ptr);
2770 		break;
2771 	case __SI_POLL:
2772 		err |= __put_user(from->si_band, &to->si_band);
2773 		err |= __put_user(from->si_fd, &to->si_fd);
2774 		break;
2775 	case __SI_FAULT:
2776 		err |= __put_user(from->si_addr, &to->si_addr);
2777 #ifdef __ARCH_SI_TRAPNO
2778 		err |= __put_user(from->si_trapno, &to->si_trapno);
2779 #endif
2780 #ifdef BUS_MCEERR_AO
2781 		/*
2782 		 * Other callers might not initialize the si_lsb field,
2783 		 * so check explicitly for the right codes here.
2784 		 */
2785 		if (from->si_signo == SIGBUS &&
2786 		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2787 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2788 #endif
2789 #ifdef SEGV_BNDERR
2790 		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2791 			err |= __put_user(from->si_lower, &to->si_lower);
2792 			err |= __put_user(from->si_upper, &to->si_upper);
2793 		}
2794 #endif
2795 		break;
2796 	case __SI_CHLD:
2797 		err |= __put_user(from->si_pid, &to->si_pid);
2798 		err |= __put_user(from->si_uid, &to->si_uid);
2799 		err |= __put_user(from->si_status, &to->si_status);
2800 		err |= __put_user(from->si_utime, &to->si_utime);
2801 		err |= __put_user(from->si_stime, &to->si_stime);
2802 		break;
2803 	case __SI_RT: /* This is not generated by the kernel as of now. */
2804 	case __SI_MESGQ: /* But this is */
2805 		err |= __put_user(from->si_pid, &to->si_pid);
2806 		err |= __put_user(from->si_uid, &to->si_uid);
2807 		err |= __put_user(from->si_ptr, &to->si_ptr);
2808 		break;
2809 #ifdef __ARCH_SIGSYS
2810 	case __SI_SYS:
2811 		err |= __put_user(from->si_call_addr, &to->si_call_addr);
2812 		err |= __put_user(from->si_syscall, &to->si_syscall);
2813 		err |= __put_user(from->si_arch, &to->si_arch);
2814 		break;
2815 #endif
2816 	default: /* this is just in case for now ... */
2817 		err |= __put_user(from->si_pid, &to->si_pid);
2818 		err |= __put_user(from->si_uid, &to->si_uid);
2819 		break;
2820 	}
2821 	return err;
2822 }
2823 
2824 #endif
2825 
2826 /**
2827  *  do_sigtimedwait - wait for queued signals specified in @which
2828  *  @which: queued signals to wait for
2829  *  @info: if non-null, the signal's siginfo is returned here
2830  *  @ts: upper bound on process time suspension
2831  */
2832 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2833 			const struct timespec *ts)
2834 {
2835 	struct task_struct *tsk = current;
2836 	long timeout = MAX_SCHEDULE_TIMEOUT;
2837 	sigset_t mask = *which;
2838 	int sig;
2839 
2840 	if (ts) {
2841 		if (!timespec_valid(ts))
2842 			return -EINVAL;
2843 		timeout = timespec_to_jiffies(ts);
2844 		/*
2845 		 * We can be close to the next tick, add another one
2846 		 * to ensure we will wait at least the time asked for.
2847 		 */
2848 		if (ts->tv_sec || ts->tv_nsec)
2849 			timeout++;
2850 	}
2851 
2852 	/*
2853 	 * Invert the set of allowed signals to get those we want to block.
2854 	 */
2855 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2856 	signotset(&mask);
2857 
2858 	spin_lock_irq(&tsk->sighand->siglock);
2859 	sig = dequeue_signal(tsk, &mask, info);
2860 	if (!sig && timeout) {
2861 		/*
2862 		 * None ready, temporarily unblock those we're interested
2863 		 * while we are sleeping in so that we'll be awakened when
2864 		 * they arrive. Unblocking is always fine, we can avoid
2865 		 * set_current_blocked().
2866 		 */
2867 		tsk->real_blocked = tsk->blocked;
2868 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2869 		recalc_sigpending();
2870 		spin_unlock_irq(&tsk->sighand->siglock);
2871 
2872 		timeout = freezable_schedule_timeout_interruptible(timeout);
2873 
2874 		spin_lock_irq(&tsk->sighand->siglock);
2875 		__set_task_blocked(tsk, &tsk->real_blocked);
2876 		sigemptyset(&tsk->real_blocked);
2877 		sig = dequeue_signal(tsk, &mask, info);
2878 	}
2879 	spin_unlock_irq(&tsk->sighand->siglock);
2880 
2881 	if (sig)
2882 		return sig;
2883 	return timeout ? -EINTR : -EAGAIN;
2884 }
2885 
2886 /**
2887  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2888  *			in @uthese
2889  *  @uthese: queued signals to wait for
2890  *  @uinfo: if non-null, the signal's siginfo is returned here
2891  *  @uts: upper bound on process time suspension
2892  *  @sigsetsize: size of sigset_t type
2893  */
2894 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2895 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2896 		size_t, sigsetsize)
2897 {
2898 	sigset_t these;
2899 	struct timespec ts;
2900 	siginfo_t info;
2901 	int ret;
2902 
2903 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2904 	if (sigsetsize != sizeof(sigset_t))
2905 		return -EINVAL;
2906 
2907 	if (copy_from_user(&these, uthese, sizeof(these)))
2908 		return -EFAULT;
2909 
2910 	if (uts) {
2911 		if (copy_from_user(&ts, uts, sizeof(ts)))
2912 			return -EFAULT;
2913 	}
2914 
2915 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2916 
2917 	if (ret > 0 && uinfo) {
2918 		if (copy_siginfo_to_user(uinfo, &info))
2919 			ret = -EFAULT;
2920 	}
2921 
2922 	return ret;
2923 }
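
/*
 * Illustrative userspace sketch (hypothetical helper, not part of this
 * file's build): synchronously wait for SIGTERM with a one second
 * timeout; -1/EAGAIN corresponds to the timeout return computed in
 * do_sigtimedwait() above.  The signal is blocked first so it stays
 * queued instead of being delivered asynchronously:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	int wait_for_sigterm(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGTERM);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		return sigtimedwait(&set, &info, &ts);
 *	}
 */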
2924 
2925 /**
2926  *  sys_kill - send a signal to a process
2927  *  @pid: the PID of the process
2928  *  @sig: signal to be sent
2929  */
2930 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2931 {
2932 	struct siginfo info;
2933 
2934 	info.si_signo = sig;
2935 	info.si_errno = 0;
2936 	info.si_code = SI_USER;
2937 	info.si_pid = task_tgid_vnr(current);
2938 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2939 
2940 	return kill_something_info(sig, &info, pid);
2941 }
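
/*
 * Illustrative userspace sketch (hypothetical helper, not part of this
 * file's build): signal 0 exercises only the permission and existence
 * checks performed on this path; nothing is delivered:
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	// 0 from kill() means the pid exists and we may signal it;
 *	// -1/EPERM means it exists but we may not.
 *	int process_exists(pid_t pid)
 *	{
 *		return kill(pid, 0) == 0 || errno == EPERM;
 *	}
 */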
2942 
2943 static int
2944 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2945 {
2946 	struct task_struct *p;
2947 	int error = -ESRCH;
2948 
2949 	rcu_read_lock();
2950 	p = find_task_by_vpid(pid);
2951 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2952 		error = check_kill_permission(sig, info, p);
2953 		/*
2954 		 * The null signal is a permissions and process existence
2955 		 * probe.  No signal is actually delivered.
2956 		 */
2957 		if (!error && sig) {
2958 			error = do_send_sig_info(sig, info, p, false);
2959 			/*
2960 			 * If lock_task_sighand() failed we pretend the task
2961 			 * dies after receiving the signal. The window is tiny,
2962 			 * and the signal is private anyway.
2963 			 */
2964 			if (unlikely(error == -ESRCH))
2965 				error = 0;
2966 		}
2967 	}
2968 	rcu_read_unlock();
2969 
2970 	return error;
2971 }
2972 
2973 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2974 {
2975 	struct siginfo info = {};
2976 
2977 	info.si_signo = sig;
2978 	info.si_errno = 0;
2979 	info.si_code = SI_TKILL;
2980 	info.si_pid = task_tgid_vnr(current);
2981 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2982 
2983 	return do_send_specific(tgid, pid, sig, &info);
2984 }
2985 
2986 /**
2987  *  sys_tgkill - send signal to one specific thread
2988  *  @tgid: the thread group ID of the thread
2989  *  @pid: the PID of the thread
2990  *  @sig: signal to be sent
2991  *
2992  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2993  *  exists but no longer belongs to the target process. This
2994  *  method solves the problem of threads exiting and PIDs getting reused.
2995  */
2996 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2997 {
2998 	/* This is only valid for single tasks */
2999 	if (pid <= 0 || tgid <= 0)
3000 		return -EINVAL;
3001 
3002 	return do_tkill(tgid, pid, sig);
3003 }
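
/*
 * Illustrative userspace sketch (hypothetical helper, not part of this
 * file's build): older libcs provide no tgkill() wrapper, so it is
 * commonly invoked through syscall(2); the tgid check above makes a
 * recycled thread ID harmless:
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int signal_thread(pid_t tgid, pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */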
3004 
3005 /**
3006  *  sys_tkill - send signal to one specific task
3007  *  @pid: the PID of the task
3008  *  @sig: signal to be sent
3009  *
3010  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3011  */
3012 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3013 {
3014 	/* This is only valid for single tasks */
3015 	if (pid <= 0)
3016 		return -EINVAL;
3017 
3018 	return do_tkill(0, pid, sig);
3019 }
3020 
3021 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3022 {
3023 	/* Not even root can pretend to send signals from the kernel.
3024 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3025 	 */
3026 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3027 	    (task_pid_vnr(current) != pid))
3028 		return -EPERM;
3029 
3030 	info->si_signo = sig;
3031 
3032 	/* POSIX.1b doesn't mention process groups.  */
3033 	return kill_proc_info(sig, info, pid);
3034 }
3035 
3036 /**
3037  *  sys_rt_sigqueueinfo - send signal information to a process
3038  *  @pid: the PID of the process
3039  *  @sig: signal to be sent
3040  *  @uinfo: signal info to be sent
3041  */
3042 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3043 		siginfo_t __user *, uinfo)
3044 {
3045 	siginfo_t info;
3046 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3047 		return -EFAULT;
3048 	return do_rt_sigqueueinfo(pid, sig, &info);
3049 }
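
/*
 * Illustrative userspace sketch (hypothetical helper, not part of this
 * file's build): the libc sigqueue(3) wrapper builds a siginfo with
 * si_code = SI_QUEUE (negative, so it passes the check above) and
 * enters the kernel here:
 *
 *	#include <signal.h>
 *
 *	int queue_value(pid_t pid, int val)
 *	{
 *		union sigval sv = { .sival_int = val };
 *
 *		return sigqueue(pid, SIGRTMIN, sv);
 *	}
 */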
3050 
3051 #ifdef CONFIG_COMPAT
3052 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3053 			compat_pid_t, pid,
3054 			int, sig,
3055 			struct compat_siginfo __user *, uinfo)
3056 {
3057 	siginfo_t info = {};
3058 	int ret = copy_siginfo_from_user32(&info, uinfo);
3059 	if (unlikely(ret))
3060 		return ret;
3061 	return do_rt_sigqueueinfo(pid, sig, &info);
3062 }
3063 #endif
3064 
3065 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3066 {
3067 	/* This is only valid for single tasks */
3068 	if (pid <= 0 || tgid <= 0)
3069 		return -EINVAL;
3070 
3071 	/* Not even root can pretend to send signals from the kernel.
3072 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3073 	 */
3074 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3075 	    (task_pid_vnr(current) != pid))
3076 		return -EPERM;
3077 
3078 	info->si_signo = sig;
3079 
3080 	return do_send_specific(tgid, pid, sig, info);
3081 }
3082 
3083 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3084 		siginfo_t __user *, uinfo)
3085 {
3086 	siginfo_t info;
3087 
3088 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3089 		return -EFAULT;
3090 
3091 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3092 }
3093 
3094 #ifdef CONFIG_COMPAT
3095 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3096 			compat_pid_t, tgid,
3097 			compat_pid_t, pid,
3098 			int, sig,
3099 			struct compat_siginfo __user *, uinfo)
3100 {
3101 	siginfo_t info = {};
3102 
3103 	if (copy_siginfo_from_user32(&info, uinfo))
3104 		return -EFAULT;
3105 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3106 }
3107 #endif
3108 
3109 /*
3110  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3111  */
3112 void kernel_sigaction(int sig, __sighandler_t action)
3113 {
3114 	spin_lock_irq(&current->sighand->siglock);
3115 	current->sighand->action[sig - 1].sa.sa_handler = action;
3116 	if (action == SIG_IGN) {
3117 		sigset_t mask;
3118 
3119 		sigemptyset(&mask);
3120 		sigaddset(&mask, sig);
3121 
3122 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3123 		flush_sigqueue_mask(&mask, &current->pending);
3124 		recalc_sigpending();
3125 	}
3126 	spin_unlock_irq(&current->sighand->siglock);
3127 }
3128 EXPORT_SYMBOL(kernel_sigaction);
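
/*
 * Illustrative in-kernel sketch (hypothetical kthread, not code from
 * this file): allow_signal()/disallow_signal() are thin wrappers around
 * kernel_sigaction() and are the usual way a kthread opts in to
 * signals:
 *
 *	static int my_kthread(void *unused)
 *	{
 *		allow_signal(SIGTERM);
 *		while (!kthread_should_stop()) {
 *			schedule_timeout_interruptible(HZ);
 *			if (signal_pending(current))
 *				flush_signals(current);	// handled, clear it
 *		}
 *		return 0;
 *	}
 */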
3129 
3130 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3131 {
3132 	struct task_struct *p = current, *t;
3133 	struct k_sigaction *k;
3134 	sigset_t mask;
3135 
3136 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3137 		return -EINVAL;
3138 
3139 	k = &p->sighand->action[sig-1];
3140 
3141 	spin_lock_irq(&p->sighand->siglock);
3142 	if (oact)
3143 		*oact = *k;
3144 
3145 	if (act) {
3146 		sigdelsetmask(&act->sa.sa_mask,
3147 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3148 		*k = *act;
3149 		/*
3150 		 * POSIX 3.3.1.3:
3151 		 *  "Setting a signal action to SIG_IGN for a signal that is
3152 		 *   pending shall cause the pending signal to be discarded,
3153 		 *   whether or not it is blocked."
3154 		 *
3155 		 *  "Setting a signal action to SIG_DFL for a signal that is
3156 		 *   pending and whose default action is to ignore the signal
3157 		 *   (for example, SIGCHLD), shall cause the pending signal to
3158 		 *   be discarded, whether or not it is blocked"
3159 		 */
3160 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3161 			sigemptyset(&mask);
3162 			sigaddset(&mask, sig);
3163 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3164 			for_each_thread(p, t)
3165 				flush_sigqueue_mask(&mask, &t->pending);
3166 		}
3167 	}
3168 
3169 	spin_unlock_irq(&p->sighand->siglock);
3170 	return 0;
3171 }
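
/*
 * Illustrative userspace sketch (hypothetical helper, not part of this
 * file's build): the POSIX discard rule implemented above is
 * observable -- setting SIG_IGN flushes a signal that is already
 * pending while blocked:
 *
 *	#include <signal.h>
 *
 *	int pending_after_ign(void)
 *	{
 *		sigset_t set, pend;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);			// pending: it is blocked
 *		signal(SIGUSR1, SIG_IGN);	// discards the pending signal
 *		sigpending(&pend);
 *		return sigismember(&pend, SIGUSR1);	// expected: 0
 *	}
 */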
3172 
3173 static int
3174 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3175 {
3176 	stack_t oss;
3177 	int error;
3178 
3179 	oss.ss_sp = (void __user *) current->sas_ss_sp;
3180 	oss.ss_size = current->sas_ss_size;
3181 	oss.ss_flags = sas_ss_flags(sp);
3182 
3183 	if (uss) {
3184 		void __user *ss_sp;
3185 		size_t ss_size;
3186 		int ss_flags;
3187 
3188 		error = -EFAULT;
3189 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3190 			goto out;
3191 		error = __get_user(ss_sp, &uss->ss_sp) |
3192 			__get_user(ss_flags, &uss->ss_flags) |
3193 			__get_user(ss_size, &uss->ss_size);
3194 		if (error)
3195 			goto out;
3196 
3197 		error = -EPERM;
3198 		if (on_sig_stack(sp))
3199 			goto out;
3200 
3201 		error = -EINVAL;
3202 		/*
3203 		 * Note - this code used to test ss_flags incorrectly:
3204 		 *  	  old code may have been written using ss_flags==0
3205 		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
3206 		 *	  way that worked) - this fix preserves that older
3207 		 *	  mechanism.
3208 		 */
3209 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3210 			goto out;
3211 
3212 		if (ss_flags == SS_DISABLE) {
3213 			ss_size = 0;
3214 			ss_sp = NULL;
3215 		} else {
3216 			error = -ENOMEM;
3217 			if (ss_size < MINSIGSTKSZ)
3218 				goto out;
3219 		}
3220 
3221 		current->sas_ss_sp = (unsigned long) ss_sp;
3222 		current->sas_ss_size = ss_size;
3223 	}
3224 
3225 	error = 0;
3226 	if (uoss) {
3227 		error = -EFAULT;
3228 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3229 			goto out;
3230 		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3231 			__put_user(oss.ss_size, &uoss->ss_size) |
3232 			__put_user(oss.ss_flags, &uoss->ss_flags);
3233 	}
3234 
3235 out:
3236 	return error;
3237 }
3238 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3239 {
3240 	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3241 }
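
/*
 * Illustrative userspace sketch (hypothetical helper, not part of this
 * file's build): a handler that must survive stack overflow needs an
 * alternate stack, installed with sigaltstack(2) and selected
 * per-handler via SA_ONSTACK:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	int handle_segv_on_altstack(void (*handler)(int))
 *	{
 *		stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
 *			       .ss_size = SIGSTKSZ, .ss_flags = 0 };
 *		struct sigaction sa = { .sa_handler = handler,
 *					.sa_flags = SA_ONSTACK };
 *
 *		if (!ss.ss_sp || sigaltstack(&ss, NULL))
 *			return -1;
 *		return sigaction(SIGSEGV, &sa, NULL);
 *	}
 */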
3242 
3243 int restore_altstack(const stack_t __user *uss)
3244 {
3245 	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3246 	/* squash all but EFAULT for now */
3247 	return err == -EFAULT ? err : 0;
3248 }
3249 
3250 int __save_altstack(stack_t __user *uss, unsigned long sp)
3251 {
3252 	struct task_struct *t = current;
3253 	return  __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3254 		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
3255 		__put_user(t->sas_ss_size, &uss->ss_size);
3256 }
3257 
3258 #ifdef CONFIG_COMPAT
3259 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3260 			const compat_stack_t __user *, uss_ptr,
3261 			compat_stack_t __user *, uoss_ptr)
3262 {
3263 	stack_t uss, uoss;
3264 	int ret;
3265 	mm_segment_t seg;
3266 
3267 	if (uss_ptr) {
3268 		compat_stack_t uss32;
3269 
3270 		memset(&uss, 0, sizeof(stack_t));
3271 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3272 			return -EFAULT;
3273 		uss.ss_sp = compat_ptr(uss32.ss_sp);
3274 		uss.ss_flags = uss32.ss_flags;
3275 		uss.ss_size = uss32.ss_size;
3276 	}
3277 	seg = get_fs();
3278 	set_fs(KERNEL_DS);
3279 	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3280 			     (stack_t __force __user *) &uoss,
3281 			     compat_user_stack_pointer());
3282 	set_fs(seg);
3283 	if (ret >= 0 && uoss_ptr)  {
3284 		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3285 		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3286 		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3287 		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3288 			ret = -EFAULT;
3289 	}
3290 	return ret;
3291 }
3292 
3293 int compat_restore_altstack(const compat_stack_t __user *uss)
3294 {
3295 	int err = compat_sys_sigaltstack(uss, NULL);
3296 	/* squash all but -EFAULT for now */
3297 	return err == -EFAULT ? err : 0;
3298 }
3299 
3300 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3301 {
3302 	struct task_struct *t = current;
3303 	return  __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3304 		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
3305 		__put_user(t->sas_ss_size, &uss->ss_size);
3306 }
3307 #endif
3308 
3309 #ifdef __ARCH_WANT_SYS_SIGPENDING
3310 
3311 /**
3312  *  sys_sigpending - examine pending signals
3313  *  @set: where mask of pending signal is returned
3314  */
3315 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3316 {
3317 	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3318 }
3319 
3320 #endif
3321 
3322 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3323 /**
3324  *  sys_sigprocmask - examine and change blocked signals
3325  *  @how: whether to add, remove, or set signals
3326  *  @nset: signals to add or remove (if non-null)
3327  *  @oset: previous value of signal mask if non-null
3328  *
3329  * Some platforms have their own version with special arguments;
3330  * others support only sys_rt_sigprocmask.
3331  */
3332 
3333 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3334 		old_sigset_t __user *, oset)
3335 {
3336 	old_sigset_t old_set, new_set;
3337 	sigset_t new_blocked;
3338 
3339 	old_set = current->blocked.sig[0];
3340 
3341 	if (nset) {
3342 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3343 			return -EFAULT;
3344 
3345 		new_blocked = current->blocked;
3346 
3347 		switch (how) {
3348 		case SIG_BLOCK:
3349 			sigaddsetmask(&new_blocked, new_set);
3350 			break;
3351 		case SIG_UNBLOCK:
3352 			sigdelsetmask(&new_blocked, new_set);
3353 			break;
3354 		case SIG_SETMASK:
3355 			new_blocked.sig[0] = new_set;
3356 			break;
3357 		default:
3358 			return -EINVAL;
3359 		}
3360 
3361 		set_current_blocked(&new_blocked);
3362 	}
3363 
3364 	if (oset) {
3365 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3366 			return -EFAULT;
3367 	}
3368 
3369 	return 0;
3370 }
3371 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3372 
3373 #ifndef CONFIG_ODD_RT_SIGACTION
3374 /**
3375  *  sys_rt_sigaction - alter an action taken by a process
3376  *  @sig: signal to be sent
3377  *  @act: new sigaction
3378  *  @oact: used to save the previous sigaction
3379  *  @sigsetsize: size of sigset_t type
3380  */
3381 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3382 		const struct sigaction __user *, act,
3383 		struct sigaction __user *, oact,
3384 		size_t, sigsetsize)
3385 {
3386 	struct k_sigaction new_sa, old_sa;
3387 	int ret = -EINVAL;
3388 
3389 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3390 	if (sigsetsize != sizeof(sigset_t))
3391 		goto out;
3392 
3393 	if (act) {
3394 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3395 			return -EFAULT;
3396 	}
3397 
3398 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3399 
3400 	if (!ret && oact) {
3401 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3402 			return -EFAULT;
3403 	}
3404 out:
3405 	return ret;
3406 }
3407 #ifdef CONFIG_COMPAT
3408 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3409 		const struct compat_sigaction __user *, act,
3410 		struct compat_sigaction __user *, oact,
3411 		compat_size_t, sigsetsize)
3412 {
3413 	struct k_sigaction new_ka, old_ka;
3414 	compat_sigset_t mask;
3415 #ifdef __ARCH_HAS_SA_RESTORER
3416 	compat_uptr_t restorer;
3417 #endif
3418 	int ret;
3419 
3420 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3421 	if (sigsetsize != sizeof(compat_sigset_t))
3422 		return -EINVAL;
3423 
3424 	if (act) {
3425 		compat_uptr_t handler;
3426 		ret = get_user(handler, &act->sa_handler);
3427 		new_ka.sa.sa_handler = compat_ptr(handler);
3428 #ifdef __ARCH_HAS_SA_RESTORER
3429 		ret |= get_user(restorer, &act->sa_restorer);
3430 		new_ka.sa.sa_restorer = compat_ptr(restorer);
3431 #endif
3432 		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3433 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3434 		if (ret)
3435 			return -EFAULT;
3436 		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3437 	}
3438 
3439 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3440 	if (!ret && oact) {
3441 		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3442 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3443 			       &oact->sa_handler);
3444 		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3445 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3446 #ifdef __ARCH_HAS_SA_RESTORER
3447 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3448 				&oact->sa_restorer);
3449 #endif
3450 	}
3451 	return ret;
3452 }
3453 #endif
3454 #endif /* !CONFIG_ODD_RT_SIGACTION */
3455 
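/*
 * A note on the OR-accumulation pattern above (explanatory comment, not
 * upstream text): get_user()/put_user() return 0 or -EFAULT, and
 * copy_{from,to}_user() return the number of bytes left uncopied, so
 * after OR-ing the results together "ret != 0" simply means "some user
 * access faulted".  The copy-in path normalizes any such value to
 * -EFAULT before acting on it.
 */
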
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */
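
/*
 * Illustrative user-space sketch (an assumption, not part of upstream
 * signal.c): these ancient calls see only word 0 of the blocked set, so
 * only the first 32 (or 64) classic signals are reachable; real-time
 * signals are not.  On an architecture that still wires them up:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	long old = syscall(SYS_sgetmask);
 *	syscall(SYS_ssetmask, old | (1L << (SIGINT - 1)));
 */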

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

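/*
 * Explanatory note with an illustrative sketch (not upstream text):
 * SA_ONESHOT | SA_NOMASK gives signal(2) its historical System V
 * semantics - the disposition is reset to SIG_DFL when the handler
 * fires, and the signal is not blocked while the handler runs.  Code
 * relying on signal() therefore traditionally re-arms itself:
 *
 *	void handler(int sig)
 *	{
 *		signal(sig, handler);	re-install; SA_ONESHOT reset it
 *		...
 *	}
 */
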
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

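/*
 * Explanatory note (not upstream text): -ERESTARTNOHAND means "restart
 * the syscall unless a handler was invoked"; once a handler does run,
 * user space sees the traditional -EINTR from pause(2).  Note also the
 * classic user-space race: testing a flag and then calling pause() can
 * miss a signal delivered in between, which is exactly the window the
 * atomic sigsuspend() below exists to close.
 */
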
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	__set_current_state(TASK_INTERRUPTIBLE);
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

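/*
 * Explanatory note (not upstream text): the caller's mask is parked in
 * ->saved_sigmask, and set_restore_sigmask() flags the task so that the
 * signal-delivery path restores that mask on the way back to user space
 * instead of leaving the temporary mask in place.
 */
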
/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

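/*
 * Illustrative user-space sketch (an assumption, not part of upstream
 * signal.c): the canonical block-check-wait pattern that sigsuspend
 * makes atomic.  flag_set_by_handler is a hypothetical volatile
 * sig_atomic_t set by the SIGUSR1 handler:
 *
 *	#include <signal.h>
 *
 *	sigset_t block, orig;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &orig);
 *	while (!flag_set_by_handler)
 *		sigsuspend(&orig);	waits with SIGUSR1 unblocked
 *	sigprocmask(SIG_SETMASK, &orig, NULL);
 */
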
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian, bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

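/*
 * Explanatory note (not upstream text): KMEM_CACHE(sigqueue, SLAB_PANIC)
 * expands to roughly
 *
 *	kmem_cache_create("sigqueue", sizeof(struct sigqueue),
 *			  __alignof__(struct sigqueue), SLAB_PANIC, NULL);
 *
 * i.e. a slab cache named after the struct.  SLAB_PANIC makes early boot
 * fail loudly if the cache cannot be created, since signal delivery
 * depends on it.
 */
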
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel; try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
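
/*
 * Usage note (not upstream text): kdb's "kill" command is what lands
 * here, e.g. "kill -9 <pid>" at the kdb prompt.  The trylock/unlock
 * pair above only probes that ->siglock is currently free; blocking on
 * a lock from inside the debugger could deadlock the whole machine.
 */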
#endif	/* CONFIG_KGDB_KDB */