1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/coredump.h>
21 #include <linux/security.h>
22 #include <linux/syscalls.h>
23 #include <linux/ptrace.h>
24 #include <linux/signal.h>
25 #include <linux/signalfd.h>
26 #include <linux/ratelimit.h>
27 #include <linux/tracehook.h>
28 #include <linux/capability.h>
29 #include <linux/freezer.h>
30 #include <linux/pid_namespace.h>
31 #include <linux/nsproxy.h>
32 #include <linux/user_namespace.h>
33 #include <linux/uprobes.h>
34 #include <linux/compat.h>
35 #include <linux/cn_proc.h>
36 #include <linux/compiler.h>
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/signal.h>
40 
41 #include <asm/param.h>
42 #include <asm/uaccess.h>
43 #include <asm/unistd.h>
44 #include <asm/siginfo.h>
45 #include <asm/cacheflush.h>
46 #include "audit.h"	/* audit_signal_info() */
47 
48 /*
49  * SLAB caches for signal bits.
50  */
51 
52 static struct kmem_cache *sigqueue_cachep;
53 
54 int print_fatal_signals __read_mostly;
55 
56 static void __user *sig_handler(struct task_struct *t, int sig)
57 {
58 	return t->sighand->action[sig - 1].sa.sa_handler;
59 }
60 
61 static int sig_handler_ignored(void __user *handler, int sig)
62 {
63 	/* Is it explicitly or implicitly ignored? */
64 	return handler == SIG_IGN ||
65 		(handler == SIG_DFL && sig_kernel_ignore(sig));
66 }
67 
68 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
69 {
70 	void __user *handler;
71 
72 	handler = sig_handler(t, sig);
73 
74 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
75 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
76 		return 1;
77 
78 	return sig_handler_ignored(handler, sig);
79 }
80 
81 static int sig_ignored(struct task_struct *t, int sig, bool force)
82 {
83 	/*
84 	 * Blocked signals are never ignored, since the
85 	 * signal handler may change by the time it is
86 	 * unblocked.
87 	 */
88 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
89 		return 0;
90 
91 	/*
92  * Tracers may want to know about even ignored signals, unless it
93  * is SIGKILL, which can't be reported anyway but can be ignored
94  * by a SIGNAL_UNKILLABLE task.
95 	 */
96 	if (t->ptrace && sig != SIGKILL)
97 		return 0;
98 
99 	return sig_task_ignored(t, sig, force);
100 }
101 
102 /*
103  * Re-calculate pending state from the set of locally pending
104  * signals, globally pending signals, and blocked signals.
105  */
106 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
107 {
108 	unsigned long ready;
109 	long i;
110 
111 	switch (_NSIG_WORDS) {
112 	default:
113 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
114 			ready |= signal->sig[i] &~ blocked->sig[i];
115 		break;
116 
117 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
118 		ready |= signal->sig[2] &~ blocked->sig[2];
119 		ready |= signal->sig[1] &~ blocked->sig[1];
120 		ready |= signal->sig[0] &~ blocked->sig[0];
121 		break;
122 
123 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
124 		ready |= signal->sig[0] &~ blocked->sig[0];
125 		break;
126 
127 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
128 	}
129 	return ready != 0;
130 }
131 
132 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
133 
134 static int recalc_sigpending_tsk(struct task_struct *t)
135 {
136 	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
137 	    PENDING(&t->pending, &t->blocked) ||
138 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
139 		set_tsk_thread_flag(t, TIF_SIGPENDING);
140 		return 1;
141 	}
142 	/*
143 	 * We must never clear the flag in another thread, or in current
144 	 * when it's possible the current syscall is returning -ERESTART*.
145 	 * So we don't clear it here; we leave that to callers who know they should.
146 	 */
147 	return 0;
148 }
149 
150 /*
151  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
152  * This is superfluous when called on current, the wakeup is a harmless no-op.
153  */
154 void recalc_sigpending_and_wake(struct task_struct *t)
155 {
156 	if (recalc_sigpending_tsk(t))
157 		signal_wake_up(t, 0);
158 }
159 
160 void recalc_sigpending(void)
161 {
162 	if (!recalc_sigpending_tsk(current) && !freezing(current))
163 		clear_thread_flag(TIF_SIGPENDING);
164 
165 }
166 
167 /* Given the mask, find the first available signal that should be serviced. */
168 
169 #define SYNCHRONOUS_MASK \
170 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
171 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
172 
173 int next_signal(struct sigpending *pending, sigset_t *mask)
174 {
175 	unsigned long i, *s, *m, x;
176 	int sig = 0;
177 
178 	s = pending->signal.sig;
179 	m = mask->sig;
180 
181 	/*
182 	 * Handle the first word specially: it contains the
183 	 * synchronous signals that need to be dequeued first.
184 	 */
185 	x = *s &~ *m;
186 	if (x) {
187 		if (x & SYNCHRONOUS_MASK)
188 			x &= SYNCHRONOUS_MASK;
189 		sig = ffz(~x) + 1;
190 		return sig;
191 	}
192 
193 	switch (_NSIG_WORDS) {
194 	default:
195 		for (i = 1; i < _NSIG_WORDS; ++i) {
196 			x = *++s &~ *++m;
197 			if (!x)
198 				continue;
199 			sig = ffz(~x) + i*_NSIG_BPW + 1;
200 			break;
201 		}
202 		break;
203 
204 	case 2:
205 		x = s[1] &~ m[1];
206 		if (!x)
207 			break;
208 		sig = ffz(~x) + _NSIG_BPW + 1;
209 		break;
210 
211 	case 1:
212 		/* Nothing to do */
213 		break;
214 	}
215 
216 	return sig;
217 }
218 
219 static inline void print_dropped_signal(int sig)
220 {
221 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
222 
223 	if (!print_fatal_signals)
224 		return;
225 
226 	if (!__ratelimit(&ratelimit_state))
227 		return;
228 
229 	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
230 				current->comm, current->pid, sig);
231 }
232 
233 /**
234  * task_set_jobctl_pending - set jobctl pending bits
235  * @task: target task
236  * @mask: pending bits to set
237  *
238  * Set @mask in @task->jobctl.  @mask must be a subset of
239  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
240  * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
241  * cleared.  If @task is already being killed or exiting, this function
242  * becomes a no-op.
243  *
244  * CONTEXT:
245  * Must be called with @task->sighand->siglock held.
246  *
247  * RETURNS:
248  * %true if @mask is set, %false if made a no-op because @task was dying.
249  */
250 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
251 {
252 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
253 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
254 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
255 
256 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
257 		return false;
258 
259 	if (mask & JOBCTL_STOP_SIGMASK)
260 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
261 
262 	task->jobctl |= mask;
263 	return true;
264 }
265 
266 /**
267  * task_clear_jobctl_trapping - clear jobctl trapping bit
268  * @task: target task
269  *
270  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
271  * Clear it and wake up the ptracer.  Note that we don't need any further
272  * locking.  @task->siglock guarantees that @task->parent points to the
273  * ptracer.
274  *
275  * CONTEXT:
276  * Must be called with @task->sighand->siglock held.
277  */
278 void task_clear_jobctl_trapping(struct task_struct *task)
279 {
280 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
281 		task->jobctl &= ~JOBCTL_TRAPPING;
282 		smp_mb();	/* advised by wake_up_bit() */
283 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
284 	}
285 }
286 
287 /**
288  * task_clear_jobctl_pending - clear jobctl pending bits
289  * @task: target task
290  * @mask: pending bits to clear
291  *
292  * Clear @mask from @task->jobctl.  @mask must be subset of
293  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
294  * STOP bits are cleared together.
295  *
296  * If clearing of @mask leaves no stop or trap pending, this function calls
297  * task_clear_jobctl_trapping().
298  *
299  * CONTEXT:
300  * Must be called with @task->sighand->siglock held.
301  */
302 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
303 {
304 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
305 
306 	if (mask & JOBCTL_STOP_PENDING)
307 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
308 
309 	task->jobctl &= ~mask;
310 
311 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
312 		task_clear_jobctl_trapping(task);
313 }
314 
315 /**
316  * task_participate_group_stop - participate in a group stop
317  * @task: task participating in a group stop
318  *
319  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
320  * Group stop states are cleared and the group stop count is consumed if
321  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
322  * stop, the appropriate %SIGNAL_* flags are set.
323  *
324  * CONTEXT:
325  * Must be called with @task->sighand->siglock held.
326  *
327  * RETURNS:
328  * %true if group stop completion should be notified to the parent, %false
329  * otherwise.
330  */
331 static bool task_participate_group_stop(struct task_struct *task)
332 {
333 	struct signal_struct *sig = task->signal;
334 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
335 
336 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
337 
338 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
339 
340 	if (!consume)
341 		return false;
342 
343 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
344 		sig->group_stop_count--;
345 
346 	/*
347 	 * Tell the caller to notify completion iff we are entering into a
348 	 * fresh group stop.  Read comment in do_signal_stop() for details.
349 	 */
350 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
351 		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
352 		return true;
353 	}
354 	return false;
355 }
356 
357 /*
358  * allocate a new signal queue record
359  * - this may be called without locks if and only if t == current, otherwise an
360  *   appropriate lock must be held to stop the target task from exiting
361  */
362 static struct sigqueue *
363 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
364 {
365 	struct sigqueue *q = NULL;
366 	struct user_struct *user;
367 
368 	/*
369 	 * Protect access to @t credentials. This can go away when all
370 	 * callers hold rcu read lock.
371 	 */
372 	rcu_read_lock();
373 	user = get_uid(__task_cred(t)->user);
374 	atomic_inc(&user->sigpending);
375 	rcu_read_unlock();
376 
377 	if (override_rlimit ||
378 	    atomic_read(&user->sigpending) <=
379 			task_rlimit(t, RLIMIT_SIGPENDING)) {
380 		q = kmem_cache_alloc(sigqueue_cachep, flags);
381 	} else {
382 		print_dropped_signal(sig);
383 	}
384 
385 	if (unlikely(q == NULL)) {
386 		atomic_dec(&user->sigpending);
387 		free_uid(user);
388 	} else {
389 		INIT_LIST_HEAD(&q->list);
390 		q->flags = 0;
391 		q->user = user;
392 	}
393 
394 	return q;
395 }
396 
397 static void __sigqueue_free(struct sigqueue *q)
398 {
399 	if (q->flags & SIGQUEUE_PREALLOC)
400 		return;
401 	atomic_dec(&q->user->sigpending);
402 	free_uid(q->user);
403 	kmem_cache_free(sigqueue_cachep, q);
404 }
405 
406 void flush_sigqueue(struct sigpending *queue)
407 {
408 	struct sigqueue *q;
409 
410 	sigemptyset(&queue->signal);
411 	while (!list_empty(&queue->list)) {
412 		q = list_entry(queue->list.next, struct sigqueue , list);
413 		list_del_init(&q->list);
414 		__sigqueue_free(q);
415 	}
416 }
417 
418 /*
419  * Flush all pending signals for this kthread.
420  */
421 void flush_signals(struct task_struct *t)
422 {
423 	unsigned long flags;
424 
425 	spin_lock_irqsave(&t->sighand->siglock, flags);
426 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
427 	flush_sigqueue(&t->pending);
428 	flush_sigqueue(&t->signal->shared_pending);
429 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
430 }
431 
432 static void __flush_itimer_signals(struct sigpending *pending)
433 {
434 	sigset_t signal, retain;
435 	struct sigqueue *q, *n;
436 
437 	signal = pending->signal;
438 	sigemptyset(&retain);
439 
440 	list_for_each_entry_safe(q, n, &pending->list, list) {
441 		int sig = q->info.si_signo;
442 
443 		if (likely(q->info.si_code != SI_TIMER)) {
444 			sigaddset(&retain, sig);
445 		} else {
446 			sigdelset(&signal, sig);
447 			list_del_init(&q->list);
448 			__sigqueue_free(q);
449 		}
450 	}
451 
452 	sigorsets(&pending->signal, &signal, &retain);
453 }
454 
455 void flush_itimer_signals(void)
456 {
457 	struct task_struct *tsk = current;
458 	unsigned long flags;
459 
460 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
461 	__flush_itimer_signals(&tsk->pending);
462 	__flush_itimer_signals(&tsk->signal->shared_pending);
463 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
464 }
465 
466 void ignore_signals(struct task_struct *t)
467 {
468 	int i;
469 
470 	for (i = 0; i < _NSIG; ++i)
471 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
472 
473 	flush_signals(t);
474 }
475 
476 /*
477  * Flush all handlers for a task.
478  */
479 
480 void
481 flush_signal_handlers(struct task_struct *t, int force_default)
482 {
483 	int i;
484 	struct k_sigaction *ka = &t->sighand->action[0];
485 	for (i = _NSIG ; i != 0 ; i--) {
486 		if (force_default || ka->sa.sa_handler != SIG_IGN)
487 			ka->sa.sa_handler = SIG_DFL;
488 		ka->sa.sa_flags = 0;
489 #ifdef __ARCH_HAS_SA_RESTORER
490 		ka->sa.sa_restorer = NULL;
491 #endif
492 		sigemptyset(&ka->sa.sa_mask);
493 		ka++;
494 	}
495 }
496 
497 int unhandled_signal(struct task_struct *tsk, int sig)
498 {
499 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
500 	if (is_global_init(tsk))
501 		return 1;
502 	if (handler != SIG_IGN && handler != SIG_DFL)
503 		return 0;
504 	/* if ptraced, let the tracer determine */
505 	return !tsk->ptrace;
506 }
507 
508 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
509 			   bool *resched_timer)
510 {
511 	struct sigqueue *q, *first = NULL;
512 
513 	/*
514 	 * Collect the siginfo appropriate to this signal.  Check if
515 	 * there is another siginfo for the same signal.
516 	*/
517 	list_for_each_entry(q, &list->list, list) {
518 		if (q->info.si_signo == sig) {
519 			if (first)
520 				goto still_pending;
521 			first = q;
522 		}
523 	}
524 
525 	sigdelset(&list->signal, sig);
526 
527 	if (first) {
528 still_pending:
529 		list_del_init(&first->list);
530 		copy_siginfo(info, &first->info);
531 
532 		*resched_timer =
533 			(first->flags & SIGQUEUE_PREALLOC) &&
534 			(info->si_code == SI_TIMER) &&
535 			(info->si_sys_private);
536 
537 		__sigqueue_free(first);
538 	} else {
539 		/*
540 		 * Ok, it wasn't in the queue.  This must be
541 		 * a fast-pathed signal or we must have been
542 		 * out of queue space.  So zero out the info.
543 		 */
544 		info->si_signo = sig;
545 		info->si_errno = 0;
546 		info->si_code = SI_USER;
547 		info->si_pid = 0;
548 		info->si_uid = 0;
549 	}
550 }
551 
552 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
553 			siginfo_t *info, bool *resched_timer)
554 {
555 	int sig = next_signal(pending, mask);
556 
557 	if (sig)
558 		collect_signal(sig, pending, info, resched_timer);
559 	return sig;
560 }
561 
562 /*
563  * Dequeue a signal and return the element to the caller, which is
564  * expected to free it.
565  *
566  * All callers have to hold the siglock.
567  */
568 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
569 {
570 	bool resched_timer = false;
571 	int signr;
572 
573 	/* We only dequeue private signals from ourselves; we don't let
574 	 * signalfd steal them.
575 	 */
576 	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
577 	if (!signr) {
578 		signr = __dequeue_signal(&tsk->signal->shared_pending,
579 					 mask, info, &resched_timer);
580 		/*
581 		 * itimer signal ?
582 		 *
583 		 * itimers are process shared and we restart periodic
584 		 * itimers in the signal delivery path to prevent DoS
585 		 * attacks in the high resolution timer case. This is
586 		 * compliant with the old way of self-restarting
587 		 * itimers, as the SIGALRM is a legacy signal and only
588 		 * queued once. Changing the restart behaviour to
589 		 * restart the timer in the signal dequeue path is
590 		 * reducing the timer noise on heavy loaded !highres
591 		 * systems too.
592 		 */
593 		if (unlikely(signr == SIGALRM)) {
594 			struct hrtimer *tmr = &tsk->signal->real_timer;
595 
596 			if (!hrtimer_is_queued(tmr) &&
597 			    tsk->signal->it_real_incr.tv64 != 0) {
598 				hrtimer_forward(tmr, tmr->base->get_time(),
599 						tsk->signal->it_real_incr);
600 				hrtimer_restart(tmr);
601 			}
602 		}
603 	}
604 
605 	recalc_sigpending();
606 	if (!signr)
607 		return 0;
608 
609 	if (unlikely(sig_kernel_stop(signr))) {
610 		/*
611 		 * Set a marker that we have dequeued a stop signal.  Our
612 		 * caller might release the siglock and then the pending
613 		 * stop signal it is about to process is no longer in the
614 		 * pending bitmasks, but must still be cleared by a SIGCONT
615 		 * (and overruled by a SIGKILL).  So those cases clear this
616 		 * shared flag after we've set it.  Note that this flag may
617 		 * remain set after the signal we return is ignored or
618 		 * handled.  That doesn't matter because its only purpose
619 		 * is to alert stop-signal processing code when another
620 		 * processor has come along and cleared the flag.
621 		 */
622 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
623 	}
624 	if (resched_timer) {
625 		/*
626 		 * Release the siglock to ensure proper locking order
627 		 * of timer locks outside of siglocks.  Note, we leave
628 		 * irqs disabled here, since the posix-timers code is
629 		 * about to disable them again anyway.
630 		 */
631 		spin_unlock(&tsk->sighand->siglock);
632 		do_schedule_next_timer(info);
633 		spin_lock(&tsk->sighand->siglock);
634 	}
635 	return signr;
636 }
637 
638 /*
639  * Tell a process that it has a new active signal..
640  *
641  * NOTE! we rely on the previous spin_lock to
642  * lock interrupts for us! We can only be called with
643  * "siglock" held, and the local interrupt must
644  * have been disabled when that got acquired!
645  *
646  * No need to set need_resched since signal event passing
647  * goes through ->blocked
648  */
649 void signal_wake_up_state(struct task_struct *t, unsigned int state)
650 {
651 	set_tsk_thread_flag(t, TIF_SIGPENDING);
652 	/*
653 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
654 	 * case. We don't check t->state here because there is a race with it
655 	 * executing on another processor and just now entering stopped state.
656 	 * By using wake_up_state, we ensure the process will wake up and
657 	 * handle its death signal.
658 	 */
659 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
660 		kick_process(t);
661 }
662 
663 /*
664  * Remove signals in mask from the pending set and queue.
665  * Returns 1 if any signals were found.
666  *
667  * All callers must be holding the siglock.
668  */
669 static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
670 {
671 	struct sigqueue *q, *n;
672 	sigset_t m;
673 
674 	sigandsets(&m, mask, &s->signal);
675 	if (sigisemptyset(&m))
676 		return 0;
677 
678 	sigandnsets(&s->signal, &s->signal, mask);
679 	list_for_each_entry_safe(q, n, &s->list, list) {
680 		if (sigismember(mask, q->info.si_signo)) {
681 			list_del_init(&q->list);
682 			__sigqueue_free(q);
683 		}
684 	}
685 	return 1;
686 }
687 
688 static inline int is_si_special(const struct siginfo *info)
689 {
690 	return info <= SEND_SIG_FORCED;
691 }
692 
693 static inline bool si_fromuser(const struct siginfo *info)
694 {
695 	return info == SEND_SIG_NOINFO ||
696 		(!is_si_special(info) && SI_FROMUSER(info));
697 }
698 
699 /*
700  * called with RCU read lock from check_kill_permission()
701  */
702 static int kill_ok_by_cred(struct task_struct *t)
703 {
704 	const struct cred *cred = current_cred();
705 	const struct cred *tcred = __task_cred(t);
706 
707 	if (uid_eq(cred->euid, tcred->suid) ||
708 	    uid_eq(cred->euid, tcred->uid)  ||
709 	    uid_eq(cred->uid,  tcred->suid) ||
710 	    uid_eq(cred->uid,  tcred->uid))
711 		return 1;
712 
713 	if (ns_capable(tcred->user_ns, CAP_KILL))
714 		return 1;
715 
716 	return 0;
717 }
718 
719 /*
720  * Bad permissions for sending the signal
721  * - the caller must hold the RCU read lock
722  */
723 static int check_kill_permission(int sig, struct siginfo *info,
724 				 struct task_struct *t)
725 {
726 	struct pid *sid;
727 	int error;
728 
729 	if (!valid_signal(sig))
730 		return -EINVAL;
731 
732 	if (!si_fromuser(info))
733 		return 0;
734 
735 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
736 	if (error)
737 		return error;
738 
739 	if (!same_thread_group(current, t) &&
740 	    !kill_ok_by_cred(t)) {
741 		switch (sig) {
742 		case SIGCONT:
743 			sid = task_session(t);
744 			/*
745 			 * We don't return the error if sid == NULL. The
746 			 * task was unhashed, the caller must notice this.
747 			 */
748 			if (!sid || sid == task_session(current))
749 				break;
750 		default:
751 			return -EPERM;
752 		}
753 	}
754 
755 	return security_task_kill(t, info, sig, 0);
756 }
757 
758 /**
759  * ptrace_trap_notify - schedule trap to notify ptracer
760  * @t: tracee wanting to notify tracer
761  *
762  * This function schedules sticky ptrace trap which is cleared on the next
763  * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
764  * ptracer.
765  *
766  * If @t is running, STOP trap will be taken.  If trapped for STOP and
767  * ptracer is listening for events, tracee is woken up so that it can
768  * re-trap for the new event.  If trapped otherwise, STOP trap will be
769  * eventually taken without returning to userland after the existing traps
770  * are finished by PTRACE_CONT.
771  *
772  * CONTEXT:
773  * Must be called with @task->sighand->siglock held.
774  */
775 static void ptrace_trap_notify(struct task_struct *t)
776 {
777 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
778 	assert_spin_locked(&t->sighand->siglock);
779 
780 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
781 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
782 }
783 
784 /*
785  * Handle magic process-wide effects of stop/continue signals. Unlike
786  * the signal actions, these happen immediately at signal-generation
787  * time regardless of blocking, ignoring, or handling.  This does the
788  * actual continuing for SIGCONT, but not the actual stopping for stop
789  * signals. The process stop is done as a signal action for SIG_DFL.
790  *
791  * Returns true if the signal should be actually delivered, otherwise
792  * it should be dropped.
793  */
794 static bool prepare_signal(int sig, struct task_struct *p, bool force)
795 {
796 	struct signal_struct *signal = p->signal;
797 	struct task_struct *t;
798 	sigset_t flush;
799 
800 	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
801 		if (!(signal->flags & SIGNAL_GROUP_EXIT))
802 			return sig == SIGKILL;
803 		/*
804 		 * The process is in the middle of dying, nothing to do.
805 		 */
806 	} else if (sig_kernel_stop(sig)) {
807 		/*
808 		 * This is a stop signal.  Remove SIGCONT from all queues.
809 		 */
810 		siginitset(&flush, sigmask(SIGCONT));
811 		flush_sigqueue_mask(&flush, &signal->shared_pending);
812 		for_each_thread(p, t)
813 			flush_sigqueue_mask(&flush, &t->pending);
814 	} else if (sig == SIGCONT) {
815 		unsigned int why;
816 		/*
817 		 * Remove all stop signals from all queues, wake all threads.
818 		 */
819 		siginitset(&flush, SIG_KERNEL_STOP_MASK);
820 		flush_sigqueue_mask(&flush, &signal->shared_pending);
821 		for_each_thread(p, t) {
822 			flush_sigqueue_mask(&flush, &t->pending);
823 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
824 			if (likely(!(t->ptrace & PT_SEIZED)))
825 				wake_up_state(t, __TASK_STOPPED);
826 			else
827 				ptrace_trap_notify(t);
828 		}
829 
830 		/*
831 		 * Notify the parent with CLD_CONTINUED if we were stopped.
832 		 *
833 		 * If we were in the middle of a group stop, we pretend it
834 		 * was already finished, and then continued. Since SIGCHLD
835 		 * doesn't queue we report only CLD_STOPPED, as if the next
836 		 * CLD_CONTINUED was dropped.
837 		 */
838 		why = 0;
839 		if (signal->flags & SIGNAL_STOP_STOPPED)
840 			why |= SIGNAL_CLD_CONTINUED;
841 		else if (signal->group_stop_count)
842 			why |= SIGNAL_CLD_STOPPED;
843 
844 		if (why) {
845 			/*
846 			 * The first thread which returns from do_signal_stop()
847 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
848 			 * notify its parent. See get_signal_to_deliver().
849 			 */
850 			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
851 			signal->group_stop_count = 0;
852 			signal->group_exit_code = 0;
853 		}
854 	}
855 
856 	return !sig_ignored(p, sig, force);
857 }
858 
859 /*
860  * Test if P wants to take SIG.  After we've checked all threads with this,
861  * it's equivalent to finding no threads not blocking SIG.  Any threads not
862  * blocking SIG were ruled out because they are not running and already
863  * have pending signals.  Such threads will dequeue from the shared queue
864  * as soon as they're available, so putting the signal on the shared queue
865  * will be equivalent to sending it to one such thread.
866  */
867 static inline int wants_signal(int sig, struct task_struct *p)
868 {
869 	if (sigismember(&p->blocked, sig))
870 		return 0;
871 	if (p->flags & PF_EXITING)
872 		return 0;
873 	if (sig == SIGKILL)
874 		return 1;
875 	if (task_is_stopped_or_traced(p))
876 		return 0;
877 	return task_curr(p) || !signal_pending(p);
878 }
879 
880 static void complete_signal(int sig, struct task_struct *p, int group)
881 {
882 	struct signal_struct *signal = p->signal;
883 	struct task_struct *t;
884 
885 	/*
886 	 * Now find a thread we can wake up to take the signal off the queue.
887 	 *
888 	 * If the main thread wants the signal, it gets first crack.
889 	 * Probably the least surprising to the average bear.
890 	 */
891 	if (wants_signal(sig, p))
892 		t = p;
893 	else if (!group || thread_group_empty(p))
894 		/*
895 		 * There is just one thread and it does not need to be woken.
896 		 * It will dequeue unblocked signals before it runs again.
897 		 */
898 		return;
899 	else {
900 		/*
901 		 * Otherwise try to find a suitable thread.
902 		 */
903 		t = signal->curr_target;
904 		while (!wants_signal(sig, t)) {
905 			t = next_thread(t);
906 			if (t == signal->curr_target)
907 				/*
908 				 * No thread needs to be woken.
909 				 * Any eligible threads will see
910 				 * the signal in the queue soon.
911 				 */
912 				return;
913 		}
914 		signal->curr_target = t;
915 	}
916 
917 	/*
918 	 * Found a killable thread.  If the signal will be fatal,
919 	 * then start taking the whole group down immediately.
920 	 */
921 	if (sig_fatal(p, sig) &&
922 	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
923 	    !sigismember(&t->real_blocked, sig) &&
924 	    (sig == SIGKILL || !p->ptrace)) {
925 		/*
926 		 * This signal will be fatal to the whole group.
927 		 */
928 		if (!sig_kernel_coredump(sig)) {
929 			/*
930 			 * Start a group exit and wake everybody up.
931 			 * This way we don't have other threads
932 			 * running and doing things after a slower
933 			 * thread has the fatal signal pending.
934 			 */
935 			signal->flags = SIGNAL_GROUP_EXIT;
936 			signal->group_exit_code = sig;
937 			signal->group_stop_count = 0;
938 			t = p;
939 			do {
940 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
941 				sigaddset(&t->pending.signal, SIGKILL);
942 				signal_wake_up(t, 1);
943 			} while_each_thread(p, t);
944 			return;
945 		}
946 	}
947 
948 	/*
949 	 * The signal is already in the shared-pending queue.
950 	 * Tell the chosen thread to wake up and dequeue it.
951 	 */
952 	signal_wake_up(t, sig == SIGKILL);
953 	return;
954 }
955 
956 static inline int legacy_queue(struct sigpending *signals, int sig)
957 {
958 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
959 }
960 
961 #ifdef CONFIG_USER_NS
962 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
963 {
964 	if (current_user_ns() == task_cred_xxx(t, user_ns))
965 		return;
966 
967 	if (SI_FROMKERNEL(info))
968 		return;
969 
970 	rcu_read_lock();
971 	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
972 					make_kuid(current_user_ns(), info->si_uid));
973 	rcu_read_unlock();
974 }
975 #else
976 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
977 {
978 	return;
979 }
980 #endif
981 
982 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
983 			int group, int from_ancestor_ns)
984 {
985 	struct sigpending *pending;
986 	struct sigqueue *q;
987 	int override_rlimit;
988 	int ret = 0, result;
989 
990 	assert_spin_locked(&t->sighand->siglock);
991 
992 	result = TRACE_SIGNAL_IGNORED;
993 	if (!prepare_signal(sig, t,
994 			from_ancestor_ns || (info == SEND_SIG_FORCED)))
995 		goto ret;
996 
997 	pending = group ? &t->signal->shared_pending : &t->pending;
998 	/*
999 	 * Short-circuit ignored signals and support queuing
1000 	 * exactly one non-rt signal, so that we can get more
1001 	 * detailed information about the cause of the signal.
1002 	 */
1003 	result = TRACE_SIGNAL_ALREADY_PENDING;
1004 	if (legacy_queue(pending, sig))
1005 		goto ret;
1006 
1007 	result = TRACE_SIGNAL_DELIVERED;
1008 	/*
1009 	 * fast-pathed signals for kernel-internal things like SIGSTOP
1010 	 * or SIGKILL.
1011 	 */
1012 	if (info == SEND_SIG_FORCED)
1013 		goto out_set;
1014 
1015 	/*
1016 	 * Real-time signals must be queued if sent by sigqueue, or
1017 	 * some other real-time mechanism.  It is implementation
1018 	 * defined whether kill() does so.  We attempt to do so, on
1019 	 * the principle of least surprise, but since kill is not
1020 	 * allowed to fail with EAGAIN when low on memory we just
1021 	 * make sure at least one signal gets delivered and don't
1022 	 * pass on the info struct.
1023 	 */
1024 	if (sig < SIGRTMIN)
1025 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1026 	else
1027 		override_rlimit = 0;
1028 
1029 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1030 		override_rlimit);
1031 	if (q) {
1032 		list_add_tail(&q->list, &pending->list);
1033 		switch ((unsigned long) info) {
1034 		case (unsigned long) SEND_SIG_NOINFO:
1035 			q->info.si_signo = sig;
1036 			q->info.si_errno = 0;
1037 			q->info.si_code = SI_USER;
1038 			q->info.si_pid = task_tgid_nr_ns(current,
1039 							task_active_pid_ns(t));
1040 			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1041 			break;
1042 		case (unsigned long) SEND_SIG_PRIV:
1043 			q->info.si_signo = sig;
1044 			q->info.si_errno = 0;
1045 			q->info.si_code = SI_KERNEL;
1046 			q->info.si_pid = 0;
1047 			q->info.si_uid = 0;
1048 			break;
1049 		default:
1050 			copy_siginfo(&q->info, info);
1051 			if (from_ancestor_ns)
1052 				q->info.si_pid = 0;
1053 			break;
1054 		}
1055 
1056 		userns_fixup_signal_uid(&q->info, t);
1057 
1058 	} else if (!is_si_special(info)) {
1059 		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1060 			/*
1061 			 * Queue overflow, abort.  We may abort if the
1062 			 * signal was rt and sent by user using something
1063 			 * other than kill().
1064 			 */
1065 			result = TRACE_SIGNAL_OVERFLOW_FAIL;
1066 			ret = -EAGAIN;
1067 			goto ret;
1068 		} else {
1069 			/*
1070 			 * This is a silent loss of information.  We still
1071 			 * send the signal, but the *info bits are lost.
1072 			 */
1073 			result = TRACE_SIGNAL_LOSE_INFO;
1074 		}
1075 	}
1076 
1077 out_set:
1078 	signalfd_notify(t, sig);
1079 	sigaddset(&pending->signal, sig);
1080 	complete_signal(sig, t, group);
1081 ret:
1082 	trace_signal_generate(sig, info, t, group, result);
1083 	return ret;
1084 }
1085 
1086 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1087 			int group)
1088 {
1089 	int from_ancestor_ns = 0;
1090 
1091 #ifdef CONFIG_PID_NS
1092 	from_ancestor_ns = si_fromuser(info) &&
1093 			   !task_pid_nr_ns(current, task_active_pid_ns(t));
1094 #endif
1095 
1096 	return __send_signal(sig, info, t, group, from_ancestor_ns);
1097 }
1098 
1099 static void print_fatal_signal(int signr)
1100 {
1101 	struct pt_regs *regs = signal_pt_regs();
1102 	pr_info("potentially unexpected fatal signal %d.\n", signr);
1103 
1104 #if defined(__i386__) && !defined(__arch_um__)
1105 	pr_info("code at %08lx: ", regs->ip);
1106 	{
1107 		int i;
1108 		for (i = 0; i < 16; i++) {
1109 			unsigned char insn;
1110 
1111 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1112 				break;
1113 			pr_cont("%02x ", insn);
1114 		}
1115 	}
1116 	pr_cont("\n");
1117 #endif
1118 	preempt_disable();
1119 	show_regs(regs);
1120 	preempt_enable();
1121 }
1122 
1123 static int __init setup_print_fatal_signals(char *str)
1124 {
1125 	get_option (&str, &print_fatal_signals);
1126 
1127 	return 1;
1128 }
1129 
1130 __setup("print-fatal-signals=", setup_print_fatal_signals);
1131 
1132 int
1133 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1134 {
1135 	return send_signal(sig, info, p, 1);
1136 }
1137 
1138 static int
1139 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1140 {
1141 	return send_signal(sig, info, t, 0);
1142 }
1143 
1144 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1145 			bool group)
1146 {
1147 	unsigned long flags;
1148 	int ret = -ESRCH;
1149 
1150 	if (lock_task_sighand(p, &flags)) {
1151 		ret = send_signal(sig, info, p, group);
1152 		unlock_task_sighand(p, &flags);
1153 	}
1154 
1155 	return ret;
1156 }
1157 
1158 /*
1159  * Force a signal that the process can't ignore: if necessary
1160  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1161  *
1162  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1163  * since we do not want to have a signal handler that was blocked
1164  * be invoked when user space had explicitly blocked it.
1165  *
1166  * We don't want to have recursive SIGSEGV's etc, for example,
1167  * that is why we also clear SIGNAL_UNKILLABLE.
1168  */
1169 int
1170 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1171 {
1172 	unsigned long int flags;
1173 	int ret, blocked, ignored;
1174 	struct k_sigaction *action;
1175 
1176 	spin_lock_irqsave(&t->sighand->siglock, flags);
1177 	action = &t->sighand->action[sig-1];
1178 	ignored = action->sa.sa_handler == SIG_IGN;
1179 	blocked = sigismember(&t->blocked, sig);
1180 	if (blocked || ignored) {
1181 		action->sa.sa_handler = SIG_DFL;
1182 		if (blocked) {
1183 			sigdelset(&t->blocked, sig);
1184 			recalc_sigpending_and_wake(t);
1185 		}
1186 	}
1187 	if (action->sa.sa_handler == SIG_DFL)
1188 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1189 	ret = specific_send_sig_info(sig, info, t);
1190 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1191 
1192 	return ret;
1193 }
1194 
1195 /*
1196  * Nuke all other threads in the group.
1197  */
1198 int zap_other_threads(struct task_struct *p)
1199 {
1200 	struct task_struct *t = p;
1201 	int count = 0;
1202 
1203 	p->signal->group_stop_count = 0;
1204 
1205 	while_each_thread(p, t) {
1206 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1207 		count++;
1208 
1209 		/* Don't bother with already dead threads */
1210 		if (t->exit_state)
1211 			continue;
1212 		sigaddset(&t->pending.signal, SIGKILL);
1213 		signal_wake_up(t, 1);
1214 	}
1215 
1216 	return count;
1217 }
1218 
1219 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1220 					   unsigned long *flags)
1221 {
1222 	struct sighand_struct *sighand;
1223 
1224 	for (;;) {
1225 		/*
1226 		 * Disable interrupts early to avoid deadlocks.
1227 		 * See rcu_read_unlock() comment header for details.
1228 		 */
1229 		local_irq_save(*flags);
1230 		rcu_read_lock();
1231 		sighand = rcu_dereference(tsk->sighand);
1232 		if (unlikely(sighand == NULL)) {
1233 			rcu_read_unlock();
1234 			local_irq_restore(*flags);
1235 			break;
1236 		}
1237 		/*
1238 		 * This sighand can be already freed and even reused, but
1239 		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
1240 		 * initializes ->siglock: this slab can't go away, it has
1241 		 * the same object type, ->siglock can't be reinitialized.
1242 		 *
1243 		 * We need to ensure that tsk->sighand is still the same
1244 		 * after we take the lock, we can race with de_thread() or
1245 		 * __exit_signal(). In the latter case the next iteration
1246 		 * must see ->sighand == NULL.
1247 		 */
1248 		spin_lock(&sighand->siglock);
1249 		if (likely(sighand == tsk->sighand)) {
1250 			rcu_read_unlock();
1251 			break;
1252 		}
1253 		spin_unlock(&sighand->siglock);
1254 		rcu_read_unlock();
1255 		local_irq_restore(*flags);
1256 	}
1257 
1258 	return sighand;
1259 }
1260 
1261 /*
1262  * send signal info to all the members of a group
1263  */
1264 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1265 {
1266 	int ret;
1267 
1268 	rcu_read_lock();
1269 	ret = check_kill_permission(sig, info, p);
1270 	rcu_read_unlock();
1271 
1272 	if (!ret && sig)
1273 		ret = do_send_sig_info(sig, info, p, true);
1274 
1275 	return ret;
1276 }
1277 
1278 /*
1279  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1280  * control characters do (^C, ^Z etc)
1281  * - the caller must hold at least a readlock on tasklist_lock
1282  */
1283 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1284 {
1285 	struct task_struct *p = NULL;
1286 	int retval, success;
1287 
1288 	success = 0;
1289 	retval = -ESRCH;
1290 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1291 		int err = group_send_sig_info(sig, info, p);
1292 		success |= !err;
1293 		retval = err;
1294 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1295 	return success ? 0 : retval;
1296 }
1297 
1298 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1299 {
1300 	int error = -ESRCH;
1301 	struct task_struct *p;
1302 
1303 	for (;;) {
1304 		rcu_read_lock();
1305 		p = pid_task(pid, PIDTYPE_PID);
1306 		if (p)
1307 			error = group_send_sig_info(sig, info, p);
1308 		rcu_read_unlock();
1309 		if (likely(!p || error != -ESRCH))
1310 			return error;
1311 
1312 		/*
1313 		 * The task was unhashed in between, try again.  If it
1314 		 * is dead, pid_task() will return NULL, if we race with
1315 		 * de_thread() it will find the new leader.
1316 		 */
1317 	}
1318 }
1319 
1320 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1321 {
1322 	int error;
1323 	rcu_read_lock();
1324 	error = kill_pid_info(sig, info, find_vpid(pid));
1325 	rcu_read_unlock();
1326 	return error;
1327 }
1328 
1329 static int kill_as_cred_perm(const struct cred *cred,
1330 			     struct task_struct *target)
1331 {
1332 	const struct cred *pcred = __task_cred(target);
1333 	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1334 	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
1335 		return 0;
1336 	return 1;
1337 }
1338 
1339 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1340 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1341 			 const struct cred *cred, u32 secid)
1342 {
1343 	int ret = -EINVAL;
1344 	struct task_struct *p;
1345 	unsigned long flags;
1346 
1347 	if (!valid_signal(sig))
1348 		return ret;
1349 
1350 	rcu_read_lock();
1351 	p = pid_task(pid, PIDTYPE_PID);
1352 	if (!p) {
1353 		ret = -ESRCH;
1354 		goto out_unlock;
1355 	}
1356 	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1357 		ret = -EPERM;
1358 		goto out_unlock;
1359 	}
1360 	ret = security_task_kill(p, info, sig, secid);
1361 	if (ret)
1362 		goto out_unlock;
1363 
1364 	if (sig) {
1365 		if (lock_task_sighand(p, &flags)) {
1366 			ret = __send_signal(sig, info, p, 1, 0);
1367 			unlock_task_sighand(p, &flags);
1368 		} else
1369 			ret = -ESRCH;
1370 	}
1371 out_unlock:
1372 	rcu_read_unlock();
1373 	return ret;
1374 }
1375 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1376 
1377 /*
1378  * kill_something_info() interprets pid in interesting ways just like kill(2).
1379  *
1380  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1381  * is probably wrong.  Should make it like BSD or SYSV.
1382  */
1383 
1384 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1385 {
1386 	int ret;
1387 
1388 	if (pid > 0) {
1389 		rcu_read_lock();
1390 		ret = kill_pid_info(sig, info, find_vpid(pid));
1391 		rcu_read_unlock();
1392 		return ret;
1393 	}
1394 
1395 	read_lock(&tasklist_lock);
1396 	if (pid != -1) {
1397 		ret = __kill_pgrp_info(sig, info,
1398 				pid ? find_vpid(-pid) : task_pgrp(current));
1399 	} else {
1400 		int retval = 0, count = 0;
1401 		struct task_struct * p;
1402 
1403 		for_each_process(p) {
1404 			if (task_pid_vnr(p) > 1 &&
1405 					!same_thread_group(p, current)) {
1406 				int err = group_send_sig_info(sig, info, p);
1407 				++count;
1408 				if (err != -EPERM)
1409 					retval = err;
1410 			}
1411 		}
1412 		ret = count ? retval : -ESRCH;
1413 	}
1414 	read_unlock(&tasklist_lock);
1415 
1416 	return ret;
1417 }
1418 
1419 /*
1420  * These are for backward compatibility with the rest of the kernel source.
1421  */
1422 
1423 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1424 {
1425 	/*
1426 	 * Make sure legacy kernel users don't send in bad values
1427 	 * (normal paths check this in check_kill_permission).
1428 	 */
1429 	if (!valid_signal(sig))
1430 		return -EINVAL;
1431 
1432 	return do_send_sig_info(sig, info, p, false);
1433 }
1434 
1435 #define __si_special(priv) \
1436 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1437 
1438 int
1439 send_sig(int sig, struct task_struct *p, int priv)
1440 {
1441 	return send_sig_info(sig, __si_special(priv), p);
1442 }
1443 
1444 void
1445 force_sig(int sig, struct task_struct *p)
1446 {
1447 	force_sig_info(sig, SEND_SIG_PRIV, p);
1448 }
1449 
1450 /*
1451  * When things go south during signal handling, we
1452  * will force a SIGSEGV. And if the signal that caused
1453  * the problem was already a SIGSEGV, we'll want to
1454  * make sure we don't even try to deliver the signal..
1455  */
1456 int
1457 force_sigsegv(int sig, struct task_struct *p)
1458 {
1459 	if (sig == SIGSEGV) {
1460 		unsigned long flags;
1461 		spin_lock_irqsave(&p->sighand->siglock, flags);
1462 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1463 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1464 	}
1465 	force_sig(SIGSEGV, p);
1466 	return 0;
1467 }
1468 
1469 int kill_pgrp(struct pid *pid, int sig, int priv)
1470 {
1471 	int ret;
1472 
1473 	read_lock(&tasklist_lock);
1474 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1475 	read_unlock(&tasklist_lock);
1476 
1477 	return ret;
1478 }
1479 EXPORT_SYMBOL(kill_pgrp);
1480 
1481 int kill_pid(struct pid *pid, int sig, int priv)
1482 {
1483 	return kill_pid_info(sig, __si_special(priv), pid);
1484 }
1485 EXPORT_SYMBOL(kill_pid);
1486 
1487 /*
1488  * These functions support sending signals using preallocated sigqueue
1489  * structures.  This is needed "because realtime applications cannot
1490  * afford to lose notifications of asynchronous events, like timer
1491  * expirations or I/O completions".  In the case of POSIX Timers
1492  * we allocate the sigqueue structure from the timer_create.  If this
1493  * allocation fails we are able to report the failure to the application
1494  * with an EAGAIN error.
1495  */
1496 struct sigqueue *sigqueue_alloc(void)
1497 {
1498 	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1499 
1500 	if (q)
1501 		q->flags |= SIGQUEUE_PREALLOC;
1502 
1503 	return q;
1504 }
1505 
1506 void sigqueue_free(struct sigqueue *q)
1507 {
1508 	unsigned long flags;
1509 	spinlock_t *lock = &current->sighand->siglock;
1510 
1511 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1512 	/*
1513 	 * We must hold ->siglock while testing q->list
1514 	 * to serialize with collect_signal() or with
1515 	 * __exit_signal()->flush_sigqueue().
1516 	 */
1517 	spin_lock_irqsave(lock, flags);
1518 	q->flags &= ~SIGQUEUE_PREALLOC;
1519 	/*
1520 	 * If it is queued it will be freed when dequeued,
1521 	 * like the "regular" sigqueue.
1522 	 */
1523 	if (!list_empty(&q->list))
1524 		q = NULL;
1525 	spin_unlock_irqrestore(lock, flags);
1526 
1527 	if (q)
1528 		__sigqueue_free(q);
1529 }
1530 
1531 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1532 {
1533 	int sig = q->info.si_signo;
1534 	struct sigpending *pending;
1535 	unsigned long flags;
1536 	int ret, result;
1537 
1538 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1539 
1540 	ret = -1;
1541 	if (!likely(lock_task_sighand(t, &flags)))
1542 		goto ret;
1543 
1544 	ret = 1; /* the signal is ignored */
1545 	result = TRACE_SIGNAL_IGNORED;
1546 	if (!prepare_signal(sig, t, false))
1547 		goto out;
1548 
1549 	ret = 0;
1550 	if (unlikely(!list_empty(&q->list))) {
1551 		/*
1552 		 * If an SI_TIMER entry is already queued, just increment
1553 		 * the overrun count.
1554 		 */
1555 		BUG_ON(q->info.si_code != SI_TIMER);
1556 		q->info.si_overrun++;
1557 		result = TRACE_SIGNAL_ALREADY_PENDING;
1558 		goto out;
1559 	}
1560 	q->info.si_overrun = 0;
1561 
1562 	signalfd_notify(t, sig);
1563 	pending = group ? &t->signal->shared_pending : &t->pending;
1564 	list_add_tail(&q->list, &pending->list);
1565 	sigaddset(&pending->signal, sig);
1566 	complete_signal(sig, t, group);
1567 	result = TRACE_SIGNAL_DELIVERED;
1568 out:
1569 	trace_signal_generate(sig, &q->info, t, group, result);
1570 	unlock_task_sighand(t, &flags);
1571 ret:
1572 	return ret;
1573 }
1574 
1575 /*
1576  * Let a parent know about the death of a child.
1577  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1578  *
1579  * Returns true if our parent ignored us and so we've switched to
1580  * self-reaping.
1581  */
1582 bool do_notify_parent(struct task_struct *tsk, int sig)
1583 {
1584 	struct siginfo info;
1585 	unsigned long flags;
1586 	struct sighand_struct *psig;
1587 	bool autoreap = false;
1588 	cputime_t utime, stime;
1589 
1590 	BUG_ON(sig == -1);
1591 
1592  	/* do_notify_parent_cldstop should have been called instead.  */
1593  	BUG_ON(task_is_stopped_or_traced(tsk));
1594 
1595 	BUG_ON(!tsk->ptrace &&
1596 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1597 
1598 	if (sig != SIGCHLD) {
1599 		/*
1600 		 * This is only possible if parent == real_parent.
1601 		 * Check if it has changed security domain.
1602 		 */
1603 		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1604 			sig = SIGCHLD;
1605 	}
1606 
1607 	info.si_signo = sig;
1608 	info.si_errno = 0;
1609 	/*
1610 	 * We are under tasklist_lock here so our parent is tied to
1611 	 * us and cannot change.
1612 	 *
1613 	 * task_active_pid_ns will always return the same pid namespace
1614 	 * until a task passes through release_task.
1615 	 *
1616 	 * write_lock() currently calls preempt_disable() which is the
1617 	 * same as rcu_read_lock(), but according to Oleg it is not
1618 	 * correct to rely on this.
1619 	 */
1620 	rcu_read_lock();
1621 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1622 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1623 				       task_uid(tsk));
1624 	rcu_read_unlock();
1625 
1626 	task_cputime(tsk, &utime, &stime);
1627 	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1628 	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1629 
1630 	info.si_status = tsk->exit_code & 0x7f;
1631 	if (tsk->exit_code & 0x80)
1632 		info.si_code = CLD_DUMPED;
1633 	else if (tsk->exit_code & 0x7f)
1634 		info.si_code = CLD_KILLED;
1635 	else {
1636 		info.si_code = CLD_EXITED;
1637 		info.si_status = tsk->exit_code >> 8;
1638 	}
1639 
1640 	psig = tsk->parent->sighand;
1641 	spin_lock_irqsave(&psig->siglock, flags);
1642 	if (!tsk->ptrace && sig == SIGCHLD &&
1643 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1644 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1645 		/*
1646 		 * We are exiting and our parent doesn't care.  POSIX.1
1647 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1648 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1649 		 * automatically and not left for our parent's wait4 call.
1650 		 * Rather than having the parent do it as a magic kind of
1651 		 * signal handler, we just set this to tell do_exit that we
1652 		 * can be cleaned up without becoming a zombie.  Note that
1653 		 * we still call __wake_up_parent in this case, because a
1654 		 * blocked sys_wait4 might now return -ECHILD.
1655 		 *
1656 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1657 		 * is implementation-defined: we do (if you don't want
1658 		 * it, just use SIG_IGN instead).
1659 		 */
1660 		autoreap = true;
1661 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1662 			sig = 0;
1663 	}
1664 	if (valid_signal(sig) && sig)
1665 		__group_send_sig_info(sig, &info, tsk->parent);
1666 	__wake_up_parent(tsk, tsk->parent);
1667 	spin_unlock_irqrestore(&psig->siglock, flags);
1668 
1669 	return autoreap;
1670 }
1671 
1672 /**
1673  * do_notify_parent_cldstop - notify parent of stopped/continued state change
1674  * @tsk: task reporting the state change
1675  * @for_ptracer: the notification is for ptracer
1676  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1677  *
1678  * Notify @tsk's parent that the stopped/continued state has changed.  If
1679  * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1680  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1681  *
1682  * CONTEXT:
1683  * Must be called with tasklist_lock at least read locked.
1684  */
1685 static void do_notify_parent_cldstop(struct task_struct *tsk,
1686 				     bool for_ptracer, int why)
1687 {
1688 	struct siginfo info;
1689 	unsigned long flags;
1690 	struct task_struct *parent;
1691 	struct sighand_struct *sighand;
1692 	cputime_t utime, stime;
1693 
1694 	if (for_ptracer) {
1695 		parent = tsk->parent;
1696 	} else {
1697 		tsk = tsk->group_leader;
1698 		parent = tsk->real_parent;
1699 	}
1700 
1701 	info.si_signo = SIGCHLD;
1702 	info.si_errno = 0;
1703 	/*
1704 	 * see comment in do_notify_parent() about the following 4 lines
1705 	 */
1706 	rcu_read_lock();
1707 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1708 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1709 	rcu_read_unlock();
1710 
1711 	task_cputime(tsk, &utime, &stime);
1712 	info.si_utime = cputime_to_clock_t(utime);
1713 	info.si_stime = cputime_to_clock_t(stime);
1714 
1715  	info.si_code = why;
1716  	switch (why) {
1717  	case CLD_CONTINUED:
1718  		info.si_status = SIGCONT;
1719  		break;
1720  	case CLD_STOPPED:
1721  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1722  		break;
1723  	case CLD_TRAPPED:
1724  		info.si_status = tsk->exit_code & 0x7f;
1725  		break;
1726  	default:
1727  		BUG();
1728  	}
1729 
1730 	sighand = parent->sighand;
1731 	spin_lock_irqsave(&sighand->siglock, flags);
1732 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1733 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1734 		__group_send_sig_info(SIGCHLD, &info, parent);
1735 	/*
1736 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1737 	 */
1738 	__wake_up_parent(tsk, parent);
1739 	spin_unlock_irqrestore(&sighand->siglock, flags);
1740 }
1741 
1742 static inline int may_ptrace_stop(void)
1743 {
1744 	if (!likely(current->ptrace))
1745 		return 0;
1746 	/*
1747 	 * Are we in the middle of do_coredump?
1748 	 * If so, and our tracer is also part of the coredump, stopping
1749 	 * would be a deadlock, and pointless because our tracer
1750 	 * is dead, so don't allow us to stop.
1751 	 * If SIGKILL was already sent before the caller unlocked
1752 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1753 	 * is safe to enter schedule().
1754 	 *
1755 	 * This is almost outdated, a task with the pending SIGKILL can't
1756 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1757 	 * after SIGKILL was already dequeued.
1758 	 */
1759 	if (unlikely(current->mm->core_state) &&
1760 	    unlikely(current->mm == current->parent->mm))
1761 		return 0;
1762 
1763 	return 1;
1764 }
1765 
1766 /*
1767  * Return non-zero if there is a SIGKILL that should be waking us up.
1768  * Called with the siglock held.
1769  */
1770 static int sigkill_pending(struct task_struct *tsk)
1771 {
1772 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1773 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1774 }
1775 
1776 /*
1777  * This must be called with current->sighand->siglock held.
1778  *
1779  * This should be the path for all ptrace stops.
1780  * We always set current->last_siginfo while stopped here.
1781  * That makes it a way to test a stopped process for
1782  * being ptrace-stopped vs being job-control-stopped.
1783  *
1784  * If we actually decide not to stop at all because the tracer
1785  * is gone, we keep current->exit_code unless clear_code.
1786  */
1787 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1788 	__releases(&current->sighand->siglock)
1789 	__acquires(&current->sighand->siglock)
1790 {
1791 	bool gstop_done = false;
1792 
1793 	if (arch_ptrace_stop_needed(exit_code, info)) {
1794 		/*
1795 		 * The arch code has something special to do before a
1796 		 * ptrace stop.  This is allowed to block, e.g. for faults
1797 		 * on user stack pages.  We can't keep the siglock while
1798 		 * calling arch_ptrace_stop, so we must release it now.
1799 		 * To preserve proper semantics, we must do this before
1800 		 * any signal bookkeeping like checking group_stop_count.
1801 		 * Meanwhile, a SIGKILL could come in before we retake the
1802 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1803 		 * So after regaining the lock, we must check for SIGKILL.
1804 		 */
1805 		spin_unlock_irq(&current->sighand->siglock);
1806 		arch_ptrace_stop(exit_code, info);
1807 		spin_lock_irq(&current->sighand->siglock);
1808 		if (sigkill_pending(current))
1809 			return;
1810 	}
1811 
1812 	/*
1813 	 * We're committing to trapping.  TRACED should be visible before
1814 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1815 	 * Also, transition to TRACED and updates to ->jobctl should be
1816 	 * atomic with respect to siglock and should be done after the arch
1817 	 * hook as siglock is released and regrabbed across it.
1818 	 */
1819 	set_current_state(TASK_TRACED);
1820 
1821 	current->last_siginfo = info;
1822 	current->exit_code = exit_code;
1823 
1824 	/*
1825 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
1826 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1827 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
1828 	 * could be clear now.  We act as if SIGCONT is received after
1829 	 * TASK_TRACED is entered - ignore it.
1830 	 */
1831 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1832 		gstop_done = task_participate_group_stop(current);
1833 
1834 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1835 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1836 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1837 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1838 
1839 	/* entering a trap, clear TRAPPING */
1840 	task_clear_jobctl_trapping(current);
1841 
1842 	spin_unlock_irq(&current->sighand->siglock);
1843 	read_lock(&tasklist_lock);
1844 	if (may_ptrace_stop()) {
1845 		/*
1846 		 * Notify parents of the stop.
1847 		 *
1848 		 * While ptraced, there are two parents - the ptracer and
1849 		 * the real_parent of the group_leader.  The ptracer should
1850 		 * know about every stop while the real parent is only
1851 		 * interested in the completion of group stop.  The states
1852 		 * for the two don't interact with each other.  Notify
1853 		 * separately unless they're gonna be duplicates.
1854 		 */
1855 		do_notify_parent_cldstop(current, true, why);
1856 		if (gstop_done && ptrace_reparented(current))
1857 			do_notify_parent_cldstop(current, false, why);
1858 
1859 		/*
1860 		 * Don't want to allow preemption here, because
1861 		 * sys_ptrace() needs this task to be inactive.
1862 		 *
1863 		 * XXX: implement read_unlock_no_resched().
1864 		 */
1865 		preempt_disable();
1866 		read_unlock(&tasklist_lock);
1867 		preempt_enable_no_resched();
1868 		freezable_schedule();
1869 	} else {
1870 		/*
1871 		 * By the time we got the lock, our tracer went away.
1872 		 * Don't drop the lock yet, another tracer may come.
1873 		 *
1874 		 * If @gstop_done, the ptracer went away between group stop
1875 		 * completion and here.  During detach, it would have set
1876 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
1877 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
1878 		 * the real parent of the group stop completion is enough.
1879 		 */
1880 		if (gstop_done)
1881 			do_notify_parent_cldstop(current, false, why);
1882 
1883 		/* tasklist protects us from ptrace_freeze_traced() */
1884 		__set_current_state(TASK_RUNNING);
1885 		if (clear_code)
1886 			current->exit_code = 0;
1887 		read_unlock(&tasklist_lock);
1888 	}
1889 
1890 	/*
1891 	 * We are back.  Now reacquire the siglock before touching
1892 	 * last_siginfo, so that we are sure to have synchronized with
1893 	 * any signal-sending on another CPU that wants to examine it.
1894 	 */
1895 	spin_lock_irq(&current->sighand->siglock);
1896 	current->last_siginfo = NULL;
1897 
1898 	/* LISTENING can be set only during STOP traps, clear it */
1899 	current->jobctl &= ~JOBCTL_LISTENING;
1900 
1901 	/*
1902 	 * Queued signals ignored us while we were stopped for tracing.
1903 	 * So check for any that we should take before resuming user mode.
1904 	 * This sets TIF_SIGPENDING, but never clears it.
1905 	 */
1906 	recalc_sigpending_tsk(current);
1907 }
1908 
1909 static void ptrace_do_notify(int signr, int exit_code, int why)
1910 {
1911 	siginfo_t info;
1912 
1913 	memset(&info, 0, sizeof info);
1914 	info.si_signo = signr;
1915 	info.si_code = exit_code;
1916 	info.si_pid = task_pid_vnr(current);
1917 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1918 
1919 	/* Let the debugger run.  */
1920 	ptrace_stop(exit_code, why, 1, &info);
1921 }
1922 
1923 void ptrace_notify(int exit_code)
1924 {
1925 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1926 	if (unlikely(current->task_works))
1927 		task_work_run();
1928 
1929 	spin_lock_irq(&current->sighand->siglock);
1930 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1931 	spin_unlock_irq(&current->sighand->siglock);
1932 }
1933 
1934 /**
1935  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1936  * @signr: signr causing group stop if initiating
1937  *
1938  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1939  * and participate in it.  If already set, participate in the existing
1940  * group stop.  If participated in a group stop (and thus slept), %true is
1941  * returned with siglock released.
1942  *
1943  * If ptraced, this function doesn't handle stop itself.  Instead,
1944  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1945  * untouched.  The caller must ensure that INTERRUPT trap handling takes
1946  * place afterwards.
1947  *
1948  * CONTEXT:
1949  * Must be called with @current->sighand->siglock held, which is released
1950  * on %true return.
1951  *
1952  * RETURNS:
1953  * %false if group stop is already cancelled or ptrace trap is scheduled.
1954  * %true if participated in group stop.
1955  */
1956 static bool do_signal_stop(int signr)
1957 	__releases(&current->sighand->siglock)
1958 {
1959 	struct signal_struct *sig = current->signal;
1960 
1961 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
1962 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1963 		struct task_struct *t;
1964 
1965 		/* signr will be recorded in task->jobctl for retries */
1966 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
1967 
1968 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
1969 		    unlikely(signal_group_exit(sig)))
1970 			return false;
1971 		/*
1972 		 * There is no group stop already in progress.  We must
1973 		 * initiate one now.
1974 		 *
1975 		 * While ptraced, a task may be resumed while group stop is
1976 		 * still in effect and then receive a stop signal and
1977 		 * initiate another group stop.  This deviates from the
1978 		 * usual behavior as two consecutive stop signals can't
1979 		 * cause two group stops when !ptraced.  That is why we
1980 		 * also check !task_is_stopped(t) below.
1981 		 *
1982 		 * The condition can be distinguished by testing whether
1983 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
1984 		 * group_exit_code in such case.
1985 		 *
1986 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
1987 		 * an intervening stop signal is required to cause two
1988 		 * continued events regardless of ptrace.
1989 		 */
1990 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
1991 			sig->group_exit_code = signr;
1992 
1993 		sig->group_stop_count = 0;
1994 
1995 		if (task_set_jobctl_pending(current, signr | gstop))
1996 			sig->group_stop_count++;
1997 
1998 		t = current;
1999 		while_each_thread(current, t) {
2000 			/*
2001 			 * Setting state to TASK_STOPPED for a group
2002 			 * stop is always done with the siglock held,
2003 			 * so this check has no races.
2004 			 */
2005 			if (!task_is_stopped(t) &&
2006 			    task_set_jobctl_pending(t, signr | gstop)) {
2007 				sig->group_stop_count++;
2008 				if (likely(!(t->ptrace & PT_SEIZED)))
2009 					signal_wake_up(t, 0);
2010 				else
2011 					ptrace_trap_notify(t);
2012 			}
2013 		}
2014 	}
2015 
2016 	if (likely(!current->ptrace)) {
2017 		int notify = 0;
2018 
2019 		/*
2020 		 * If there are no other threads in the group, or if there
2021 		 * is a group stop in progress and we are the last to stop,
2022 		 * report to the parent.
2023 		 */
2024 		if (task_participate_group_stop(current))
2025 			notify = CLD_STOPPED;
2026 
2027 		__set_current_state(TASK_STOPPED);
2028 		spin_unlock_irq(&current->sighand->siglock);
2029 
2030 		/*
2031 		 * Notify the parent of the group stop completion.  Because
2032 		 * we're not holding either the siglock or tasklist_lock
2033 		 * here, ptracer may attach in between; however, this is for
2034 		 * group stop and should always be delivered to the real
2035 		 * parent of the group leader.  The new ptracer will get
2036 		 * its notification when this task transitions into
2037 		 * TASK_TRACED.
2038 		 */
2039 		if (notify) {
2040 			read_lock(&tasklist_lock);
2041 			do_notify_parent_cldstop(current, false, notify);
2042 			read_unlock(&tasklist_lock);
2043 		}
2044 
2045 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2046 		freezable_schedule();
2047 		return true;
2048 	} else {
2049 		/*
2050 		 * While ptraced, group stop is handled by STOP trap.
2051 		 * Schedule it and let the caller deal with it.
2052 		 */
2053 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2054 		return false;
2055 	}
2056 }
2057 
2058 /**
2059  * do_jobctl_trap - take care of ptrace jobctl traps
2060  *
2061  * When PT_SEIZED, it's used for both group stop and explicit
2062  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2063  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2064  * the stop signal; otherwise, %SIGTRAP.
2065  *
2066  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2067  * number as exit_code and no siginfo.
2068  *
2069  * CONTEXT:
2070  * Must be called with @current->sighand->siglock held, which may be
2071  * released and re-acquired before returning with intervening sleep.
2072  */
2073 static void do_jobctl_trap(void)
2074 {
2075 	struct signal_struct *signal = current->signal;
2076 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2077 
2078 	if (current->ptrace & PT_SEIZED) {
2079 		if (!signal->group_stop_count &&
2080 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2081 			signr = SIGTRAP;
2082 		WARN_ON_ONCE(!signr);
2083 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2084 				 CLD_STOPPED);
2085 	} else {
2086 		WARN_ON_ONCE(!signr);
2087 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2088 		current->exit_code = 0;
2089 	}
2090 }
2091 
2092 static int ptrace_signal(int signr, siginfo_t *info)
2093 {
2094 	ptrace_signal_deliver();
2095 	/*
2096 	 * We do not check sig_kernel_stop(signr) but set this marker
2097 	 * unconditionally because we do not know whether debugger will
2098 	 * change signr. This flag has no meaning unless we are going
2099 	 * to stop after return from ptrace_stop(). In this case it will
2100 	 * be checked in do_signal_stop(), we should only stop if it was
2101 	 * not cleared by SIGCONT while we were sleeping. See also the
2102 	 * comment in dequeue_signal().
2103 	 */
2104 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2105 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2106 
2107 	/* We're back.  Did the debugger cancel the sig?  */
2108 	signr = current->exit_code;
2109 	if (signr == 0)
2110 		return signr;
2111 
2112 	current->exit_code = 0;
2113 
2114 	/*
2115 	 * Update the siginfo structure if the signal has
2116 	 * changed.  If the debugger wanted something
2117 	 * specific in the siginfo structure then it should
2118 	 * have updated *info via PTRACE_SETSIGINFO.
2119 	 */
2120 	if (signr != info->si_signo) {
2121 		info->si_signo = signr;
2122 		info->si_errno = 0;
2123 		info->si_code = SI_USER;
2124 		rcu_read_lock();
2125 		info->si_pid = task_pid_vnr(current->parent);
2126 		info->si_uid = from_kuid_munged(current_user_ns(),
2127 						task_uid(current->parent));
2128 		rcu_read_unlock();
2129 	}
2130 
2131 	/* If the (new) signal is now blocked, requeue it.  */
2132 	if (sigismember(&current->blocked, signr)) {
2133 		specific_send_sig_info(signr, info, current);
2134 		signr = 0;
2135 	}
2136 
2137 	return signr;
2138 }
2139 
2140 int get_signal(struct ksignal *ksig)
2141 {
2142 	struct sighand_struct *sighand = current->sighand;
2143 	struct signal_struct *signal = current->signal;
2144 	int signr;
2145 
2146 	if (unlikely(current->task_works))
2147 		task_work_run();
2148 
2149 	if (unlikely(uprobe_deny_signal()))
2150 		return 0;
2151 
2152 	/*
2153 	 * Do this once, we can't return to user-mode if freezing() == T.
2154 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2155 	 * thus do not need another check after return.
2156 	 */
2157 	try_to_freeze();
2158 
2159 relock:
2160 	spin_lock_irq(&sighand->siglock);
2161 	/*
2162 	 * Every stopped thread goes here after wakeup. Check to see if
2163 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2164 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2165 	 */
2166 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2167 		int why;
2168 
2169 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2170 			why = CLD_CONTINUED;
2171 		else
2172 			why = CLD_STOPPED;
2173 
2174 		signal->flags &= ~SIGNAL_CLD_MASK;
2175 
2176 		spin_unlock_irq(&sighand->siglock);
2177 
2178 		/*
2179 		 * Notify the parent that we're continuing.  This event is
2180 		 * always per-process and doesn't make a whole lot of sense
2181 		 * for ptracers, who shouldn't consume the state via
2182 		 * wait(2) either, but, for backward compatibility, notify
2183 		 * the ptracer of the group leader too unless it's gonna be
2184 		 * a duplicate.
2185 		 */
2186 		read_lock(&tasklist_lock);
2187 		do_notify_parent_cldstop(current, false, why);
2188 
2189 		if (ptrace_reparented(current->group_leader))
2190 			do_notify_parent_cldstop(current->group_leader,
2191 						true, why);
2192 		read_unlock(&tasklist_lock);
2193 
2194 		goto relock;
2195 	}
2196 
2197 	for (;;) {
2198 		struct k_sigaction *ka;
2199 
2200 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2201 		    do_signal_stop(0))
2202 			goto relock;
2203 
2204 		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2205 			do_jobctl_trap();
2206 			spin_unlock_irq(&sighand->siglock);
2207 			goto relock;
2208 		}
2209 
2210 		signr = dequeue_signal(current, &current->blocked, &ksig->info);
2211 
2212 		if (!signr)
2213 			break; /* will return 0 */
2214 
2215 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2216 			signr = ptrace_signal(signr, &ksig->info);
2217 			if (!signr)
2218 				continue;
2219 		}
2220 
2221 		ka = &sighand->action[signr-1];
2222 
2223 		/* Trace actually delivered signals. */
2224 		trace_signal_deliver(signr, &ksig->info, ka);
2225 
2226 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2227 			continue;
2228 		if (ka->sa.sa_handler != SIG_DFL) {
2229 			/* Run the handler.  */
2230 			ksig->ka = *ka;
2231 
2232 			if (ka->sa.sa_flags & SA_ONESHOT)
2233 				ka->sa.sa_handler = SIG_DFL;
2234 
2235 			break; /* will return non-zero "signr" value */
2236 		}
2237 
2238 		/*
2239 		 * Now we are doing the default action for this signal.
2240 		 */
2241 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2242 			continue;
2243 
2244 		/*
2245 		 * Global init gets no signals it doesn't want.
2246 		 * Container-init gets no signals it doesn't want from same
2247 		 * container.
2248 		 *
2249 		 * Note that if global/container-init sees a sig_kernel_only()
2250 		 * signal here, the signal must have been generated internally
2251 		 * or must have come from an ancestor namespace. In either
2252 		 * case, the signal cannot be dropped.
2253 		 */
2254 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2255 				!sig_kernel_only(signr))
2256 			continue;
2257 
2258 		if (sig_kernel_stop(signr)) {
2259 			/*
2260 			 * The default action is to stop all threads in
2261 			 * the thread group.  The job control signals
2262 			 * do nothing in an orphaned pgrp, but SIGSTOP
2263 			 * always works.  Note that siglock needs to be
2264 			 * dropped during the call to is_orphaned_pgrp()
2265 			 * because of lock ordering with tasklist_lock.
2266 			 * This allows an intervening SIGCONT to be posted.
2267 			 * We need to check for that and bail out if necessary.
2268 			 */
2269 			if (signr != SIGSTOP) {
2270 				spin_unlock_irq(&sighand->siglock);
2271 
2272 				/* signals can be posted during this window */
2273 
2274 				if (is_current_pgrp_orphaned())
2275 					goto relock;
2276 
2277 				spin_lock_irq(&sighand->siglock);
2278 			}
2279 
2280 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2281 				/* It released the siglock.  */
2282 				goto relock;
2283 			}
2284 
2285 			/*
2286 			 * We didn't actually stop, due to a race
2287 			 * with SIGCONT or something like that.
2288 			 */
2289 			continue;
2290 		}
2291 
2292 		spin_unlock_irq(&sighand->siglock);
2293 
2294 		/*
2295 		 * Anything else is fatal, maybe with a core dump.
2296 		 */
2297 		current->flags |= PF_SIGNALED;
2298 
2299 		if (sig_kernel_coredump(signr)) {
2300 			if (print_fatal_signals)
2301 				print_fatal_signal(ksig->info.si_signo);
2302 			proc_coredump_connector(current);
2303 			/*
2304 			 * If it was able to dump core, this kills all
2305 			 * other threads in the group and synchronizes with
2306 			 * their demise.  If we lost the race with another
2307 			 * thread getting here, it set group_exit_code
2308 			 * first and our do_group_exit call below will use
2309 			 * that value and ignore the one we pass it.
2310 			 */
2311 			do_coredump(&ksig->info);
2312 		}
2313 
2314 		/*
2315 		 * Death signals, no core dump.
2316 		 */
2317 		do_group_exit(ksig->info.si_signo);
2318 		/* NOTREACHED */
2319 	}
2320 	spin_unlock_irq(&sighand->siglock);
2321 
2322 	ksig->sig = signr;
2323 	return ksig->sig > 0;
2324 }
2325 
2326 /**
2327  * signal_delivered - update state after a signal has been delivered
2328  * @ksig:		kernel signal struct
2329  * @stepping:		nonzero if debugger single-step or block-step in use
2330  *
2331  * This function should be called when a signal has successfully been
2332  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2333  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2334  * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2335  */
2336 static void signal_delivered(struct ksignal *ksig, int stepping)
2337 {
2338 	sigset_t blocked;
2339 
2340 	/* A signal was successfully delivered, and the
2341 	   saved sigmask was stored on the signal frame,
2342 	   and will be restored by sigreturn.  So we can
2343 	   simply clear the restore sigmask flag.  */
2344 	clear_restore_sigmask();
2345 
2346 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2347 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2348 		sigaddset(&blocked, ksig->sig);
2349 	set_current_blocked(&blocked);
2350 	tracehook_signal_handler(stepping);
2351 }
2352 
2353 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2354 {
2355 	if (failed)
2356 		force_sigsegv(ksig->sig, current);
2357 	else
2358 		signal_delivered(ksig, stepping);
2359 }
2360 
2361 /*
2362  * It could be that complete_signal() picked us to notify about the
2363  * group-wide signal. Other threads should be notified now to take
2364  * the shared signals in @which since we will not.
2365  */
2366 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2367 {
2368 	sigset_t retarget;
2369 	struct task_struct *t;
2370 
2371 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2372 	if (sigisemptyset(&retarget))
2373 		return;
2374 
2375 	t = tsk;
2376 	while_each_thread(tsk, t) {
2377 		if (t->flags & PF_EXITING)
2378 			continue;
2379 
2380 		if (!has_pending_signals(&retarget, &t->blocked))
2381 			continue;
2382 		/* Remove the signals this thread can handle. */
2383 		sigandsets(&retarget, &retarget, &t->blocked);
2384 
2385 		if (!signal_pending(t))
2386 			signal_wake_up(t, 0);
2387 
2388 		if (sigisemptyset(&retarget))
2389 			break;
2390 	}
2391 }
2392 
2393 void exit_signals(struct task_struct *tsk)
2394 {
2395 	int group_stop = 0;
2396 	sigset_t unblocked;
2397 
2398 	/*
2399 	 * @tsk is about to have PF_EXITING set - lock out users which
2400 	 * expect stable threadgroup.
2401 	 */
2402 	threadgroup_change_begin(tsk);
2403 
2404 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2405 		tsk->flags |= PF_EXITING;
2406 		threadgroup_change_end(tsk);
2407 		return;
2408 	}
2409 
2410 	spin_lock_irq(&tsk->sighand->siglock);
2411 	/*
2412 	 * From now this task is not visible for group-wide signals,
2413 	 * see wants_signal(), do_signal_stop().
2414 	 */
2415 	tsk->flags |= PF_EXITING;
2416 
2417 	threadgroup_change_end(tsk);
2418 
2419 	if (!signal_pending(tsk))
2420 		goto out;
2421 
2422 	unblocked = tsk->blocked;
2423 	signotset(&unblocked);
2424 	retarget_shared_pending(tsk, &unblocked);
2425 
2426 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2427 	    task_participate_group_stop(tsk))
2428 		group_stop = CLD_STOPPED;
2429 out:
2430 	spin_unlock_irq(&tsk->sighand->siglock);
2431 
2432 	/*
2433 	 * If group stop has completed, deliver the notification.  This
2434 	 * should always go to the real parent of the group leader.
2435 	 */
2436 	if (unlikely(group_stop)) {
2437 		read_lock(&tasklist_lock);
2438 		do_notify_parent_cldstop(tsk, false, group_stop);
2439 		read_unlock(&tasklist_lock);
2440 	}
2441 }
2442 
2443 EXPORT_SYMBOL(recalc_sigpending);
2444 EXPORT_SYMBOL_GPL(dequeue_signal);
2445 EXPORT_SYMBOL(flush_signals);
2446 EXPORT_SYMBOL(force_sig);
2447 EXPORT_SYMBOL(send_sig);
2448 EXPORT_SYMBOL(send_sig_info);
2449 EXPORT_SYMBOL(sigprocmask);
2450 
2451 /*
2452  * System call entry points.
2453  */
2454 
2455 /**
2456  *  sys_restart_syscall - restart a system call
2457  */
2458 SYSCALL_DEFINE0(restart_syscall)
2459 {
2460 	struct restart_block *restart = &current->restart_block;
2461 	return restart->fn(restart);
2462 }
2463 
2464 long do_no_restart_syscall(struct restart_block *param)
2465 {
2466 	return -EINTR;
2467 }
2468 
2469 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2470 {
2471 	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2472 		sigset_t newblocked;
2473 		/* A set of now blocked but previously unblocked signals. */
2474 		sigandnsets(&newblocked, newset, &current->blocked);
2475 		retarget_shared_pending(tsk, &newblocked);
2476 	}
2477 	tsk->blocked = *newset;
2478 	recalc_sigpending();
2479 }
2480 
2481 /**
2482  * set_current_blocked - change current->blocked mask
2483  * @newset: new mask
2484  *
2485  * It is wrong to change ->blocked directly, this helper should be used
2486  * to ensure the process can't miss a shared signal we are going to block.
2487  */
2488 void set_current_blocked(sigset_t *newset)
2489 {
2490 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2491 	__set_current_blocked(newset);
2492 }
2493 
2494 void __set_current_blocked(const sigset_t *newset)
2495 {
2496 	struct task_struct *tsk = current;
2497 
2498 	spin_lock_irq(&tsk->sighand->siglock);
2499 	__set_task_blocked(tsk, newset);
2500 	spin_unlock_irq(&tsk->sighand->siglock);
2501 }
2502 
2503 /*
2504  * This is also useful for kernel threads that want to temporarily
2505  * (or permanently) block certain signals.
2506  *
2507  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2508  * interface happily blocks "unblockable" signals like SIGKILL
2509  * and friends.
2510  */
2511 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2512 {
2513 	struct task_struct *tsk = current;
2514 	sigset_t newset;
2515 
2516 	/* Lockless, only current can change ->blocked, never from irq */
2517 	if (oldset)
2518 		*oldset = tsk->blocked;
2519 
2520 	switch (how) {
2521 	case SIG_BLOCK:
2522 		sigorsets(&newset, &tsk->blocked, set);
2523 		break;
2524 	case SIG_UNBLOCK:
2525 		sigandnsets(&newset, &tsk->blocked, set);
2526 		break;
2527 	case SIG_SETMASK:
2528 		newset = *set;
2529 		break;
2530 	default:
2531 		return -EINVAL;
2532 	}
2533 
2534 	__set_current_blocked(&newset);
2535 	return 0;
2536 }
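/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a kernel thread using the in-kernel sigprocmask() above to temporarily
 * block a signal around a critical region.  SIGUSR1 is an arbitrary choice
 * for the example.
 *
 *	sigset_t set, oldset;
 *
 *	siginitset(&set, sigmask(SIGUSR1));
 *	sigprocmask(SIG_BLOCK, &set, &oldset);
 *	... do work with SIGUSR1 held pending ...
 *	sigprocmask(SIG_SETMASK, &oldset, NULL);
 */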
2537 
2538 /**
2539  *  sys_rt_sigprocmask - change the list of currently blocked signals
2540  *  @how: whether to add, remove, or set signals
2541  *  @nset: signals to add, remove, or set (if non-null)
2542  *  @oset: previous value of signal mask if non-null
2543  *  @sigsetsize: size of sigset_t type
2544  */
2545 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2546 		sigset_t __user *, oset, size_t, sigsetsize)
2547 {
2548 	sigset_t old_set, new_set;
2549 	int error;
2550 
2551 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2552 	if (sigsetsize != sizeof(sigset_t))
2553 		return -EINVAL;
2554 
2555 	old_set = current->blocked;
2556 
2557 	if (nset) {
2558 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2559 			return -EFAULT;
2560 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2561 
2562 		error = sigprocmask(how, &new_set, NULL);
2563 		if (error)
2564 			return error;
2565 	}
2566 
2567 	if (oset) {
2568 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2569 			return -EFAULT;
2570 	}
2571 
2572 	return 0;
2573 }
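/*
 * Illustrative user-space counterpart (editorial sketch): glibc's
 * sigprocmask() and pthread_sigmask() wrappers end up in the syscall
 * above.
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *
 * After this, SIGINT stays pending until it is unblocked again.
 */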
2574 
2575 #ifdef CONFIG_COMPAT
2576 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2577 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2578 {
2579 #ifdef __BIG_ENDIAN
2580 	sigset_t old_set = current->blocked;
2581 
2582 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2583 	if (sigsetsize != sizeof(sigset_t))
2584 		return -EINVAL;
2585 
2586 	if (nset) {
2587 		compat_sigset_t new32;
2588 		sigset_t new_set;
2589 		int error;
2590 		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2591 			return -EFAULT;
2592 
2593 		sigset_from_compat(&new_set, &new32);
2594 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2595 
2596 		error = sigprocmask(how, &new_set, NULL);
2597 		if (error)
2598 			return error;
2599 	}
2600 	if (oset) {
2601 		compat_sigset_t old32;
2602 		sigset_to_compat(&old32, &old_set);
2603 		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2604 			return -EFAULT;
2605 	}
2606 	return 0;
2607 #else
2608 	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2609 				  (sigset_t __user *)oset, sigsetsize);
2610 #endif
2611 }
2612 #endif
2613 
2614 static int do_sigpending(void *set, unsigned long sigsetsize)
2615 {
2616 	if (sigsetsize > sizeof(sigset_t))
2617 		return -EINVAL;
2618 
2619 	spin_lock_irq(&current->sighand->siglock);
2620 	sigorsets(set, &current->pending.signal,
2621 		  &current->signal->shared_pending.signal);
2622 	spin_unlock_irq(&current->sighand->siglock);
2623 
2624 	/* Outside the lock because only this thread touches it.  */
2625 	sigandsets(set, &current->blocked, set);
2626 	return 0;
2627 }
2628 
2629 /**
2630  *  sys_rt_sigpending - examine pending signals that have been raised
2631  *			while blocked
2632  *  @uset: stores pending signals
2633  *  @sigsetsize: size of sigset_t type or larger
2634  */
2635 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2636 {
2637 	sigset_t set;
2638 	int err = do_sigpending(&set, sigsetsize);
2639 	if (!err && copy_to_user(uset, &set, sigsetsize))
2640 		err = -EFAULT;
2641 	return err;
2642 }
2643 
2644 #ifdef CONFIG_COMPAT
2645 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2646 		compat_size_t, sigsetsize)
2647 {
2648 #ifdef __BIG_ENDIAN
2649 	sigset_t set;
2650 	int err = do_sigpending(&set, sigsetsize);
2651 	if (!err) {
2652 		compat_sigset_t set32;
2653 		sigset_to_compat(&set32, &set);
2654 		/* we can get here only if sigsetsize <= sizeof(set) */
2655 		if (copy_to_user(uset, &set32, sigsetsize))
2656 			err = -EFAULT;
2657 	}
2658 	return err;
2659 #else
2660 	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2661 #endif
2662 }
2663 #endif
2664 
2665 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2666 
2667 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2668 {
2669 	int err;
2670 
2671 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2672 		return -EFAULT;
2673 	if (from->si_code < 0)
2674 		return __copy_to_user(to, from, sizeof(siginfo_t))
2675 			? -EFAULT : 0;
2676 	/*
2677 	 * If you change siginfo_t structure, please be sure
2678 	 * this code is fixed accordingly.
2679 	 * Please remember to update the signalfd_copyinfo() function
2680 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2681 	 * It should never copy any pad contained in the structure
2682 	 * to avoid security leaks, but must copy the generic
2683 	 * 3 ints plus the relevant union member.
2684 	 */
2685 	err = __put_user(from->si_signo, &to->si_signo);
2686 	err |= __put_user(from->si_errno, &to->si_errno);
2687 	err |= __put_user((short)from->si_code, &to->si_code);
2688 	switch (from->si_code & __SI_MASK) {
2689 	case __SI_KILL:
2690 		err |= __put_user(from->si_pid, &to->si_pid);
2691 		err |= __put_user(from->si_uid, &to->si_uid);
2692 		break;
2693 	case __SI_TIMER:
2694 		 err |= __put_user(from->si_tid, &to->si_tid);
2695 		 err |= __put_user(from->si_overrun, &to->si_overrun);
2696 		 err |= __put_user(from->si_ptr, &to->si_ptr);
2697 		break;
2698 	case __SI_POLL:
2699 		err |= __put_user(from->si_band, &to->si_band);
2700 		err |= __put_user(from->si_fd, &to->si_fd);
2701 		break;
2702 	case __SI_FAULT:
2703 		err |= __put_user(from->si_addr, &to->si_addr);
2704 #ifdef __ARCH_SI_TRAPNO
2705 		err |= __put_user(from->si_trapno, &to->si_trapno);
2706 #endif
2707 #ifdef BUS_MCEERR_AO
2708 		/*
2709 		 * Other callers might not initialize the si_lsb field,
2710 		 * so check explicitly for the right codes here.
2711 		 */
2712 		if (from->si_signo == SIGBUS &&
2713 		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2714 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2715 #endif
2716 #ifdef SEGV_BNDERR
2717 		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2718 			err |= __put_user(from->si_lower, &to->si_lower);
2719 			err |= __put_user(from->si_upper, &to->si_upper);
2720 		}
2721 #endif
2722 #ifdef SEGV_PKUERR
2723 		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
2724 			err |= __put_user(from->si_pkey, &to->si_pkey);
2725 #endif
2726 		break;
2727 	case __SI_CHLD:
2728 		err |= __put_user(from->si_pid, &to->si_pid);
2729 		err |= __put_user(from->si_uid, &to->si_uid);
2730 		err |= __put_user(from->si_status, &to->si_status);
2731 		err |= __put_user(from->si_utime, &to->si_utime);
2732 		err |= __put_user(from->si_stime, &to->si_stime);
2733 		break;
2734 	case __SI_RT: /* This is not generated by the kernel as of now. */
2735 	case __SI_MESGQ: /* But this is */
2736 		err |= __put_user(from->si_pid, &to->si_pid);
2737 		err |= __put_user(from->si_uid, &to->si_uid);
2738 		err |= __put_user(from->si_ptr, &to->si_ptr);
2739 		break;
2740 #ifdef __ARCH_SIGSYS
2741 	case __SI_SYS:
2742 		err |= __put_user(from->si_call_addr, &to->si_call_addr);
2743 		err |= __put_user(from->si_syscall, &to->si_syscall);
2744 		err |= __put_user(from->si_arch, &to->si_arch);
2745 		break;
2746 #endif
2747 	default: /* this is just in case for now ... */
2748 		err |= __put_user(from->si_pid, &to->si_pid);
2749 		err |= __put_user(from->si_uid, &to->si_uid);
2750 		break;
2751 	}
2752 	return err;
2753 }
2754 
2755 #endif
2756 
2757 /**
2758  *  do_sigtimedwait - wait for queued signals specified in @which
2759  *  @which: queued signals to wait for
2760  *  @info: if non-null, the signal's siginfo is returned here
2761  *  @ts: upper bound on process time suspension
2762  */
2763 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2764 		    const struct timespec *ts)
2765 {
2766 	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
2767 	struct task_struct *tsk = current;
2768 	sigset_t mask = *which;
2769 	int sig, ret = 0;
2770 
2771 	if (ts) {
2772 		if (!timespec_valid(ts))
2773 			return -EINVAL;
2774 		timeout = timespec_to_ktime(*ts);
2775 		to = &timeout;
2776 	}
2777 
2778 	/*
2779 	 * Invert the set of allowed signals to get those we want to block.
2780 	 */
2781 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2782 	signotset(&mask);
2783 
2784 	spin_lock_irq(&tsk->sighand->siglock);
2785 	sig = dequeue_signal(tsk, &mask, info);
2786 	if (!sig && timeout.tv64) {
2787 		/*
2788 		 * None ready, temporarily unblock those we're interested in
2789 		 * while we are sleeping, so that we'll be awakened when
2790 		 * they arrive. Unblocking is always fine, we can avoid
2791 		 * set_current_blocked().
2792 		 */
2793 		tsk->real_blocked = tsk->blocked;
2794 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2795 		recalc_sigpending();
2796 		spin_unlock_irq(&tsk->sighand->siglock);
2797 
2798 		__set_current_state(TASK_INTERRUPTIBLE);
2799 		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2800 							 HRTIMER_MODE_REL);
2801 		spin_lock_irq(&tsk->sighand->siglock);
2802 		__set_task_blocked(tsk, &tsk->real_blocked);
2803 		sigemptyset(&tsk->real_blocked);
2804 		sig = dequeue_signal(tsk, &mask, info);
2805 	}
2806 	spin_unlock_irq(&tsk->sighand->siglock);
2807 
2808 	if (sig)
2809 		return sig;
2810 	return ret ? -EINTR : -EAGAIN;
2811 }
2812 
2813 /**
2814  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2815  *			in @uthese
2816  *  @uthese: queued signals to wait for
2817  *  @uinfo: if non-null, the signal's siginfo is returned here
2818  *  @uts: upper bound on process time suspension
2819  *  @sigsetsize: size of sigset_t type
2820  */
2821 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2822 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2823 		size_t, sigsetsize)
2824 {
2825 	sigset_t these;
2826 	struct timespec ts;
2827 	siginfo_t info;
2828 	int ret;
2829 
2830 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2831 	if (sigsetsize != sizeof(sigset_t))
2832 		return -EINVAL;
2833 
2834 	if (copy_from_user(&these, uthese, sizeof(these)))
2835 		return -EFAULT;
2836 
2837 	if (uts) {
2838 		if (copy_from_user(&ts, uts, sizeof(ts)))
2839 			return -EFAULT;
2840 	}
2841 
2842 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2843 
2844 	if (ret > 0 && uinfo) {
2845 		if (copy_siginfo_to_user(uinfo, &info))
2846 			ret = -EFAULT;
2847 	}
2848 
2849 	return ret;
2850 }
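/*
 * Illustrative user-space usage (editorial sketch) of the syscall above,
 * via the sigtimedwait(2) wrapper: block SIGUSR1 first so it stays queued,
 * then wait for it with a five second timeout.
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	int n;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	n = sigtimedwait(&set, &si, &ts);
 *
 * A negative return with errno EAGAIN means the timeout expired; EINTR
 * means the wait was interrupted by an unblocked signal.
 */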
2851 
2852 /**
2853  *  sys_kill - send a signal to a process
2854  *  @pid: the PID of the process
2855  *  @sig: signal to be sent
2856  */
2857 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2858 {
2859 	struct siginfo info;
2860 
2861 	info.si_signo = sig;
2862 	info.si_errno = 0;
2863 	info.si_code = SI_USER;
2864 	info.si_pid = task_tgid_vnr(current);
2865 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2866 
2867 	return kill_something_info(sig, &info, pid);
2868 }
2869 
2870 static int
2871 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2872 {
2873 	struct task_struct *p;
2874 	int error = -ESRCH;
2875 
2876 	rcu_read_lock();
2877 	p = find_task_by_vpid(pid);
2878 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2879 		error = check_kill_permission(sig, info, p);
2880 		/*
2881 		 * The null signal is a permissions and process existence
2882 		 * probe.  No signal is actually delivered.
2883 		 */
2884 		if (!error && sig) {
2885 			error = do_send_sig_info(sig, info, p, false);
2886 			/*
2887 			 * If lock_task_sighand() failed we pretend the task
2888 			 * dies after receiving the signal. The window is tiny,
2889 			 * and the signal is private anyway.
2890 			 */
2891 			if (unlikely(error == -ESRCH))
2892 				error = 0;
2893 		}
2894 	}
2895 	rcu_read_unlock();
2896 
2897 	return error;
2898 }
2899 
2900 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2901 {
2902 	struct siginfo info = {};
2903 
2904 	info.si_signo = sig;
2905 	info.si_errno = 0;
2906 	info.si_code = SI_TKILL;
2907 	info.si_pid = task_tgid_vnr(current);
2908 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2909 
2910 	return do_send_specific(tgid, pid, sig, &info);
2911 }
2912 
2913 /**
2914  *  sys_tgkill - send signal to one specific thread
2915  *  @tgid: the thread group ID of the thread
2916  *  @pid: the PID of the thread
2917  *  @sig: signal to be sent
2918  *
2919  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2920  *  exists but no longer belongs to the target process. This
2921  *  method solves the problem of threads exiting and PIDs getting reused.
2922  */
2923 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2924 {
2925 	/* This is only valid for single tasks */
2926 	if (pid <= 0 || tgid <= 0)
2927 		return -EINVAL;
2928 
2929 	return do_tkill(tgid, pid, sig);
2930 }
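/*
 * Illustrative user-space usage (editorial sketch): callers without a
 * libc wrapper typically reach tgkill through syscall(2).  The tgid check
 * above makes this safe against PID reuse of the target thread.
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */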
2931 
2932 /**
2933  *  sys_tkill - send signal to one specific task
2934  *  @pid: the PID of the task
2935  *  @sig: signal to be sent
2936  *
2937  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2938  */
2939 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2940 {
2941 	/* This is only valid for single tasks */
2942 	if (pid <= 0)
2943 		return -EINVAL;
2944 
2945 	return do_tkill(0, pid, sig);
2946 }
2947 
2948 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2949 {
2950 	/* Not even root can pretend to send signals from the kernel.
2951 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2952 	 */
2953 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2954 	    (task_pid_vnr(current) != pid))
2955 		return -EPERM;
2956 
2957 	info->si_signo = sig;
2958 
2959 	/* POSIX.1b doesn't mention process groups.  */
2960 	return kill_proc_info(sig, info, pid);
2961 }
2962 
2963 /**
2964  *  sys_rt_sigqueueinfo - send signal information to a process
2965  *  @pid: the PID of the target process
2966  *  @sig: signal to be sent
2967  *  @uinfo: signal info to be sent
2968  */
2969 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2970 		siginfo_t __user *, uinfo)
2971 {
2972 	siginfo_t info;
2973 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2974 		return -EFAULT;
2975 	return do_rt_sigqueueinfo(pid, sig, &info);
2976 }
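/*
 * Illustrative user-space counterpart (editorial sketch): sigqueue(3)
 * builds a siginfo with si_code = SI_QUEUE and an attached value, then
 * calls the syscall above.  si_code values >= 0 are rejected with -EPERM
 * by do_rt_sigqueueinfo() unless the caller targets itself.
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, val);
 */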
2977 
2978 #ifdef CONFIG_COMPAT
2979 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
2980 			compat_pid_t, pid,
2981 			int, sig,
2982 			struct compat_siginfo __user *, uinfo)
2983 {
2984 	siginfo_t info = {};
2985 	int ret = copy_siginfo_from_user32(&info, uinfo);
2986 	if (unlikely(ret))
2987 		return ret;
2988 	return do_rt_sigqueueinfo(pid, sig, &info);
2989 }
2990 #endif
2991 
2992 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2993 {
2994 	/* This is only valid for single tasks */
2995 	if (pid <= 0 || tgid <= 0)
2996 		return -EINVAL;
2997 
2998 	/* Not even root can pretend to send signals from the kernel.
2999 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3000 	 */
3001 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3002 	    (task_pid_vnr(current) != pid))
3003 		return -EPERM;
3004 
3005 	info->si_signo = sig;
3006 
3007 	return do_send_specific(tgid, pid, sig, info);
3008 }
3009 
3010 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3011 		siginfo_t __user *, uinfo)
3012 {
3013 	siginfo_t info;
3014 
3015 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3016 		return -EFAULT;
3017 
3018 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3019 }
3020 
3021 #ifdef CONFIG_COMPAT
3022 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3023 			compat_pid_t, tgid,
3024 			compat_pid_t, pid,
3025 			int, sig,
3026 			struct compat_siginfo __user *, uinfo)
3027 {
3028 	siginfo_t info = {};
3029 
3030 	if (copy_siginfo_from_user32(&info, uinfo))
3031 		return -EFAULT;
3032 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3033 }
3034 #endif
3035 
3036 /*
3037  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3038  */
3039 void kernel_sigaction(int sig, __sighandler_t action)
3040 {
3041 	spin_lock_irq(&current->sighand->siglock);
3042 	current->sighand->action[sig - 1].sa.sa_handler = action;
3043 	if (action == SIG_IGN) {
3044 		sigset_t mask;
3045 
3046 		sigemptyset(&mask);
3047 		sigaddset(&mask, sig);
3048 
3049 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3050 		flush_sigqueue_mask(&mask, &current->pending);
3051 		recalc_sigpending();
3052 	}
3053 	spin_unlock_irq(&current->sighand->siglock);
3054 }
3055 EXPORT_SYMBOL(kernel_sigaction);
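/*
 * Illustrative kthread usage (editorial sketch): the allow_signal() and
 * disallow_signal() helpers in <linux/signal.h> are thin wrappers around
 * kernel_sigaction().  A kernel thread that wants to be killable from
 * user space might do something like:
 *
 *	allow_signal(SIGKILL);
 *	while (!kthread_should_stop()) {
 *		... do work ...
 *		if (signal_pending(current))
 *			break;
 *	}
 */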
3056 
3057 void __weak sigaction_compat_abi(struct k_sigaction *act,
3058 		struct k_sigaction *oact)
3059 {
3060 }
3061 
3062 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3063 {
3064 	struct task_struct *p = current, *t;
3065 	struct k_sigaction *k;
3066 	sigset_t mask;
3067 
3068 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3069 		return -EINVAL;
3070 
3071 	k = &p->sighand->action[sig-1];
3072 
3073 	spin_lock_irq(&p->sighand->siglock);
3074 	if (oact)
3075 		*oact = *k;
3076 
3077 	sigaction_compat_abi(act, oact);
3078 
3079 	if (act) {
3080 		sigdelsetmask(&act->sa.sa_mask,
3081 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3082 		*k = *act;
3083 		/*
3084 		 * POSIX 3.3.1.3:
3085 		 *  "Setting a signal action to SIG_IGN for a signal that is
3086 		 *   pending shall cause the pending signal to be discarded,
3087 		 *   whether or not it is blocked."
3088 		 *
3089 		 *  "Setting a signal action to SIG_DFL for a signal that is
3090 		 *   pending and whose default action is to ignore the signal
3091 		 *   (for example, SIGCHLD), shall cause the pending signal to
3092 		 *   be discarded, whether or not it is blocked"
3093 		 */
3094 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3095 			sigemptyset(&mask);
3096 			sigaddset(&mask, sig);
3097 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3098 			for_each_thread(p, t)
3099 				flush_sigqueue_mask(&mask, &t->pending);
3100 		}
3101 	}
3102 
3103 	spin_unlock_irq(&p->sighand->siglock);
3104 	return 0;
3105 }
3106 
3107 static int
3108 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3109 {
3110 	stack_t oss;
3111 	int error;
3112 
3113 	oss.ss_sp = (void __user *) current->sas_ss_sp;
3114 	oss.ss_size = current->sas_ss_size;
3115 	oss.ss_flags = sas_ss_flags(sp) |
3116 		(current->sas_ss_flags & SS_FLAG_BITS);
3117 
3118 	if (uss) {
3119 		void __user *ss_sp;
3120 		size_t ss_size;
3121 		unsigned ss_flags;
3122 		int ss_mode;
3123 
3124 		error = -EFAULT;
3125 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3126 			goto out;
3127 		error = __get_user(ss_sp, &uss->ss_sp) |
3128 			__get_user(ss_flags, &uss->ss_flags) |
3129 			__get_user(ss_size, &uss->ss_size);
3130 		if (error)
3131 			goto out;
3132 
3133 		error = -EPERM;
3134 		if (on_sig_stack(sp))
3135 			goto out;
3136 
3137 		ss_mode = ss_flags & ~SS_FLAG_BITS;
3138 		error = -EINVAL;
3139 		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3140 				ss_mode != 0)
3141 			goto out;
3142 
3143 		if (ss_mode == SS_DISABLE) {
3144 			ss_size = 0;
3145 			ss_sp = NULL;
3146 		} else {
3147 			error = -ENOMEM;
3148 			if (ss_size < MINSIGSTKSZ)
3149 				goto out;
3150 		}
3151 
3152 		current->sas_ss_sp = (unsigned long) ss_sp;
3153 		current->sas_ss_size = ss_size;
3154 		current->sas_ss_flags = ss_flags;
3155 	}
3156 
3157 	error = 0;
3158 	if (uoss) {
3159 		error = -EFAULT;
3160 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3161 			goto out;
3162 		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3163 			__put_user(oss.ss_size, &uoss->ss_size) |
3164 			__put_user(oss.ss_flags, &uoss->ss_flags);
3165 	}
3166 
3167 out:
3168 	return error;
3169 }
3170 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3171 {
3172 	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3173 }
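/*
 * Illustrative user-space usage (editorial sketch): install an alternate
 * signal stack so that a SIGSEGV handler can run even after the normal
 * stack has overflowed.  segv_handler is a hypothetical handler.
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,
 *		.sa_flags = SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */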
3174 
3175 int restore_altstack(const stack_t __user *uss)
3176 {
3177 	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3178 	/* squash all but EFAULT for now */
3179 	return err == -EFAULT ? err : 0;
3180 }
3181 
3182 int __save_altstack(stack_t __user *uss, unsigned long sp)
3183 {
3184 	struct task_struct *t = current;
3185 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3186 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3187 		__put_user(t->sas_ss_size, &uss->ss_size);
3188 	if (err)
3189 		return err;
3190 	if (t->sas_ss_flags & SS_AUTODISARM)
3191 		sas_ss_reset(t);
3192 	return 0;
3193 }
3194 
3195 #ifdef CONFIG_COMPAT
3196 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3197 			const compat_stack_t __user *, uss_ptr,
3198 			compat_stack_t __user *, uoss_ptr)
3199 {
3200 	stack_t uss, uoss;
3201 	int ret;
3202 	mm_segment_t seg;
3203 
3204 	if (uss_ptr) {
3205 		compat_stack_t uss32;
3206 
3207 		memset(&uss, 0, sizeof(stack_t));
3208 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3209 			return -EFAULT;
3210 		uss.ss_sp = compat_ptr(uss32.ss_sp);
3211 		uss.ss_flags = uss32.ss_flags;
3212 		uss.ss_size = uss32.ss_size;
3213 	}
3214 	seg = get_fs();
3215 	set_fs(KERNEL_DS);
3216 	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3217 			     (stack_t __force __user *) &uoss,
3218 			     compat_user_stack_pointer());
3219 	set_fs(seg);
3220 	if (ret >= 0 && uoss_ptr)  {
3221 		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3222 		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3223 		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3224 		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3225 			ret = -EFAULT;
3226 	}
3227 	return ret;
3228 }
3229 
3230 int compat_restore_altstack(const compat_stack_t __user *uss)
3231 {
3232 	int err = compat_sys_sigaltstack(uss, NULL);
3233 	/* squash all but -EFAULT for now */
3234 	return err == -EFAULT ? err : 0;
3235 }
3236 
3237 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3238 {
3239 	int err;
3240 	struct task_struct *t = current;
3241 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3242 			 &uss->ss_sp) |
3243 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3244 		__put_user(t->sas_ss_size, &uss->ss_size);
3245 	if (err)
3246 		return err;
3247 	if (t->sas_ss_flags & SS_AUTODISARM)
3248 		sas_ss_reset(t);
3249 	return 0;
3250 }
3251 #endif
3252 
3253 #ifdef __ARCH_WANT_SYS_SIGPENDING
3254 
3255 /**
3256  *  sys_sigpending - examine pending signals
3257  *  @set: where mask of pending signal is returned
3258  */
3259 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3260 {
3261 	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3262 }
3263 
3264 #endif
3265 
3266 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3267 /**
3268  *  sys_sigprocmask - examine and change blocked signals
3269  *  @how: whether to add, remove, or set signals
3270  *  @nset: signals to add or remove (if non-null)
3271  *  @oset: previous value of signal mask if non-null
3272  *
3273  * Some platforms have their own version with special arguments;
3274  * others support only sys_rt_sigprocmask.
3275  */
3276 
3277 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3278 		old_sigset_t __user *, oset)
3279 {
3280 	old_sigset_t old_set, new_set;
3281 	sigset_t new_blocked;
3282 
3283 	old_set = current->blocked.sig[0];
3284 
3285 	if (nset) {
3286 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3287 			return -EFAULT;
3288 
3289 		new_blocked = current->blocked;
3290 
3291 		switch (how) {
3292 		case SIG_BLOCK:
3293 			sigaddsetmask(&new_blocked, new_set);
3294 			break;
3295 		case SIG_UNBLOCK:
3296 			sigdelsetmask(&new_blocked, new_set);
3297 			break;
3298 		case SIG_SETMASK:
3299 			new_blocked.sig[0] = new_set;
3300 			break;
3301 		default:
3302 			return -EINVAL;
3303 		}
3304 
3305 		set_current_blocked(&new_blocked);
3306 	}
3307 
3308 	if (oset) {
3309 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3310 			return -EFAULT;
3311 	}
3312 
3313 	return 0;
3314 }
3315 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3316 
3317 #ifndef CONFIG_ODD_RT_SIGACTION
3318 /**
3319  *  sys_rt_sigaction - alter an action taken by a process
3320  *  @sig: signal to be sent
3321  *  @act: new sigaction
3322  *  @oact: used to save the previous sigaction
3323  *  @sigsetsize: size of sigset_t type
3324  */
3325 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3326 		const struct sigaction __user *, act,
3327 		struct sigaction __user *, oact,
3328 		size_t, sigsetsize)
3329 {
3330 	struct k_sigaction new_sa, old_sa;
3331 	int ret = -EINVAL;
3332 
3333 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3334 	if (sigsetsize != sizeof(sigset_t))
3335 		goto out;
3336 
3337 	if (act) {
3338 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3339 			return -EFAULT;
3340 	}
3341 
3342 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3343 
3344 	if (!ret && oact) {
3345 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3346 			return -EFAULT;
3347 	}
3348 out:
3349 	return ret;
3350 }
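/*
 * Illustrative user-space counterpart (editorial sketch): glibc's
 * sigaction() wrapper funnels into the syscall above.  handle_term is a
 * hypothetical handler.
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_handler = handle_term;
 *	sa.sa_flags = SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGTERM, &sa, NULL);
 */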
3351 #ifdef CONFIG_COMPAT
3352 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3353 		const struct compat_sigaction __user *, act,
3354 		struct compat_sigaction __user *, oact,
3355 		compat_size_t, sigsetsize)
3356 {
3357 	struct k_sigaction new_ka, old_ka;
3358 	compat_sigset_t mask;
3359 #ifdef __ARCH_HAS_SA_RESTORER
3360 	compat_uptr_t restorer;
3361 #endif
3362 	int ret;
3363 
3364 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3365 	if (sigsetsize != sizeof(compat_sigset_t))
3366 		return -EINVAL;
3367 
3368 	if (act) {
3369 		compat_uptr_t handler;
3370 		ret = get_user(handler, &act->sa_handler);
3371 		new_ka.sa.sa_handler = compat_ptr(handler);
3372 #ifdef __ARCH_HAS_SA_RESTORER
3373 		ret |= get_user(restorer, &act->sa_restorer);
3374 		new_ka.sa.sa_restorer = compat_ptr(restorer);
3375 #endif
3376 		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3377 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3378 		if (ret)
3379 			return -EFAULT;
3380 		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3381 	}
3382 
3383 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3384 	if (!ret && oact) {
3385 		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3386 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3387 			       &oact->sa_handler);
3388 		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3389 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3390 #ifdef __ARCH_HAS_SA_RESTORER
3391 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3392 				&oact->sa_restorer);
3393 #endif
3394 	}
3395 	return ret;
3396 }
3397 #endif
3398 #endif /* !CONFIG_ODD_RT_SIGACTION */
3399 
3400 #ifdef CONFIG_OLD_SIGACTION
3401 SYSCALL_DEFINE3(sigaction, int, sig,
3402 		const struct old_sigaction __user *, act,
3403 	        struct old_sigaction __user *, oact)
3404 {
3405 	struct k_sigaction new_ka, old_ka;
3406 	int ret;
3407 
3408 	if (act) {
3409 		old_sigset_t mask;
3410 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3411 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3412 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3413 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3414 		    __get_user(mask, &act->sa_mask))
3415 			return -EFAULT;
3416 #ifdef __ARCH_HAS_KA_RESTORER
3417 		new_ka.ka_restorer = NULL;
3418 #endif
3419 		siginitset(&new_ka.sa.sa_mask, mask);
3420 	}
3421 
3422 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3423 
3424 	if (!ret && oact) {
3425 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3426 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3427 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3428 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3429 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3430 			return -EFAULT;
3431 	}
3432 
3433 	return ret;
3434 }
3435 #endif
3436 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3437 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3438 		const struct compat_old_sigaction __user *, act,
3439 	        struct compat_old_sigaction __user *, oact)
3440 {
3441 	struct k_sigaction new_ka, old_ka;
3442 	int ret;
3443 	compat_old_sigset_t mask;
3444 	compat_uptr_t handler, restorer;
3445 
3446 	if (act) {
3447 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3448 		    __get_user(handler, &act->sa_handler) ||
3449 		    __get_user(restorer, &act->sa_restorer) ||
3450 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3451 		    __get_user(mask, &act->sa_mask))
3452 			return -EFAULT;
3453 
3454 #ifdef __ARCH_HAS_KA_RESTORER
3455 		new_ka.ka_restorer = NULL;
3456 #endif
3457 		new_ka.sa.sa_handler = compat_ptr(handler);
3458 		new_ka.sa.sa_restorer = compat_ptr(restorer);
3459 		siginitset(&new_ka.sa.sa_mask, mask);
3460 	}
3461 
3462 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3463 
3464 	if (!ret && oact) {
3465 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3466 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3467 			       &oact->sa_handler) ||
3468 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3469 			       &oact->sa_restorer) ||
3470 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3471 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3472 			return -EFAULT;
3473 	}
3474 	return ret;
3475 }
3476 #endif
3477 
3478 #ifdef CONFIG_SGETMASK_SYSCALL
3479 
3480 /*
3481  * For backwards compatibility.  Functionality superseded by sigprocmask.
3482  */
3483 SYSCALL_DEFINE0(sgetmask)
3484 {
3485 	/* SMP safe */
3486 	return current->blocked.sig[0];
3487 }
3488 
3489 SYSCALL_DEFINE1(ssetmask, int, newmask)
3490 {
3491 	int old = current->blocked.sig[0];
3492 	sigset_t newset;
3493 
3494 	siginitset(&newset, newmask);
3495 	set_current_blocked(&newset);
3496 
3497 	return old;
3498 }
3499 #endif /* CONFIG_SGETMASK_SYSCALL */
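
/*
 * Illustrative sketch only, not part of the original file: the legacy
 * sgetmask()/ssetmask() calls above touch only the first word of the
 * blocked set.  As the comment notes, they are superseded by
 * sigprocmask(2); the modern userspace equivalent of "block SIGINT and
 * remember the old mask" (helper name made up for the example) is roughly:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int block_sigint(sigset_t *saved)
 *	{
 *		sigset_t set;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		if (sigprocmask(SIG_BLOCK, &set, saved) < 0) {
 *			perror("sigprocmask");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */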
3500 
3501 #ifdef __ARCH_WANT_SYS_SIGNAL
3502 /*
3503  * For backwards compatibility.  Functionality superseded by sigaction.
3504  */
3505 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3506 {
3507 	struct k_sigaction new_sa, old_sa;
3508 	int ret;
3509 
3510 	new_sa.sa.sa_handler = handler;
3511 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3512 	sigemptyset(&new_sa.sa.sa_mask);
3513 
3514 	ret = do_sigaction(sig, &new_sa, &old_sa);
3515 
3516 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3517 }
3518 #endif /* __ARCH_WANT_SYS_SIGNAL */
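
/*
 * Illustrative sketch only, not part of the original file: sys_signal()
 * above installs the handler with SA_ONESHOT | SA_NOMASK, i.e. classic
 * SysV semantics (the disposition is reset to SIG_DFL on delivery and the
 * signal is not blocked while the handler runs).  Expressed through
 * userspace sigaction(2) with the corresponding flag names, the same
 * behaviour is roughly (helper name made up for the example):
 *
 *	#include <signal.h>
 *
 *	int sysv_style_signal(int sig, void (*handler)(int))
 *	{
 *		struct sigaction sa = { 0 };
 *
 *		sa.sa_handler = handler;
 *		sa.sa_flags = SA_RESETHAND | SA_NODEFER;
 *		sigemptyset(&sa.sa_mask);
 *
 *		return sigaction(sig, &sa, NULL);
 *	}
 */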
3519 
3520 #ifdef __ARCH_WANT_SYS_PAUSE
3521 
3522 SYSCALL_DEFINE0(pause)
3523 {
3524 	while (!signal_pending(current)) {
3525 		__set_current_state(TASK_INTERRUPTIBLE);
3526 		schedule();
3527 	}
3528 	return -ERESTARTNOHAND;
3529 }
3530 
3531 #endif
3532 
3533 static int sigsuspend(sigset_t *set)
3534 {
3535 	current->saved_sigmask = current->blocked;
3536 	set_current_blocked(set);
3537 
3538 	while (!signal_pending(current)) {
3539 		__set_current_state(TASK_INTERRUPTIBLE);
3540 		schedule();
3541 	}
3542 	set_restore_sigmask();
3543 	return -ERESTARTNOHAND;
3544 }
3545 
3546 /**
3547  *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
3548  *	until a signal is received
3549  *  @unewset: new signal mask value
3550  *  @sigsetsize: size of sigset_t type
3551  */
3552 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3553 {
3554 	sigset_t newset;
3555 
3556 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3557 	if (sigsetsize != sizeof(sigset_t))
3558 		return -EINVAL;
3559 
3560 	if (copy_from_user(&newset, unewset, sizeof(newset)))
3561 		return -EFAULT;
3562 	return sigsuspend(&newset);
3563 }
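
/*
 * Illustrative sketch only, not part of the original file: the value of
 * rt_sigsuspend() is that replacing the mask and going to sleep happen
 * atomically, closing the race in "unblock, then pause()".  The usual
 * userspace pattern via sigsuspend(2) looks roughly like this, assuming
 * got_usr1 is set by a SIGUSR1 handler installed elsewhere (names made up
 * for the example):
 *
 *	#include <signal.h>
 *
 *	volatile sig_atomic_t got_usr1;
 *
 *	void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *
 *		while (!got_usr1)
 *			sigsuspend(&old);
 *
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */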
3564 
3565 #ifdef CONFIG_COMPAT
3566 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3567 {
3568 #ifdef __BIG_ENDIAN
3569 	sigset_t newset;
3570 	compat_sigset_t newset32;
3571 
3572 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3573 	if (sigsetsize != sizeof(sigset_t))
3574 		return -EINVAL;
3575 
3576 	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3577 		return -EFAULT;
3578 	sigset_from_compat(&newset, &newset32);
3579 	return sigsuspend(&newset);
3580 #else
3581 	/* on little-endian, compat and native bitmaps have the same layout */
3582 	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3583 #endif
3584 }
3585 #endif
3586 
3587 #ifdef CONFIG_OLD_SIGSUSPEND
3588 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3589 {
3590 	sigset_t blocked;
3591 	siginitset(&blocked, mask);
3592 	return sigsuspend(&blocked);
3593 }
3594 #endif
3595 #ifdef CONFIG_OLD_SIGSUSPEND3
3596 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3597 {
3598 	sigset_t blocked;
3599 	siginitset(&blocked, mask);
3600 	return sigsuspend(&blocked);
3601 }
3602 #endif
3603 
3604 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3605 {
3606 	return NULL;
3607 }
3608 
3609 void __init signals_init(void)
3610 {
3611 	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3612 	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3613 		!= offsetof(struct siginfo, _sifields._pad));
3614 
3615 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3616 }
3617 
3618 #ifdef CONFIG_KGDB_KDB
3619 #include <linux/kdb.h>
3620 /*
3621  * kdb_send_sig_info - Allows kdb to send signals without exposing
3622  * signal internals.  This function checks if the required locks are
3623  * available before calling the main signal code, to avoid kdb
3624  * deadlocks.
3625  */
3626 void
3627 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3628 {
3629 	static struct task_struct *kdb_prev_t;
3630 	int sig, new_t;
3631 	if (!spin_trylock(&t->sighand->siglock)) {
3632 		kdb_printf("Can't do kill command now.\n"
3633 			   "The sigmask lock is held somewhere else in "
3634 			   "kernel, try again later\n");
3635 		return;
3636 	}
3637 	spin_unlock(&t->sighand->siglock);
3638 	new_t = kdb_prev_t != t;
3639 	kdb_prev_t = t;
3640 	if (t->state != TASK_RUNNING && new_t) {
3641 		kdb_printf("Process is not RUNNING, sending a signal from "
3642 			   "kdb risks deadlock\n"
3643 			   "on the run queue locks. "
3644 			   "The signal has _not_ been sent.\n"
3645 			   "Reissue the kill command if you want to risk "
3646 			   "the deadlock.\n");
3647 		return;
3648 	}
3649 	sig = info->si_signo;
3650 	if (send_sig_info(sig, info, t))
3651 		kdb_printf("Fail to deliver Signal %d to process %d.\n",
3652 			   sig, t->pid);
3653 	else
3654 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3655 }
3656 #endif	/* CONFIG_KGDB_KDB */
3657