1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/kernel/signal.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 *
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
12 */
13
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/proc_fs.h>
26 #include <linux/tty.h>
27 #include <linux/binfmts.h>
28 #include <linux/coredump.h>
29 #include <linux/security.h>
30 #include <linux/syscalls.h>
31 #include <linux/ptrace.h>
32 #include <linux/signal.h>
33 #include <linux/signalfd.h>
34 #include <linux/ratelimit.h>
35 #include <linux/task_work.h>
36 #include <linux/capability.h>
37 #include <linux/freezer.h>
38 #include <linux/pid_namespace.h>
39 #include <linux/nsproxy.h>
40 #include <linux/user_namespace.h>
41 #include <linux/uprobes.h>
42 #include <linux/compat.h>
43 #include <linux/cn_proc.h>
44 #include <linux/compiler.h>
45 #include <linux/posix-timers.h>
46 #include <linux/cgroup.h>
47 #include <linux/audit.h>
48 #include <linux/oom.h>
49
50 #define CREATE_TRACE_POINTS
51 #include <trace/events/signal.h>
52
53 #include <asm/param.h>
54 #include <linux/uaccess.h>
55 #include <asm/unistd.h>
56 #include <asm/siginfo.h>
57 #include <asm/cacheflush.h>
58 #include <asm/syscall.h> /* for syscall_get_* */
59
60 #undef CREATE_TRACE_POINTS
61 #include <trace/hooks/signal.h>
62 #include <trace/hooks/dtask.h>
63 /*
64 * SLAB caches for signal bits.
65 */
66
67 static struct kmem_cache *sigqueue_cachep;
68
69 int print_fatal_signals __read_mostly;
70
71 static void __user *sig_handler(struct task_struct *t, int sig)
72 {
73 return t->sighand->action[sig - 1].sa.sa_handler;
74 }
75
76 static inline bool sig_handler_ignored(void __user *handler, int sig)
77 {
78 /* Is it explicitly or implicitly ignored? */
79 return handler == SIG_IGN ||
80 (handler == SIG_DFL && sig_kernel_ignore(sig));
81 }
82
83 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
84 {
85 void __user *handler;
86
87 handler = sig_handler(t, sig);
88
89 /* SIGKILL and SIGSTOP may not be sent to the global init */
90 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
91 return true;
92
93 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
94 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
95 return true;
96
97 /* Only allow kernel generated signals to this kthread */
98 if (unlikely((t->flags & PF_KTHREAD) &&
99 (handler == SIG_KTHREAD_KERNEL) && !force))
100 return true;
101
102 return sig_handler_ignored(handler, sig);
103 }
104
105 static bool sig_ignored(struct task_struct *t, int sig, bool force)
106 {
107 /*
108 * Blocked signals are never ignored, since the
109 * signal handler may change by the time it is
110 * unblocked.
111 */
112 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
113 return false;
114
115 /*
116 * Tracers may want to know about even ignored signals, unless it
117 * is SIGKILL, which can't be reported anyway but can be ignored
118 * by a SIGNAL_UNKILLABLE task.
119 */
120 if (t->ptrace && sig != SIGKILL)
121 return false;
122
123 return sig_task_ignored(t, sig, force);
124 }
125
126 /*
127 * Re-calculate pending state from the set of locally pending
128 * signals, globally pending signals, and blocked signals.
129 */
130 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
131 {
132 unsigned long ready;
133 long i;
134
135 switch (_NSIG_WORDS) {
136 default:
137 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
138 ready |= signal->sig[i] &~ blocked->sig[i];
139 break;
140
141 case 4: ready = signal->sig[3] &~ blocked->sig[3];
142 ready |= signal->sig[2] &~ blocked->sig[2];
143 ready |= signal->sig[1] &~ blocked->sig[1];
144 ready |= signal->sig[0] &~ blocked->sig[0];
145 break;
146
147 case 2: ready = signal->sig[1] &~ blocked->sig[1];
148 ready |= signal->sig[0] &~ blocked->sig[0];
149 break;
150
151 case 1: ready = signal->sig[0] &~ blocked->sig[0];
152 }
153 return ready != 0;
154 }
155
156 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
157
158 static bool recalc_sigpending_tsk(struct task_struct *t)
159 {
160 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
161 PENDING(&t->pending, &t->blocked) ||
162 PENDING(&t->signal->shared_pending, &t->blocked) ||
163 cgroup_task_frozen(t)) {
164 set_tsk_thread_flag(t, TIF_SIGPENDING);
165 return true;
166 }
167
168 /*
169 * We must never clear the flag in another thread, or in current
170 * when it's possible the current syscall is returning -ERESTART*.
171 * So we don't clear it here; only callers who know they should clear it do so.
172 */
173 return false;
174 }
175
176 /*
177 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
178 * This is superfluous when called on current, the wakeup is a harmless no-op.
179 */
180 void recalc_sigpending_and_wake(struct task_struct *t)
181 {
182 if (recalc_sigpending_tsk(t))
183 signal_wake_up(t, 0);
184 }
185
186 void recalc_sigpending(void)
187 {
188 if (!recalc_sigpending_tsk(current) && !freezing(current))
189 clear_thread_flag(TIF_SIGPENDING);
190
191 }
192 EXPORT_SYMBOL(recalc_sigpending);
193
194 void calculate_sigpending(void)
195 {
196 /* Have any signals or users of TIF_SIGPENDING been delayed
197 * until after fork?
198 */
199 spin_lock_irq(&current->sighand->siglock);
200 set_tsk_thread_flag(current, TIF_SIGPENDING);
201 recalc_sigpending();
202 spin_unlock_irq(&current->sighand->siglock);
203 }
204
205 /* Given the mask, find the first available signal that should be serviced. */
206
207 #define SYNCHRONOUS_MASK \
208 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
209 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
210
211 int next_signal(struct sigpending *pending, sigset_t *mask)
212 {
213 unsigned long i, *s, *m, x;
214 int sig = 0;
215
216 s = pending->signal.sig;
217 m = mask->sig;
218
219 /*
220 * Handle the first word specially: it contains the
221 * synchronous signals that need to be dequeued first.
222 */
223 x = *s &~ *m;
224 if (x) {
225 if (x & SYNCHRONOUS_MASK)
226 x &= SYNCHRONOUS_MASK;
227 sig = ffz(~x) + 1;
228 return sig;
229 }
230
231 switch (_NSIG_WORDS) {
232 default:
233 for (i = 1; i < _NSIG_WORDS; ++i) {
234 x = *++s &~ *++m;
235 if (!x)
236 continue;
237 sig = ffz(~x) + i*_NSIG_BPW + 1;
238 break;
239 }
240 break;
241
242 case 2:
243 x = s[1] &~ m[1];
244 if (!x)
245 break;
246 sig = ffz(~x) + _NSIG_BPW + 1;
247 break;
248
249 case 1:
250 /* Nothing to do */
251 break;
252 }
253
254 return sig;
255 }
256
257 static inline void print_dropped_signal(int sig)
258 {
259 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
260
261 if (!print_fatal_signals)
262 return;
263
264 if (!__ratelimit(&ratelimit_state))
265 return;
266
267 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
268 current->comm, current->pid, sig);
269 }
270
271 /**
272 * task_set_jobctl_pending - set jobctl pending bits
273 * @task: target task
274 * @mask: pending bits to set
275 *
276 * Set @mask in @task->jobctl. @mask must be a subset of
277 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
278 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
279 * cleared. If @task is already being killed or exiting, this function
280 * becomes a no-op.
281 *
282 * CONTEXT:
283 * Must be called with @task->sighand->siglock held.
284 *
285 * RETURNS:
286 * %true if @mask is set, %false if made a no-op because @task was dying.
287 */
288 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
289 {
290 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
291 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
292 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
293
294 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
295 return false;
296
297 if (mask & JOBCTL_STOP_SIGMASK)
298 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
299
300 task->jobctl |= mask;
301 return true;
302 }
303
304 /**
305 * task_clear_jobctl_trapping - clear jobctl trapping bit
306 * @task: target task
307 *
308 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
309 * Clear it and wake up the ptracer. Note that we don't need any further
310 * locking. @task->siglock guarantees that @task->parent points to the
311 * ptracer.
312 *
313 * CONTEXT:
314 * Must be called with @task->sighand->siglock held.
315 */
316 void task_clear_jobctl_trapping(struct task_struct *task)
317 {
318 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
319 task->jobctl &= ~JOBCTL_TRAPPING;
320 smp_mb(); /* advised by wake_up_bit() */
321 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
322 }
323 }
324
325 /**
326 * task_clear_jobctl_pending - clear jobctl pending bits
327 * @task: target task
328 * @mask: pending bits to clear
329 *
330 * Clear @mask from @task->jobctl. @mask must be subset of
331 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
332 * STOP bits are cleared together.
333 *
334 * If clearing of @mask leaves no stop or trap pending, this function calls
335 * task_clear_jobctl_trapping().
336 *
337 * CONTEXT:
338 * Must be called with @task->sighand->siglock held.
339 */
340 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
341 {
342 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
343
344 if (mask & JOBCTL_STOP_PENDING)
345 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
346
347 task->jobctl &= ~mask;
348
349 if (!(task->jobctl & JOBCTL_PENDING_MASK))
350 task_clear_jobctl_trapping(task);
351 }
352
353 /**
354 * task_participate_group_stop - participate in a group stop
355 * @task: task participating in a group stop
356 *
357 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
358 * Group stop states are cleared and the group stop count is consumed if
359 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
360 * stop, the appropriate `SIGNAL_*` flags are set.
361 *
362 * CONTEXT:
363 * Must be called with @task->sighand->siglock held.
364 *
365 * RETURNS:
366 * %true if group stop completion should be notified to the parent, %false
367 * otherwise.
368 */
369 static bool task_participate_group_stop(struct task_struct *task)
370 {
371 struct signal_struct *sig = task->signal;
372 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
373
374 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
375
376 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
377
378 if (!consume)
379 return false;
380
381 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
382 sig->group_stop_count--;
383
384 /*
385 * Tell the caller to notify completion iff we are entering into a
386 * fresh group stop. Read comment in do_signal_stop() for details.
387 */
388 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
389 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
390 return true;
391 }
392 return false;
393 }
394
395 void task_join_group_stop(struct task_struct *task)
396 {
397 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
398 struct signal_struct *sig = current->signal;
399
400 if (sig->group_stop_count) {
401 sig->group_stop_count++;
402 mask |= JOBCTL_STOP_CONSUME;
403 } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
404 return;
405
406 /* Have the new thread join an on-going signal group stop */
407 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
408 }
409
410 /*
411 * allocate a new signal queue record
412 * - this may be called without locks if and only if t == current, otherwise an
413 * appropriate lock must be held to stop the target task from exiting
414 */
415 static struct sigqueue *
416 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
417 int override_rlimit, const unsigned int sigqueue_flags)
418 {
419 struct sigqueue *q = NULL;
420 struct ucounts *ucounts = NULL;
421 long sigpending;
422
423 /*
424 * Protect access to @t credentials. This can go away when all
425 * callers hold rcu read lock.
426 *
427 * NOTE! A pending signal will hold on to the user refcount,
428 * and we get/put the refcount only when the sigpending count
429 * changes from/to zero.
430 */
431 rcu_read_lock();
432 ucounts = task_ucounts(t);
433 sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
434 rcu_read_unlock();
435 if (!sigpending)
436 return NULL;
437
438 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
439 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
440 } else {
441 print_dropped_signal(sig);
442 }
443
444 if (unlikely(q == NULL)) {
445 dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
446 } else {
447 INIT_LIST_HEAD(&q->list);
448 q->flags = sigqueue_flags;
449 q->ucounts = ucounts;
450 }
451 return q;
452 }
453
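/*
 * Free a queue entry obtained from __sigqueue_alloc(). Preallocated
 * entries (SIGQUEUE_PREALLOC, e.g. from posix-timers) are not freed
 * here; for everything else the RLIMIT_SIGPENDING accounting taken at
 * allocation time is dropped before the entry goes back to the slab.
 */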
454 static void __sigqueue_free(struct sigqueue *q)
455 {
456 if (q->flags & SIGQUEUE_PREALLOC)
457 return;
458 if (q->ucounts) {
459 dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
460 q->ucounts = NULL;
461 }
462 kmem_cache_free(sigqueue_cachep, q);
463 }
464
465 void flush_sigqueue(struct sigpending *queue)
466 {
467 struct sigqueue *q;
468
469 sigemptyset(&queue->signal);
470 while (!list_empty(&queue->list)) {
471 q = list_entry(queue->list.next, struct sigqueue , list);
472 list_del_init(&q->list);
473 __sigqueue_free(q);
474 }
475 }
476
477 /*
478 * Flush all pending signals for this kthread.
479 */
480 void flush_signals(struct task_struct *t)
481 {
482 unsigned long flags;
483
484 spin_lock_irqsave(&t->sighand->siglock, flags);
485 clear_tsk_thread_flag(t, TIF_SIGPENDING);
486 flush_sigqueue(&t->pending);
487 flush_sigqueue(&t->signal->shared_pending);
488 spin_unlock_irqrestore(&t->sighand->siglock, flags);
489 }
490 EXPORT_SYMBOL(flush_signals);
491
492 #ifdef CONFIG_POSIX_TIMERS
493 static void __flush_itimer_signals(struct sigpending *pending)
494 {
495 sigset_t signal, retain;
496 struct sigqueue *q, *n;
497
498 signal = pending->signal;
499 sigemptyset(&retain);
500
501 list_for_each_entry_safe(q, n, &pending->list, list) {
502 int sig = q->info.si_signo;
503
504 if (likely(q->info.si_code != SI_TIMER)) {
505 sigaddset(&retain, sig);
506 } else {
507 sigdelset(&signal, sig);
508 list_del_init(&q->list);
509 __sigqueue_free(q);
510 }
511 }
512
513 sigorsets(&pending->signal, &signal, &retain);
514 }
515
516 void flush_itimer_signals(void)
517 {
518 struct task_struct *tsk = current;
519 unsigned long flags;
520
521 spin_lock_irqsave(&tsk->sighand->siglock, flags);
522 __flush_itimer_signals(&tsk->pending);
523 __flush_itimer_signals(&tsk->signal->shared_pending);
524 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
525 }
526 #endif
527
528 void ignore_signals(struct task_struct *t)
529 {
530 int i;
531
532 for (i = 0; i < _NSIG; ++i)
533 t->sighand->action[i].sa.sa_handler = SIG_IGN;
534
535 flush_signals(t);
536 }
537
538 /*
539 * Flush all handlers for a task.
540 */
541
542 void
543 flush_signal_handlers(struct task_struct *t, int force_default)
544 {
545 int i;
546 struct k_sigaction *ka = &t->sighand->action[0];
547 for (i = _NSIG ; i != 0 ; i--) {
548 if (force_default || ka->sa.sa_handler != SIG_IGN)
549 ka->sa.sa_handler = SIG_DFL;
550 ka->sa.sa_flags = 0;
551 #ifdef __ARCH_HAS_SA_RESTORER
552 ka->sa.sa_restorer = NULL;
553 #endif
554 sigemptyset(&ka->sa.sa_mask);
555 ka++;
556 }
557 }
558
559 bool unhandled_signal(struct task_struct *tsk, int sig)
560 {
561 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
562 if (is_global_init(tsk))
563 return true;
564
565 if (handler != SIG_IGN && handler != SIG_DFL)
566 return false;
567
568 /* If dying, we handle all new signals by ignoring them */
569 if (fatal_signal_pending(tsk))
570 return false;
571
572 /* if ptraced, let the tracer determine */
573 return !tsk->ptrace;
574 }
575
576 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
577 bool *resched_timer)
578 {
579 struct sigqueue *q, *first = NULL;
580
581 /*
582 * Collect the siginfo appropriate to this signal. Check if
583 * there is another siginfo for the same signal.
584 */
585 list_for_each_entry(q, &list->list, list) {
586 if (q->info.si_signo == sig) {
587 if (first)
588 goto still_pending;
589 first = q;
590 }
591 }
592
593 sigdelset(&list->signal, sig);
594
595 if (first) {
596 still_pending:
597 list_del_init(&first->list);
598 copy_siginfo(info, &first->info);
599
600 *resched_timer =
601 (first->flags & SIGQUEUE_PREALLOC) &&
602 (info->si_code == SI_TIMER) &&
603 (info->si_sys_private);
604
605 __sigqueue_free(first);
606 } else {
607 /*
608 * Ok, it wasn't in the queue. This must be
609 * a fast-pathed signal or we must have been
610 * out of queue space. So zero out the info.
611 */
612 clear_siginfo(info);
613 info->si_signo = sig;
614 info->si_errno = 0;
615 info->si_code = SI_USER;
616 info->si_pid = 0;
617 info->si_uid = 0;
618 }
619 }
620
621 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
622 kernel_siginfo_t *info, bool *resched_timer)
623 {
624 int sig = next_signal(pending, mask);
625
626 if (sig)
627 collect_signal(sig, pending, info, resched_timer);
628 return sig;
629 }
630
631 /*
632 * Dequeue a signal and return the element to the caller, which is
633 * expected to free it.
634 *
635 * All callers have to hold the siglock.
636 */
637 int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
638 kernel_siginfo_t *info, enum pid_type *type)
639 {
640 bool resched_timer = false;
641 int signr;
642
643 /* We only dequeue private signals from ourselves, we don't let
644 * signalfd steal them
645 */
646 *type = PIDTYPE_PID;
647 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
648 if (!signr) {
649 *type = PIDTYPE_TGID;
650 signr = __dequeue_signal(&tsk->signal->shared_pending,
651 mask, info, &resched_timer);
652 #ifdef CONFIG_POSIX_TIMERS
653 /*
654 * itimer signal ?
655 *
656 * itimers are process shared and we restart periodic
657 * itimers in the signal delivery path to prevent DoS
658 * attacks in the high resolution timer case. This is
659 * compliant with the old way of self-restarting
660 * itimers, as the SIGALRM is a legacy signal and only
661 * queued once. Changing the restart behaviour to
662 * restart the timer in the signal dequeue path is
663 * reducing the timer noise on heavy loaded !highres
664 * systems too.
665 */
666 if (unlikely(signr == SIGALRM)) {
667 struct hrtimer *tmr = &tsk->signal->real_timer;
668
669 if (!hrtimer_is_queued(tmr) &&
670 tsk->signal->it_real_incr != 0) {
671 hrtimer_forward(tmr, tmr->base->get_time(),
672 tsk->signal->it_real_incr);
673 hrtimer_restart(tmr);
674 }
675 }
676 #endif
677 }
678
679 recalc_sigpending();
680 if (!signr)
681 return 0;
682
683 if (unlikely(sig_kernel_stop(signr))) {
684 /*
685 * Set a marker that we have dequeued a stop signal. Our
686 * caller might release the siglock and then the pending
687 * stop signal it is about to process is no longer in the
688 * pending bitmasks, but must still be cleared by a SIGCONT
689 * (and overruled by a SIGKILL). So those cases clear this
690 * shared flag after we've set it. Note that this flag may
691 * remain set after the signal we return is ignored or
692 * handled. That doesn't matter because its only purpose
693 * is to alert stop-signal processing code when another
694 * processor has come along and cleared the flag.
695 */
696 current->jobctl |= JOBCTL_STOP_DEQUEUED;
697 }
698 #ifdef CONFIG_POSIX_TIMERS
699 if (resched_timer) {
700 /*
701 * Release the siglock to ensure proper locking order
702 * of timer locks outside of siglocks. Note, we leave
703 * irqs disabled here, since the posix-timers code is
704 * about to disable them again anyway.
705 */
706 spin_unlock(&tsk->sighand->siglock);
707 posixtimer_rearm(info);
708 spin_lock(&tsk->sighand->siglock);
709
710 /* Don't expose the si_sys_private value to userspace */
711 info->si_sys_private = 0;
712 }
713 #endif
714 return signr;
715 }
716 EXPORT_SYMBOL_GPL(dequeue_signal);
717
718 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
719 {
720 struct task_struct *tsk = current;
721 struct sigpending *pending = &tsk->pending;
722 struct sigqueue *q, *sync = NULL;
723
724 /*
725 * Might a synchronous signal be in the queue?
726 */
727 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
728 return 0;
729
730 /*
731 * Return the first synchronous signal in the queue.
732 */
733 list_for_each_entry(q, &pending->list, list) {
734 /* Synchronous signals have a positive si_code */
735 if ((q->info.si_code > SI_USER) &&
736 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
737 sync = q;
738 goto next;
739 }
740 }
741 return 0;
742 next:
743 /*
744 * Check if there is another siginfo for the same signal.
745 */
746 list_for_each_entry_continue(q, &pending->list, list) {
747 if (q->info.si_signo == sync->info.si_signo)
748 goto still_pending;
749 }
750
751 sigdelset(&pending->signal, sync->info.si_signo);
752 recalc_sigpending();
753 still_pending:
754 list_del_init(&sync->list);
755 copy_siginfo(info, &sync->info);
756 __sigqueue_free(sync);
757 return info->si_signo;
758 }
759
760 /*
761 * Tell a process that it has a new active signal.
762 *
763 * NOTE! we rely on the previous spin_lock to
764 * lock interrupts for us! We can only be called with
765 * "siglock" held, and the local interrupt must
766 * have been disabled when that got acquired!
767 *
768 * No need to set need_resched since signal event passing
769 * goes through ->blocked
770 */
771 void signal_wake_up_state(struct task_struct *t, unsigned int state)
772 {
773 lockdep_assert_held(&t->sighand->siglock);
774
775 set_tsk_thread_flag(t, TIF_SIGPENDING);
776
777 /*
778 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
779 * case. We don't check t->state here because there is a race with it
780 * executing on another processor and just now entering stopped state.
781 * By using wake_up_state, we ensure the process will wake up and
782 * handle its death signal.
783 */
784 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
785 kick_process(t);
786 }
787
788 /*
789 * Remove signals in mask from the pending set and queue.
791 *
792 * All callers must be holding the siglock.
793 */
794 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
795 {
796 struct sigqueue *q, *n;
797 sigset_t m;
798
799 sigandsets(&m, mask, &s->signal);
800 if (sigisemptyset(&m))
801 return;
802
803 sigandnsets(&s->signal, &s->signal, mask);
804 list_for_each_entry_safe(q, n, &s->list, list) {
805 if (sigismember(mask, q->info.si_signo)) {
806 list_del_init(&q->list);
807 __sigqueue_free(q);
808 }
809 }
810 }
811
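/*
 * SEND_SIG_NOINFO and SEND_SIG_PRIV are sentinel pointer values rather
 * than real siginfo structures, so an ordering comparison against
 * SEND_SIG_PRIV is enough to recognise both of them.
 */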
812 static inline int is_si_special(const struct kernel_siginfo *info)
813 {
814 return info <= SEND_SIG_PRIV;
815 }
816
817 static inline bool si_fromuser(const struct kernel_siginfo *info)
818 {
819 return info == SEND_SIG_NOINFO ||
820 (!is_si_special(info) && SI_FROMUSER(info));
821 }
822
823 /*
824 * called with RCU read lock from check_kill_permission()
825 */
826 static bool kill_ok_by_cred(struct task_struct *t)
827 {
828 const struct cred *cred = current_cred();
829 const struct cred *tcred = __task_cred(t);
830
831 return uid_eq(cred->euid, tcred->suid) ||
832 uid_eq(cred->euid, tcred->uid) ||
833 uid_eq(cred->uid, tcred->suid) ||
834 uid_eq(cred->uid, tcred->uid) ||
835 ns_capable(tcred->user_ns, CAP_KILL);
836 }
837
838 /*
839 * Bad permissions for sending the signal
840 * - the caller must hold the RCU read lock
841 */
842 static int check_kill_permission(int sig, struct kernel_siginfo *info,
843 struct task_struct *t)
844 {
845 struct pid *sid;
846 int error;
847
848 if (!valid_signal(sig))
849 return -EINVAL;
850
851 if (!si_fromuser(info))
852 return 0;
853
854 error = audit_signal_info(sig, t); /* Let audit system see the signal */
855 if (error)
856 return error;
857
858 if (!same_thread_group(current, t) &&
859 !kill_ok_by_cred(t)) {
860 switch (sig) {
861 case SIGCONT:
862 sid = task_session(t);
863 /*
864 * We don't return the error if sid == NULL. The
865 * task was unhashed, the caller must notice this.
866 */
867 if (!sid || sid == task_session(current))
868 break;
869 fallthrough;
870 default:
871 return -EPERM;
872 }
873 }
874
875 return security_task_kill(t, info, sig, NULL);
876 }
877
878 /**
879 * ptrace_trap_notify - schedule trap to notify ptracer
880 * @t: tracee wanting to notify tracer
881 *
882 * This function schedules sticky ptrace trap which is cleared on the next
883 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
884 * ptracer.
885 *
886 * If @t is running, STOP trap will be taken. If trapped for STOP and
887 * ptracer is listening for events, tracee is woken up so that it can
888 * re-trap for the new event. If trapped otherwise, STOP trap will be
889 * eventually taken without returning to userland after the existing traps
890 * are finished by PTRACE_CONT.
891 *
892 * CONTEXT:
893 * Must be called with @t->sighand->siglock held.
894 */
895 static void ptrace_trap_notify(struct task_struct *t)
896 {
897 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
898 lockdep_assert_held(&t->sighand->siglock);
899
900 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
901 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
902 }
903
904 /*
905 * Handle magic process-wide effects of stop/continue signals. Unlike
906 * the signal actions, these happen immediately at signal-generation
907 * time regardless of blocking, ignoring, or handling. This does the
908 * actual continuing for SIGCONT, but not the actual stopping for stop
909 * signals. The process stop is done as a signal action for SIG_DFL.
910 *
911 * Returns true if the signal should be actually delivered, otherwise
912 * it should be dropped.
913 */
914 static bool prepare_signal(int sig, struct task_struct *p, bool force)
915 {
916 struct signal_struct *signal = p->signal;
917 struct task_struct *t;
918 sigset_t flush;
919
920 if (signal->flags & SIGNAL_GROUP_EXIT) {
921 if (signal->core_state)
922 return sig == SIGKILL;
923 /*
924 * The process is in the middle of dying, drop the signal.
925 */
926 return false;
927 } else if (sig_kernel_stop(sig)) {
928 /*
929 * This is a stop signal. Remove SIGCONT from all queues.
930 */
931 siginitset(&flush, sigmask(SIGCONT));
932 flush_sigqueue_mask(&flush, &signal->shared_pending);
933 for_each_thread(p, t)
934 flush_sigqueue_mask(&flush, &t->pending);
935 } else if (sig == SIGCONT) {
936 unsigned int why;
937 /*
938 * Remove all stop signals from all queues, wake all threads.
939 */
940 siginitset(&flush, SIG_KERNEL_STOP_MASK);
941 flush_sigqueue_mask(&flush, &signal->shared_pending);
942 for_each_thread(p, t) {
943 flush_sigqueue_mask(&flush, &t->pending);
944 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
945 if (likely(!(t->ptrace & PT_SEIZED))) {
946 t->jobctl &= ~JOBCTL_STOPPED;
947 wake_up_state(t, __TASK_STOPPED);
948 } else
949 ptrace_trap_notify(t);
950 }
951
952 /*
953 * Notify the parent with CLD_CONTINUED if we were stopped.
954 *
955 * If we were in the middle of a group stop, we pretend it
956 * was already finished, and then continued. Since SIGCHLD
957 * doesn't queue we report only CLD_STOPPED, as if the next
958 * CLD_CONTINUED was dropped.
959 */
960 why = 0;
961 if (signal->flags & SIGNAL_STOP_STOPPED)
962 why |= SIGNAL_CLD_CONTINUED;
963 else if (signal->group_stop_count)
964 why |= SIGNAL_CLD_STOPPED;
965
966 if (why) {
967 /*
968 * The first thread which returns from do_signal_stop()
969 * will take ->siglock, notice SIGNAL_CLD_MASK, and
970 * notify its parent. See get_signal().
971 */
972 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
973 signal->group_stop_count = 0;
974 signal->group_exit_code = 0;
975 }
976 }
977
978 return !sig_ignored(p, sig, force);
979 }
980
981 /*
982 * Test if P wants to take SIG. After we've checked all threads with this,
983 * it's equivalent to finding no threads not blocking SIG. Any threads not
984 * blocking SIG were ruled out because they are not running and already
985 * have pending signals. Such threads will dequeue from the shared queue
986 * as soon as they're available, so putting the signal on the shared queue
987 * will be equivalent to sending it to one such thread.
988 */
989 static inline bool wants_signal(int sig, struct task_struct *p)
990 {
991 if (sigismember(&p->blocked, sig))
992 return false;
993
994 if (p->flags & PF_EXITING)
995 return false;
996
997 if (sig == SIGKILL)
998 return true;
999
1000 if (task_is_stopped_or_traced(p))
1001 return false;
1002
1003 return task_curr(p) || !task_sigpending(p);
1004 }
1005
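/*
 * Pick a thread to service a signal that has just been queued on @p and
 * wake it up. Fatal signals (other than coredumping ones) are promoted
 * here to a group-wide exit by setting SIGNAL_GROUP_EXIT and making
 * SIGKILL pending for every thread.
 */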
1006 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
1007 {
1008 struct signal_struct *signal = p->signal;
1009 struct task_struct *t;
1010 bool wake;
1011
1012 /*
1013 * Now find a thread we can wake up to take the signal off the queue.
1014 *
1015 * If the main thread wants the signal, it gets first crack.
1016 * Probably the least surprising to the average bear.
1017 */
1018 if (wants_signal(sig, p))
1019 t = p;
1020 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1021 /*
1022 * There is just one thread and it does not need to be woken.
1023 * It will dequeue unblocked signals before it runs again.
1024 */
1025 return;
1026 else {
1027 /*
1028 * Otherwise try to find a suitable thread.
1029 */
1030 t = signal->curr_target;
1031 while (!wants_signal(sig, t)) {
1032 t = next_thread(t);
1033 if (t == signal->curr_target)
1034 /*
1035 * No thread needs to be woken.
1036 * Any eligible threads will see
1037 * the signal in the queue soon.
1038 */
1039 return;
1040 }
1041 signal->curr_target = t;
1042 }
1043
1044 /*
1045 * Found a killable thread. If the signal will be fatal,
1046 * then start taking the whole group down immediately.
1047 */
1048 if (sig_fatal(p, sig) &&
1049 (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1050 !sigismember(&t->real_blocked, sig) &&
1051 (sig == SIGKILL || !p->ptrace)) {
1052 /*
1053 * This signal will be fatal to the whole group.
1054 */
1055 if (!sig_kernel_coredump(sig)) {
1056 /*
1057 * Start a group exit and wake everybody up.
1058 * This way we don't have other threads
1059 * running and doing things after a slower
1060 * thread has the fatal signal pending.
1061 */
1062 signal->flags = SIGNAL_GROUP_EXIT;
1063 signal->group_exit_code = sig;
1064 signal->group_stop_count = 0;
1065 t = p;
1066 do {
1067 trace_android_vh_exit_signal(t);
1068 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1069 sigaddset(&t->pending.signal, SIGKILL);
1070 wake = true;
1071 trace_android_vh_exit_signal_whether_wake(t, &wake);
1072 if (wake)
1073 signal_wake_up(t, 1);
1074 } while_each_thread(p, t);
1075 return;
1076 }
1077 }
1078
1079 /*
1080 * The signal is already in the shared-pending queue.
1081 * Tell the chosen thread to wake up and dequeue it.
1082 */
1083 signal_wake_up(t, sig == SIGKILL);
1084 return;
1085 }
1086
1087 static inline bool legacy_queue(struct sigpending *signals, int sig)
1088 {
1089 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1090 }
1091
1092 static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1093 struct task_struct *t, enum pid_type type, bool force)
1094 {
1095 struct sigpending *pending;
1096 struct sigqueue *q;
1097 int override_rlimit;
1098 int ret = 0, result;
1099
1100 lockdep_assert_held(&t->sighand->siglock);
1101
1102 result = TRACE_SIGNAL_IGNORED;
1103 if (!prepare_signal(sig, t, force))
1104 goto ret;
1105
1106 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1107 /*
1108 * Short-circuit ignored signals and support queuing
1109 * exactly one non-rt signal, so that we can get more
1110 * detailed information about the cause of the signal.
1111 */
1112 result = TRACE_SIGNAL_ALREADY_PENDING;
1113 if (legacy_queue(pending, sig))
1114 goto ret;
1115
1116 result = TRACE_SIGNAL_DELIVERED;
1117 /*
1118 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1119 */
1120 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1121 goto out_set;
1122
1123 /*
1124 * Real-time signals must be queued if sent by sigqueue, or
1125 * some other real-time mechanism. It is implementation
1126 * defined whether kill() does so. We attempt to do so, on
1127 * the principle of least surprise, but since kill is not
1128 * allowed to fail with EAGAIN when low on memory we just
1129 * make sure at least one signal gets delivered and don't
1130 * pass on the info struct.
1131 */
1132 if (sig < SIGRTMIN)
1133 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1134 else
1135 override_rlimit = 0;
1136
1137 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1138
1139 if (q) {
1140 list_add_tail(&q->list, &pending->list);
1141 switch ((unsigned long) info) {
1142 case (unsigned long) SEND_SIG_NOINFO:
1143 clear_siginfo(&q->info);
1144 q->info.si_signo = sig;
1145 q->info.si_errno = 0;
1146 q->info.si_code = SI_USER;
1147 q->info.si_pid = task_tgid_nr_ns(current,
1148 task_active_pid_ns(t));
1149 rcu_read_lock();
1150 q->info.si_uid =
1151 from_kuid_munged(task_cred_xxx(t, user_ns),
1152 current_uid());
1153 rcu_read_unlock();
1154 break;
1155 case (unsigned long) SEND_SIG_PRIV:
1156 clear_siginfo(&q->info);
1157 q->info.si_signo = sig;
1158 q->info.si_errno = 0;
1159 q->info.si_code = SI_KERNEL;
1160 q->info.si_pid = 0;
1161 q->info.si_uid = 0;
1162 break;
1163 default:
1164 copy_siginfo(&q->info, info);
1165 break;
1166 }
1167 } else if (!is_si_special(info) &&
1168 sig >= SIGRTMIN && info->si_code != SI_USER) {
1169 /*
1170 * Queue overflow, abort. We may abort if the
1171 * signal was rt and sent by user using something
1172 * other than kill().
1173 */
1174 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1175 ret = -EAGAIN;
1176 goto ret;
1177 } else {
1178 /*
1179 * This is a silent loss of information. We still
1180 * send the signal, but the *info bits are lost.
1181 */
1182 result = TRACE_SIGNAL_LOSE_INFO;
1183 }
1184
1185 out_set:
1186 signalfd_notify(t, sig);
1187 sigaddset(&pending->signal, sig);
1188
1189 /* Let multiprocess signals appear after on-going forks */
1190 if (type > PIDTYPE_TGID) {
1191 struct multiprocess_signals *delayed;
1192 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1193 sigset_t *signal = &delayed->signal;
1194 /* Can't queue both a stop and a continue signal */
1195 if (sig == SIGCONT)
1196 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1197 else if (sig_kernel_stop(sig))
1198 sigdelset(signal, SIGCONT);
1199 sigaddset(signal, sig);
1200 }
1201 }
1202
1203 complete_signal(sig, t, type);
1204 ret:
1205 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1206 return ret;
1207 }
1208
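/*
 * Does the siginfo layout for this signal/si_code pair carry si_pid and
 * si_uid fields that may need translating into the target task's
 * namespaces before delivery?
 */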
1209 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1210 {
1211 bool ret = false;
1212 switch (siginfo_layout(info->si_signo, info->si_code)) {
1213 case SIL_KILL:
1214 case SIL_CHLD:
1215 case SIL_RT:
1216 ret = true;
1217 break;
1218 case SIL_TIMER:
1219 case SIL_POLL:
1220 case SIL_FAULT:
1221 case SIL_FAULT_TRAPNO:
1222 case SIL_FAULT_MCEERR:
1223 case SIL_FAULT_BNDERR:
1224 case SIL_FAULT_PKUERR:
1225 case SIL_FAULT_PERF_EVENT:
1226 case SIL_SYS:
1227 ret = false;
1228 break;
1229 }
1230 return ret;
1231 }
1232
1233 int send_signal_locked(int sig, struct kernel_siginfo *info,
1234 struct task_struct *t, enum pid_type type)
1235 {
1236 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1237 bool force = false;
1238
1239 if (info == SEND_SIG_NOINFO) {
1240 /* Force if sent from an ancestor pid namespace */
1241 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1242 } else if (info == SEND_SIG_PRIV) {
1243 /* Don't ignore kernel generated signals */
1244 force = true;
1245 } else if (has_si_pid_and_uid(info)) {
1246 /* SIGKILL and SIGSTOP are special or the siginfo has ids */
1247 struct user_namespace *t_user_ns;
1248
1249 rcu_read_lock();
1250 t_user_ns = task_cred_xxx(t, user_ns);
1251 if (current_user_ns() != t_user_ns) {
1252 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1253 info->si_uid = from_kuid_munged(t_user_ns, uid);
1254 }
1255 rcu_read_unlock();
1256
1257 /* A kernel generated signal? */
1258 force = (info->si_code == SI_KERNEL);
1259
1260 /* From an ancestor pid namespace? */
1261 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1262 info->si_pid = 0;
1263 force = true;
1264 }
1265 }
1266 return __send_signal_locked(sig, info, t, type, force);
1267 }
1268
1269 static void print_fatal_signal(int signr)
1270 {
1271 struct pt_regs *regs = signal_pt_regs();
1272 pr_info("potentially unexpected fatal signal %d.\n", signr);
1273
1274 #if defined(__i386__) && !defined(__arch_um__)
1275 pr_info("code at %08lx: ", regs->ip);
1276 {
1277 int i;
1278 for (i = 0; i < 16; i++) {
1279 unsigned char insn;
1280
1281 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1282 break;
1283 pr_cont("%02x ", insn);
1284 }
1285 }
1286 pr_cont("\n");
1287 #endif
1288 preempt_disable();
1289 show_regs(regs);
1290 preempt_enable();
1291 }
1292
1293 static int __init setup_print_fatal_signals(char *str)
1294 {
1295 get_option (&str, &print_fatal_signals);
1296
1297 return 1;
1298 }
1299
1300 __setup("print-fatal-signals=", setup_print_fatal_signals);
1301
1302 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1303 enum pid_type type)
1304 {
1305 unsigned long flags;
1306 int ret = -ESRCH;
1307 trace_android_vh_do_send_sig_info(sig, current, p);
1308 if (lock_task_sighand(p, &flags)) {
1309 ret = send_signal_locked(sig, info, p, type);
1310 unlock_task_sighand(p, &flags);
1311 }
1312
1313 return ret;
1314 }
1315 EXPORT_SYMBOL_GPL(do_send_sig_info);
1316
1317 enum sig_handler {
1318 HANDLER_CURRENT, /* If reachable use the current handler */
1319 HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1320 HANDLER_EXIT, /* Only visible as the process exit code */
1321 };
1322
1323 /*
1324 * Force a signal that the process can't ignore: if necessary
1325 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1326 *
1327 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1328 * since we do not want to have a signal handler that was blocked
1329 * be invoked when user space had explicitly blocked it.
1330 *
1331 * We don't want to have recursive SIGSEGV's etc, for example,
1332 * that is why we also clear SIGNAL_UNKILLABLE.
1333 */
1334 static int
1335 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1336 enum sig_handler handler)
1337 {
1338 unsigned long int flags;
1339 int ret, blocked, ignored;
1340 struct k_sigaction *action;
1341 int sig = info->si_signo;
1342
1343 spin_lock_irqsave(&t->sighand->siglock, flags);
1344 action = &t->sighand->action[sig-1];
1345 ignored = action->sa.sa_handler == SIG_IGN;
1346 blocked = sigismember(&t->blocked, sig);
1347 if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1348 action->sa.sa_handler = SIG_DFL;
1349 if (handler == HANDLER_EXIT)
1350 action->sa.sa_flags |= SA_IMMUTABLE;
1351 if (blocked) {
1352 sigdelset(&t->blocked, sig);
1353 recalc_sigpending_and_wake(t);
1354 }
1355 }
1356 /*
1357 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1358 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1359 */
1360 if (action->sa.sa_handler == SIG_DFL &&
1361 (!t->ptrace || (handler == HANDLER_EXIT)))
1362 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1363 ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1364 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1365
1366 return ret;
1367 }
1368
1369 int force_sig_info(struct kernel_siginfo *info)
1370 {
1371 return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1372 }
1373
1374 /*
1375 * Nuke all other threads in the group.
1376 */
1377 int zap_other_threads(struct task_struct *p)
1378 {
1379 struct task_struct *t = p;
1380 int count = 0;
1381
1382 p->signal->group_stop_count = 0;
1383
1384 while_each_thread(p, t) {
1385 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1386 count++;
1387
1388 /* Don't bother with already dead threads */
1389 if (t->exit_state)
1390 continue;
1391 sigaddset(&t->pending.signal, SIGKILL);
1392 signal_wake_up(t, 1);
1393 }
1394
1395 return count;
1396 }
1397
1398 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1399 unsigned long *flags)
1400 {
1401 struct sighand_struct *sighand;
1402
1403 rcu_read_lock();
1404 for (;;) {
1405 sighand = rcu_dereference(tsk->sighand);
1406 if (unlikely(sighand == NULL))
1407 break;
1408
1409 /*
1410 * This sighand can be already freed and even reused, but
1411 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1412 * initializes ->siglock: this slab can't go away, it has
1413 * the same object type, ->siglock can't be reinitialized.
1414 *
1415 * We need to ensure that tsk->sighand is still the same
1416 * after we take the lock, we can race with de_thread() or
1417 * __exit_signal(). In the latter case the next iteration
1418 * must see ->sighand == NULL.
1419 */
1420 spin_lock_irqsave(&sighand->siglock, *flags);
1421 if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1422 break;
1423 spin_unlock_irqrestore(&sighand->siglock, *flags);
1424 }
1425 rcu_read_unlock();
1426
1427 return sighand;
1428 }
1429
1430 #ifdef CONFIG_LOCKDEP
1431 void lockdep_assert_task_sighand_held(struct task_struct *task)
1432 {
1433 struct sighand_struct *sighand;
1434
1435 rcu_read_lock();
1436 sighand = rcu_dereference(task->sighand);
1437 if (sighand)
1438 lockdep_assert_held(&sighand->siglock);
1439 else
1440 WARN_ON_ONCE(1);
1441 rcu_read_unlock();
1442 }
1443 #endif
1444
1445 /*
1446 * send signal info to all the members of a group
1447 */
1448 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1449 struct task_struct *p, enum pid_type type)
1450 {
1451 int ret;
1452
1453 rcu_read_lock();
1454 ret = check_kill_permission(sig, info, p);
1455 rcu_read_unlock();
1456
1457 if (!ret && sig) {
1458 ret = do_send_sig_info(sig, info, p, type);
1459 if (!ret && sig == SIGKILL) {
1460 bool reap = false;
1461
1462 trace_android_vh_killed_process(current, p, &reap);
1463 if (reap)
1464 add_to_oom_reaper(p);
1465 }
1466 }
1467
1468 return ret;
1469 }
1470
1471 /*
1472 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1473 * control characters do (^C, ^Z etc)
1474 * - the caller must hold at least a readlock on tasklist_lock
1475 */
1476 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1477 {
1478 struct task_struct *p = NULL;
1479 int retval, success;
1480
1481 success = 0;
1482 retval = -ESRCH;
1483 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1484 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1485 success |= !err;
1486 retval = err;
1487 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1488 return success ? 0 : retval;
1489 }
1490
1491 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1492 {
1493 int error = -ESRCH;
1494 struct task_struct *p;
1495
1496 for (;;) {
1497 rcu_read_lock();
1498 p = pid_task(pid, PIDTYPE_PID);
1499 if (p)
1500 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1501 rcu_read_unlock();
1502 if (likely(!p || error != -ESRCH))
1503 return error;
1504
1505 /*
1506 * The task was unhashed in between, try again. If it
1507 * is dead, pid_task() will return NULL, if we race with
1508 * de_thread() it will find the new leader.
1509 */
1510 }
1511 }
1512
1513 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1514 {
1515 int error;
1516 rcu_read_lock();
1517 error = kill_pid_info(sig, info, find_vpid(pid));
1518 rcu_read_unlock();
1519 return error;
1520 }
1521
1522 static inline bool kill_as_cred_perm(const struct cred *cred,
1523 struct task_struct *target)
1524 {
1525 const struct cred *pcred = __task_cred(target);
1526
1527 return uid_eq(cred->euid, pcred->suid) ||
1528 uid_eq(cred->euid, pcred->uid) ||
1529 uid_eq(cred->uid, pcred->suid) ||
1530 uid_eq(cred->uid, pcred->uid);
1531 }
1532
1533 /*
1534 * The usb asyncio usage of siginfo is wrong. The glibc support
1535 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1536 * AKA after the generic fields:
1537 * kernel_pid_t si_pid;
1538 * kernel_uid32_t si_uid;
1539 * sigval_t si_value;
1540 *
1541 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1542 * after the generic fields is:
1543 * void __user *si_addr;
1544 *
1545 * This is a practical problem when there is a 64-bit big-endian kernel
1546 * and a 32-bit userspace, as the 32-bit address will be encoded in the
1547 * low 32 bits of the pointer. Those low 32 bits will be stored at a
1548 * higher address than they would appear in a 32-bit pointer, so
1549 * userspace will not see the address it was expecting for its completions.
1550 *
1551 * There is nothing in the encoding that can allow
1552 * copy_siginfo_to_user32 to detect this confusion of formats, so
1553 * handle this by requiring the caller of kill_pid_usb_asyncio to
1554 * notice when this situation takes place and to store the 32-bit
1555 * pointer in sival_int, instead of sival_addr of the sigval_t addr
1556 * parameter.
1557 */
1558 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1559 struct pid *pid, const struct cred *cred)
1560 {
1561 struct kernel_siginfo info;
1562 struct task_struct *p;
1563 unsigned long flags;
1564 int ret = -EINVAL;
1565
1566 if (!valid_signal(sig))
1567 return ret;
1568
1569 clear_siginfo(&info);
1570 info.si_signo = sig;
1571 info.si_errno = errno;
1572 info.si_code = SI_ASYNCIO;
1573 *((sigval_t *)&info.si_pid) = addr;
1574
1575 rcu_read_lock();
1576 p = pid_task(pid, PIDTYPE_PID);
1577 if (!p) {
1578 ret = -ESRCH;
1579 goto out_unlock;
1580 }
1581 if (!kill_as_cred_perm(cred, p)) {
1582 ret = -EPERM;
1583 goto out_unlock;
1584 }
1585 ret = security_task_kill(p, &info, sig, cred);
1586 if (ret)
1587 goto out_unlock;
1588
1589 if (sig) {
1590 if (lock_task_sighand(p, &flags)) {
1591 ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1592 unlock_task_sighand(p, &flags);
1593 } else
1594 ret = -ESRCH;
1595 }
1596 out_unlock:
1597 rcu_read_unlock();
1598 return ret;
1599 }
1600 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1601
1602 /*
1603 * kill_something_info() interprets pid in interesting ways just like kill(2).
1604 *
1605 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1606 * is probably wrong. Should make it like BSD or SYSV.
1607 */
1608
1609 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1610 {
1611 int ret;
1612
1613 if (pid > 0)
1614 return kill_proc_info(sig, info, pid);
1615
1616 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1617 if (pid == INT_MIN)
1618 return -ESRCH;
1619
1620 read_lock(&tasklist_lock);
1621 if (pid != -1) {
1622 ret = __kill_pgrp_info(sig, info,
1623 pid ? find_vpid(-pid) : task_pgrp(current));
1624 } else {
1625 int retval = 0, count = 0;
1626 struct task_struct * p;
1627
1628 for_each_process(p) {
1629 if (task_pid_vnr(p) > 1 &&
1630 !same_thread_group(p, current)) {
1631 int err = group_send_sig_info(sig, info, p,
1632 PIDTYPE_MAX);
1633 ++count;
1634 if (err != -EPERM)
1635 retval = err;
1636 }
1637 }
1638 ret = count ? retval : -ESRCH;
1639 }
1640 read_unlock(&tasklist_lock);
1641
1642 return ret;
1643 }
1644
1645 /*
1646 * These are for backward compatibility with the rest of the kernel source.
1647 */
1648
1649 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1650 {
1651 /*
1652 * Make sure legacy kernel users don't send in bad values
1653 * (normal paths check this in check_kill_permission).
1654 */
1655 if (!valid_signal(sig))
1656 return -EINVAL;
1657
1658 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1659 }
1660 EXPORT_SYMBOL(send_sig_info);
1661
1662 #define __si_special(priv) \
1663 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1664
1665 int
1666 send_sig(int sig, struct task_struct *p, int priv)
1667 {
1668 return send_sig_info(sig, __si_special(priv), p);
1669 }
1670 EXPORT_SYMBOL(send_sig);
1671
1672 void force_sig(int sig)
1673 {
1674 struct kernel_siginfo info;
1675
1676 clear_siginfo(&info);
1677 info.si_signo = sig;
1678 info.si_errno = 0;
1679 info.si_code = SI_KERNEL;
1680 info.si_pid = 0;
1681 info.si_uid = 0;
1682 force_sig_info(&info);
1683 }
1684 EXPORT_SYMBOL(force_sig);
1685
1686 void force_fatal_sig(int sig)
1687 {
1688 struct kernel_siginfo info;
1689
1690 clear_siginfo(&info);
1691 info.si_signo = sig;
1692 info.si_errno = 0;
1693 info.si_code = SI_KERNEL;
1694 info.si_pid = 0;
1695 info.si_uid = 0;
1696 force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1697 }
1698
1699 void force_exit_sig(int sig)
1700 {
1701 struct kernel_siginfo info;
1702
1703 clear_siginfo(&info);
1704 info.si_signo = sig;
1705 info.si_errno = 0;
1706 info.si_code = SI_KERNEL;
1707 info.si_pid = 0;
1708 info.si_uid = 0;
1709 force_sig_info_to_task(&info, current, HANDLER_EXIT);
1710 }
1711
1712 /*
1713 * When things go south during signal handling, we
1714 * will force a SIGSEGV. And if the signal that caused
1715 * the problem was already a SIGSEGV, we'll want to
1716 * make sure we don't even try to deliver the signal..
1717 */
1718 void force_sigsegv(int sig)
1719 {
1720 if (sig == SIGSEGV)
1721 force_fatal_sig(SIGSEGV);
1722 else
1723 force_sig(SIGSEGV);
1724 }
1725
1726 int force_sig_fault_to_task(int sig, int code, void __user *addr
1727 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1728 , struct task_struct *t)
1729 {
1730 struct kernel_siginfo info;
1731
1732 clear_siginfo(&info);
1733 info.si_signo = sig;
1734 info.si_errno = 0;
1735 info.si_code = code;
1736 info.si_addr = addr;
1737 #ifdef __ia64__
1738 info.si_imm = imm;
1739 info.si_flags = flags;
1740 info.si_isr = isr;
1741 #endif
1742 return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1743 }
1744
1745 int force_sig_fault(int sig, int code, void __user *addr
1746 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1747 {
1748 return force_sig_fault_to_task(sig, code, addr
1749 ___ARCH_SI_IA64(imm, flags, isr), current);
1750 }
1751
1752 int send_sig_fault(int sig, int code, void __user *addr
1753 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1754 , struct task_struct *t)
1755 {
1756 struct kernel_siginfo info;
1757
1758 clear_siginfo(&info);
1759 info.si_signo = sig;
1760 info.si_errno = 0;
1761 info.si_code = code;
1762 info.si_addr = addr;
1763 #ifdef __ia64__
1764 info.si_imm = imm;
1765 info.si_flags = flags;
1766 info.si_isr = isr;
1767 #endif
1768 return send_sig_info(info.si_signo, &info, t);
1769 }
1770
1771 int force_sig_mceerr(int code, void __user *addr, short lsb)
1772 {
1773 struct kernel_siginfo info;
1774
1775 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1776 clear_siginfo(&info);
1777 info.si_signo = SIGBUS;
1778 info.si_errno = 0;
1779 info.si_code = code;
1780 info.si_addr = addr;
1781 info.si_addr_lsb = lsb;
1782 return force_sig_info(&info);
1783 }
1784
1785 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1786 {
1787 struct kernel_siginfo info;
1788
1789 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1790 clear_siginfo(&info);
1791 info.si_signo = SIGBUS;
1792 info.si_errno = 0;
1793 info.si_code = code;
1794 info.si_addr = addr;
1795 info.si_addr_lsb = lsb;
1796 return send_sig_info(info.si_signo, &info, t);
1797 }
1798 EXPORT_SYMBOL(send_sig_mceerr);
1799
1800 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1801 {
1802 struct kernel_siginfo info;
1803
1804 clear_siginfo(&info);
1805 info.si_signo = SIGSEGV;
1806 info.si_errno = 0;
1807 info.si_code = SEGV_BNDERR;
1808 info.si_addr = addr;
1809 info.si_lower = lower;
1810 info.si_upper = upper;
1811 return force_sig_info(&info);
1812 }
1813
1814 #ifdef SEGV_PKUERR
1815 int force_sig_pkuerr(void __user *addr, u32 pkey)
1816 {
1817 struct kernel_siginfo info;
1818
1819 clear_siginfo(&info);
1820 info.si_signo = SIGSEGV;
1821 info.si_errno = 0;
1822 info.si_code = SEGV_PKUERR;
1823 info.si_addr = addr;
1824 info.si_pkey = pkey;
1825 return force_sig_info(&info);
1826 }
1827 #endif
1828
1829 int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1830 {
1831 struct kernel_siginfo info;
1832
1833 clear_siginfo(&info);
1834 info.si_signo = SIGTRAP;
1835 info.si_errno = 0;
1836 info.si_code = TRAP_PERF;
1837 info.si_addr = addr;
1838 info.si_perf_data = sig_data;
1839 info.si_perf_type = type;
1840
1841 /*
1842 * Signals generated by perf events should not terminate the whole
1843 * process if SIGTRAP is blocked, however, delivering the signal
1844 * asynchronously is better than not delivering at all. But tell user
1845 * space if the signal was asynchronous, so it can clearly be
1846 * distinguished from normal synchronous ones.
1847 */
1848 info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1849 TRAP_PERF_FLAG_ASYNC :
1850 0;
1851
1852 return send_sig_info(info.si_signo, &info, current);
1853 }
1854
1855 /**
1856 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1857 * @syscall: syscall number to send to userland
1858 * @reason: filter-supplied reason code to send to userland (via si_errno)
1859 * @force_coredump: true to trigger a coredump
1860 *
1861 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1862 */
1863 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1864 {
1865 struct kernel_siginfo info;
1866
1867 clear_siginfo(&info);
1868 info.si_signo = SIGSYS;
1869 info.si_code = SYS_SECCOMP;
1870 info.si_call_addr = (void __user *)KSTK_EIP(current);
1871 info.si_errno = reason;
1872 info.si_arch = syscall_get_arch(current);
1873 info.si_syscall = syscall;
1874 return force_sig_info_to_task(&info, current,
1875 force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1876 }
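
/*
 * For context (a reading of the API above, not a seccomp reference): seccomp
 * is the expected caller here. A filter returning SECCOMP_RET_TRAP typically
 * ends up in force_sig_seccomp(nr, data, false), while the kill actions pass
 * force_coredump == true so the SIGSYS takes the HANDLER_EXIT path and the
 * task dies, with a core dump if one is configured.
 */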
1877
1878 /* For the crazy architectures that include trap information in
1879 * the errno field, instead of an actual errno value.
1880 */
1881 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1882 {
1883 struct kernel_siginfo info;
1884
1885 clear_siginfo(&info);
1886 info.si_signo = SIGTRAP;
1887 info.si_errno = errno;
1888 info.si_code = TRAP_HWBKPT;
1889 info.si_addr = addr;
1890 return force_sig_info(&info);
1891 }
1892
1893 /* For the rare architectures that include trap information using
1894 * si_trapno.
1895 */
1896 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1897 {
1898 struct kernel_siginfo info;
1899
1900 clear_siginfo(&info);
1901 info.si_signo = sig;
1902 info.si_errno = 0;
1903 info.si_code = code;
1904 info.si_addr = addr;
1905 info.si_trapno = trapno;
1906 return force_sig_info(&info);
1907 }
1908
1909 /* For the rare architectures that include trap information using
1910 * si_trapno.
1911 */
1912 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1913 struct task_struct *t)
1914 {
1915 struct kernel_siginfo info;
1916
1917 clear_siginfo(&info);
1918 info.si_signo = sig;
1919 info.si_errno = 0;
1920 info.si_code = code;
1921 info.si_addr = addr;
1922 info.si_trapno = trapno;
1923 return send_sig_info(info.si_signo, &info, t);
1924 }
1925
1926 int kill_pgrp(struct pid *pid, int sig, int priv)
1927 {
1928 int ret;
1929
1930 read_lock(&tasklist_lock);
1931 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1932 read_unlock(&tasklist_lock);
1933
1934 return ret;
1935 }
1936 EXPORT_SYMBOL(kill_pgrp);
1937
1938 int kill_pid(struct pid *pid, int sig, int priv)
1939 {
1940 return kill_pid_info(sig, __si_special(priv), pid);
1941 }
1942 EXPORT_SYMBOL(kill_pid);
1943
1944 /*
1945 * These functions support sending signals using preallocated sigqueue
1946 * structures. This is needed "because realtime applications cannot
1947 * afford to lose notifications of asynchronous events, like timer
1948 * expirations or I/O completions". In the case of POSIX Timers
1949 * we allocate the sigqueue structure from the timer_create. If this
1950 * allocation fails we are able to report the failure to the application
1951 * with an EAGAIN error.
1952 */
1953 struct sigqueue *sigqueue_alloc(void)
1954 {
1955 return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1956 }
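
/*
 * Sketch of the POSIX-timer life cycle this supports (illustrative, not a
 * spec of the posix-timers code): timer_create() calls sigqueue_alloc() so
 * an allocation failure can be reported as -EAGAIN up front; each expiry
 * then posts the same entry with send_sigqueue(); and timer deletion or exit
 * teardown releases it with sigqueue_free().
 */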
1957
1958 void sigqueue_free(struct sigqueue *q)
1959 {
1960 unsigned long flags;
1961 spinlock_t *lock = &current->sighand->siglock;
1962
1963 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1964 /*
1965 * We must hold ->siglock while testing q->list
1966 * to serialize with collect_signal() or with
1967 * __exit_signal()->flush_sigqueue().
1968 */
1969 spin_lock_irqsave(lock, flags);
1970 q->flags &= ~SIGQUEUE_PREALLOC;
1971 /*
1972 * If it is queued it will be freed when dequeued,
1973 * like the "regular" sigqueue.
1974 */
1975 if (!list_empty(&q->list))
1976 q = NULL;
1977 spin_unlock_irqrestore(lock, flags);
1978
1979 if (q)
1980 __sigqueue_free(q);
1981 }
1982
1983 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1984 {
1985 int sig = q->info.si_signo;
1986 struct sigpending *pending;
1987 struct task_struct *t;
1988 unsigned long flags;
1989 int ret, result;
1990
1991 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1992
1993 ret = -1;
1994 rcu_read_lock();
1995 t = pid_task(pid, type);
1996 if (!t || !likely(lock_task_sighand(t, &flags)))
1997 goto ret;
1998
1999 ret = 1; /* the signal is ignored */
2000 result = TRACE_SIGNAL_IGNORED;
2001 if (!prepare_signal(sig, t, false))
2002 goto out;
2003
2004 ret = 0;
2005 if (unlikely(!list_empty(&q->list))) {
2006 /*
2007 * If an SI_TIMER entry is already queued, just increment
2008 * the overrun count.
2009 */
2010 BUG_ON(q->info.si_code != SI_TIMER);
2011 q->info.si_overrun++;
2012 result = TRACE_SIGNAL_ALREADY_PENDING;
2013 goto out;
2014 }
2015 q->info.si_overrun = 0;
2016
2017 signalfd_notify(t, sig);
2018 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2019 list_add_tail(&q->list, &pending->list);
2020 sigaddset(&pending->signal, sig);
2021 complete_signal(sig, t, type);
2022 result = TRACE_SIGNAL_DELIVERED;
2023 out:
2024 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2025 unlock_task_sighand(t, &flags);
2026 ret:
2027 rcu_read_unlock();
2028 return ret;
2029 }
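
/*
 * Reader's note: because a timer owns exactly one preallocated entry, a
 * timer that fires again before the previous signal was collected does not
 * queue a second entry above; it only bumps q->info.si_overrun, which feeds
 * the overrun accounting eventually reported to user space.
 */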
2030
2031 static void do_notify_pidfd(struct task_struct *task)
2032 {
2033 struct pid *pid;
2034
2035 WARN_ON(task->exit_state == 0);
2036 pid = task_pid(task);
2037 wake_up_all(&pid->wait_pidfd);
2038 }
2039
2040 /*
2041 * Let a parent know about the death of a child.
2042 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2043 *
2044 * Returns true if our parent ignored us and so we've switched to
2045 * self-reaping.
2046 */
2047 bool do_notify_parent(struct task_struct *tsk, int sig)
2048 {
2049 struct kernel_siginfo info;
2050 unsigned long flags;
2051 struct sighand_struct *psig;
2052 bool autoreap = false;
2053 u64 utime, stime;
2054
2055 WARN_ON_ONCE(sig == -1);
2056
2057 /* do_notify_parent_cldstop should have been called instead. */
2058 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2059
2060 WARN_ON_ONCE(!tsk->ptrace &&
2061 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2062
2063 /* Wake up all pidfd waiters */
2064 do_notify_pidfd(tsk);
2065
2066 if (sig != SIGCHLD) {
2067 /*
2068 * This is only possible if parent == real_parent.
2069 * Check if it has changed security domain.
2070 */
2071 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2072 sig = SIGCHLD;
2073 }
2074
2075 clear_siginfo(&info);
2076 info.si_signo = sig;
2077 info.si_errno = 0;
2078 /*
2079 * We are under tasklist_lock here so our parent is tied to
2080 * us and cannot change.
2081 *
2082 * task_active_pid_ns will always return the same pid namespace
2083 * until a task passes through release_task.
2084 *
2085 * write_lock() currently calls preempt_disable() which is the
2086 * same as rcu_read_lock(), but according to Oleg it is not
2087 * correct to rely on this.
2088 */
2089 rcu_read_lock();
2090 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2091 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2092 task_uid(tsk));
2093 rcu_read_unlock();
2094
2095 task_cputime(tsk, &utime, &stime);
2096 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2097 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2098
2099 info.si_status = tsk->exit_code & 0x7f;
2100 if (tsk->exit_code & 0x80)
2101 info.si_code = CLD_DUMPED;
2102 else if (tsk->exit_code & 0x7f)
2103 info.si_code = CLD_KILLED;
2104 else {
2105 info.si_code = CLD_EXITED;
2106 info.si_status = tsk->exit_code >> 8;
2107 }
2108
2109 psig = tsk->parent->sighand;
2110 spin_lock_irqsave(&psig->siglock, flags);
2111 if (!tsk->ptrace && sig == SIGCHLD &&
2112 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2113 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2114 /*
2115 * We are exiting and our parent doesn't care. POSIX.1
2116 * defines special semantics for setting SIGCHLD to SIG_IGN
2117 * or setting the SA_NOCLDWAIT flag: we should be reaped
2118 * automatically and not left for our parent's wait4 call.
2119 * Rather than having the parent do it as a magic kind of
2120 * signal handler, we just set this to tell do_exit that we
2121 * can be cleaned up without becoming a zombie. Note that
2122 * we still call __wake_up_parent in this case, because a
2123 * blocked sys_wait4 might now return -ECHILD.
2124 *
2125 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2126 * is implementation-defined: we do (if you don't want
2127 * it, just use SIG_IGN instead).
2128 */
2129 autoreap = true;
2130 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2131 sig = 0;
2132 }
2133 /*
2134 * Send with __send_signal as si_pid and si_uid are in the
2135 * parent's namespaces.
2136 */
2137 if (valid_signal(sig) && sig)
2138 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2139 __wake_up_parent(tsk, tsk->parent);
2140 spin_unlock_irqrestore(&psig->siglock, flags);
2141
2142 return autoreap;
2143 }
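
/*
 * User-visible consequence of the autoreap logic above (standard POSIX
 * behaviour, shown here only as an illustration): a parent that does
 *
 *	signal(SIGCHLD, SIG_IGN);
 *
 * never accumulates zombies; its children are reaped automatically and a
 * blocked wait()/wait4() call returns -ECHILD instead of a child status.
 */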
2144
2145 /**
2146 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2147 * @tsk: task reporting the state change
2148 * @for_ptracer: the notification is for ptracer
2149 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2150 *
2151 * Notify @tsk's parent that the stopped/continued state has changed. If
2152 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2153 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2154 *
2155 * CONTEXT:
2156 * Must be called with tasklist_lock at least read locked.
2157 */
2158 static void do_notify_parent_cldstop(struct task_struct *tsk,
2159 bool for_ptracer, int why)
2160 {
2161 struct kernel_siginfo info;
2162 unsigned long flags;
2163 struct task_struct *parent;
2164 struct sighand_struct *sighand;
2165 u64 utime, stime;
2166
2167 if (for_ptracer) {
2168 parent = tsk->parent;
2169 } else {
2170 tsk = tsk->group_leader;
2171 parent = tsk->real_parent;
2172 }
2173
2174 clear_siginfo(&info);
2175 info.si_signo = SIGCHLD;
2176 info.si_errno = 0;
2177 /*
2178 * see comment in do_notify_parent() about the following 4 lines
2179 */
2180 rcu_read_lock();
2181 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2182 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2183 rcu_read_unlock();
2184
2185 task_cputime(tsk, &utime, &stime);
2186 info.si_utime = nsec_to_clock_t(utime);
2187 info.si_stime = nsec_to_clock_t(stime);
2188
2189 info.si_code = why;
2190 switch (why) {
2191 case CLD_CONTINUED:
2192 info.si_status = SIGCONT;
2193 break;
2194 case CLD_STOPPED:
2195 info.si_status = tsk->signal->group_exit_code & 0x7f;
2196 break;
2197 case CLD_TRAPPED:
2198 info.si_status = tsk->exit_code & 0x7f;
2199 break;
2200 default:
2201 BUG();
2202 }
2203
2204 sighand = parent->sighand;
2205 spin_lock_irqsave(&sighand->siglock, flags);
2206 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2207 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2208 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2209 /*
2210 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2211 */
2212 __wake_up_parent(tsk, parent);
2213 spin_unlock_irqrestore(&sighand->siglock, flags);
2214 }
2215
2216 /*
2217 * This must be called with current->sighand->siglock held.
2218 *
2219 * This should be the path for all ptrace stops.
2220 * We always set current->last_siginfo while stopped here.
2221 * That makes it a way to test a stopped process for
2222 * being ptrace-stopped vs being job-control-stopped.
2223 *
2224 * Returns the signal the ptracer requested the code resume
2225 * with. If the code did not stop because the tracer is gone,
2226 * the stop signal is returned unchanged.
2227 */
2228 static int ptrace_stop(int exit_code, int why, unsigned long message,
2229 kernel_siginfo_t *info)
2230 __releases(&current->sighand->siglock)
2231 __acquires(&current->sighand->siglock)
2232 {
2233 bool gstop_done = false;
2234
2235 if (arch_ptrace_stop_needed()) {
2236 /*
2237 * The arch code has something special to do before a
2238 * ptrace stop. This is allowed to block, e.g. for faults
2239 * on user stack pages. We can't keep the siglock while
2240 * calling arch_ptrace_stop, so we must release it now.
2241 * To preserve proper semantics, we must do this before
2242 * any signal bookkeeping like checking group_stop_count.
2243 */
2244 spin_unlock_irq(&current->sighand->siglock);
2245 arch_ptrace_stop();
2246 spin_lock_irq(&current->sighand->siglock);
2247 }
2248
2249 /*
2250 * After this point ptrace_signal_wake_up or signal_wake_up
2251 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2252 * signal comes in. Handle previous ptrace_unlinks and fatal
2253 * signals here to prevent ptrace_stop sleeping in schedule.
2254 */
2255 if (!current->ptrace || __fatal_signal_pending(current))
2256 return exit_code;
2257
2258 set_special_state(TASK_TRACED);
2259 current->jobctl |= JOBCTL_TRACED;
2260
2261 /*
2262 * We're committing to trapping. TRACED should be visible before
2263 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2264 * Also, transition to TRACED and updates to ->jobctl should be
2265 * atomic with respect to siglock and should be done after the arch
2266 * hook as siglock is released and regrabbed across it.
2267 *
2268 * TRACER TRACEE
2269 *
2270 * ptrace_attach()
2271 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2272 * do_wait()
2273 * set_current_state() smp_wmb();
2274 * ptrace_do_wait()
2275 * wait_task_stopped()
2276 * task_stopped_code()
2277 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2278 */
2279 smp_wmb();
2280
2281 current->ptrace_message = message;
2282 current->last_siginfo = info;
2283 current->exit_code = exit_code;
2284
2285 /*
2286 * If @why is CLD_STOPPED, we're trapping to participate in a group
2287 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2288 * across siglock relocks since INTERRUPT was scheduled, PENDING
2289 * could be clear now. We act as if SIGCONT is received after
2290 * TASK_TRACED is entered - ignore it.
2291 */
2292 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2293 gstop_done = task_participate_group_stop(current);
2294
2295 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2296 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2297 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2298 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2299
2300 /* entering a trap, clear TRAPPING */
2301 task_clear_jobctl_trapping(current);
2302
2303 spin_unlock_irq(&current->sighand->siglock);
2304 read_lock(&tasklist_lock);
2305 /*
2306 * Notify parents of the stop.
2307 *
2308 * While ptraced, there are two parents - the ptracer and
2309 * the real_parent of the group_leader. The ptracer should
2310 * know about every stop while the real parent is only
2311 * interested in the completion of group stop. The states
2312 * for the two don't interact with each other. Notify
2313 * separately unless they're gonna be duplicates.
2314 */
2315 if (current->ptrace)
2316 do_notify_parent_cldstop(current, true, why);
2317 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2318 do_notify_parent_cldstop(current, false, why);
2319
2320 /*
2321 * Don't want to allow preemption here, because
2322 * sys_ptrace() needs this task to be inactive.
2323 *
2324 * XXX: implement read_unlock_no_resched().
2325 */
2326 preempt_disable();
2327 read_unlock(&tasklist_lock);
2328 cgroup_enter_frozen();
2329 preempt_enable_no_resched();
2330 schedule();
2331 cgroup_leave_frozen(true);
2332
2333 /*
2334 * We are back. Now reacquire the siglock before touching
2335 * last_siginfo, so that we are sure to have synchronized with
2336 * any signal-sending on another CPU that wants to examine it.
2337 */
2338 spin_lock_irq(&current->sighand->siglock);
2339 exit_code = current->exit_code;
2340 current->last_siginfo = NULL;
2341 current->ptrace_message = 0;
2342 current->exit_code = 0;
2343
2344 /* LISTENING can be set only during STOP traps, clear it */
2345 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2346
2347 /*
2348 * Queued signals ignored us while we were stopped for tracing.
2349 * So check for any that we should take before resuming user mode.
2350 * This sets TIF_SIGPENDING, but never clears it.
2351 */
2352 recalc_sigpending_tsk(current);
2353 return exit_code;
2354 }
2355
2356 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2357 {
2358 kernel_siginfo_t info;
2359
2360 clear_siginfo(&info);
2361 info.si_signo = signr;
2362 info.si_code = exit_code;
2363 info.si_pid = task_pid_vnr(current);
2364 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2365
2366 /* Let the debugger run. */
2367 return ptrace_stop(exit_code, why, message, &info);
2368 }
2369
2370 int ptrace_notify(int exit_code, unsigned long message)
2371 {
2372 int signr;
2373
2374 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2375 if (unlikely(task_work_pending(current)))
2376 task_work_run();
2377
2378 spin_lock_irq(&current->sighand->siglock);
2379 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2380 spin_unlock_irq(&current->sighand->siglock);
2381 return signr;
2382 }
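
/*
 * Callers normally reach this through the ptrace_event() helpers, passing an
 * exit_code of roughly (PTRACE_EVENT_xxx << 8) | SIGTRAP together with an
 * event message, which is exactly the shape the BUG_ON() above insists on.
 */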
2383
2384 /**
2385 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2386 * @signr: signr causing group stop if initiating
2387 *
2388 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2389 * and participate in it. If already set, participate in the existing
2390 * group stop. If participated in a group stop (and thus slept), %true is
2391 * returned with siglock released.
2392 *
2393 * If ptraced, this function doesn't handle stop itself. Instead,
2394 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2395 * untouched. The caller must ensure that INTERRUPT trap handling takes
2396 * place afterwards.
2397 *
2398 * CONTEXT:
2399 * Must be called with @current->sighand->siglock held, which is released
2400 * on %true return.
2401 *
2402 * RETURNS:
2403 * %false if group stop is already cancelled or ptrace trap is scheduled.
2404 * %true if participated in group stop.
2405 */
2406 static bool do_signal_stop(int signr)
2407 __releases(&current->sighand->siglock)
2408 {
2409 struct signal_struct *sig = current->signal;
2410
2411 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2412 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2413 struct task_struct *t;
2414
2415 /* signr will be recorded in task->jobctl for retries */
2416 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2417
2418 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2419 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2420 unlikely(sig->group_exec_task))
2421 return false;
2422 /*
2423 * There is no group stop already in progress. We must
2424 * initiate one now.
2425 *
2426 * While ptraced, a task may be resumed while group stop is
2427 * still in effect and then receive a stop signal and
2428 * initiate another group stop. This deviates from the
2429 * usual behavior as two consecutive stop signals can't
2430 * cause two group stops when !ptraced. That is why we
2431 * also check !task_is_stopped(t) below.
2432 *
2433 * The condition can be distinguished by testing whether
2434 * SIGNAL_STOP_STOPPED is already set. Don't generate
2435 * group_exit_code in such case.
2436 *
2437 * This is not necessary for SIGNAL_STOP_CONTINUED because
2438 * an intervening stop signal is required to cause two
2439 * continued events regardless of ptrace.
2440 */
2441 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2442 sig->group_exit_code = signr;
2443
2444 sig->group_stop_count = 0;
2445
2446 if (task_set_jobctl_pending(current, signr | gstop))
2447 sig->group_stop_count++;
2448
2449 t = current;
2450 while_each_thread(current, t) {
2451 /*
2452 * Setting state to TASK_STOPPED for a group
2453 * stop is always done with the siglock held,
2454 * so this check has no races.
2455 */
2456 if (!task_is_stopped(t) &&
2457 task_set_jobctl_pending(t, signr | gstop)) {
2458 sig->group_stop_count++;
2459 if (likely(!(t->ptrace & PT_SEIZED)))
2460 signal_wake_up(t, 0);
2461 else
2462 ptrace_trap_notify(t);
2463 }
2464 }
2465 }
2466
2467 if (likely(!current->ptrace)) {
2468 int notify = 0;
2469
2470 /*
2471 * If there are no other threads in the group, or if there
2472 * is a group stop in progress and we are the last to stop,
2473 * report to the parent.
2474 */
2475 if (task_participate_group_stop(current))
2476 notify = CLD_STOPPED;
2477
2478 current->jobctl |= JOBCTL_STOPPED;
2479 set_special_state(TASK_STOPPED);
2480 spin_unlock_irq(&current->sighand->siglock);
2481
2482 /*
2483 * Notify the parent of the group stop completion. Because
2484 * we're not holding either the siglock or tasklist_lock
2485 * here, the ptracer may attach in between; however, this is for
2486 * group stop and should always be delivered to the real
2487 * parent of the group leader. The new ptracer will get
2488 * its notification when this task transitions into
2489 * TASK_TRACED.
2490 */
2491 if (notify) {
2492 read_lock(&tasklist_lock);
2493 do_notify_parent_cldstop(current, false, notify);
2494 read_unlock(&tasklist_lock);
2495 }
2496
2497 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2498 cgroup_enter_frozen();
2499 schedule();
2500 return true;
2501 } else {
2502 /*
2503 * While ptraced, group stop is handled by STOP trap.
2504 * Schedule it and let the caller deal with it.
2505 */
2506 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2507 return false;
2508 }
2509 }
2510
2511 /**
2512 * do_jobctl_trap - take care of ptrace jobctl traps
2513 *
2514 * When PT_SEIZED, it's used for both group stop and explicit
2515 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2516 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2517 * the stop signal; otherwise, %SIGTRAP.
2518 *
2519 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2520 * number as exit_code and no siginfo.
2521 *
2522 * CONTEXT:
2523 * Must be called with @current->sighand->siglock held, which may be
2524 * released and re-acquired before returning with intervening sleep.
2525 */
2526 static void do_jobctl_trap(void)
2527 {
2528 struct signal_struct *signal = current->signal;
2529 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2530
2531 if (current->ptrace & PT_SEIZED) {
2532 if (!signal->group_stop_count &&
2533 !(signal->flags & SIGNAL_STOP_STOPPED))
2534 signr = SIGTRAP;
2535 WARN_ON_ONCE(!signr);
2536 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2537 CLD_STOPPED, 0);
2538 } else {
2539 WARN_ON_ONCE(!signr);
2540 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2541 }
2542 }
2543
2544 /**
2545 * do_freezer_trap - handle the freezer jobctl trap
2546 *
2547 * Puts the task into the frozen state, but only if the task is not about
2548 * to quit; in that case it drops JOBCTL_TRAP_FREEZE.
2549 *
2550 * CONTEXT:
2551 * Must be called with @current->sighand->siglock held,
2552 * which is always released before returning.
2553 */
2554 static void do_freezer_trap(void)
2555 __releases(&current->sighand->siglock)
2556 {
2557 /*
2558 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2559 * let's make another loop to give it a chance to be handled.
2560 * In any case, we'll return.
2561 */
2562 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2563 JOBCTL_TRAP_FREEZE) {
2564 spin_unlock_irq(&current->sighand->siglock);
2565 return;
2566 }
2567
2568 /*
2569 * Now we're sure that there is no pending fatal signal and no
2570 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2571 * immediately (if there is a non-fatal signal pending), and
2572 * put the task into sleep.
2573 */
2574 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2575 clear_thread_flag(TIF_SIGPENDING);
2576 spin_unlock_irq(&current->sighand->siglock);
2577 cgroup_enter_frozen();
2578 schedule();
2579 }
2580
2581 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2582 {
2583 /*
2584 * We do not check sig_kernel_stop(signr) but set this marker
2585 * unconditionally because we do not know whether debugger will
2586 * change signr. This flag has no meaning unless we are going
2587 * to stop after return from ptrace_stop(). In this case it will
2588 * be checked in do_signal_stop(), we should only stop if it was
2589 * not cleared by SIGCONT while we were sleeping. See also the
2590 * comment in dequeue_signal().
2591 */
2592 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2593 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2594
2595 /* We're back. Did the debugger cancel the sig? */
2596 if (signr == 0)
2597 return signr;
2598
2599 /*
2600 * Update the siginfo structure if the signal has
2601 * changed. If the debugger wanted something
2602 * specific in the siginfo structure then it should
2603 * have updated *info via PTRACE_SETSIGINFO.
2604 */
2605 if (signr != info->si_signo) {
2606 clear_siginfo(info);
2607 info->si_signo = signr;
2608 info->si_errno = 0;
2609 info->si_code = SI_USER;
2610 rcu_read_lock();
2611 info->si_pid = task_pid_vnr(current->parent);
2612 info->si_uid = from_kuid_munged(current_user_ns(),
2613 task_uid(current->parent));
2614 rcu_read_unlock();
2615 }
2616
2617 /* If the (new) signal is now blocked, requeue it. */
2618 if (sigismember(&current->blocked, signr) ||
2619 fatal_signal_pending(current)) {
2620 send_signal_locked(signr, info, current, type);
2621 signr = 0;
2622 }
2623
2624 return signr;
2625 }
2626
2627 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2628 {
2629 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2630 case SIL_FAULT:
2631 case SIL_FAULT_TRAPNO:
2632 case SIL_FAULT_MCEERR:
2633 case SIL_FAULT_BNDERR:
2634 case SIL_FAULT_PKUERR:
2635 case SIL_FAULT_PERF_EVENT:
2636 ksig->info.si_addr = arch_untagged_si_addr(
2637 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2638 break;
2639 case SIL_KILL:
2640 case SIL_TIMER:
2641 case SIL_POLL:
2642 case SIL_CHLD:
2643 case SIL_RT:
2644 case SIL_SYS:
2645 break;
2646 }
2647 }
2648
2649 bool get_signal(struct ksignal *ksig)
2650 {
2651 struct sighand_struct *sighand = current->sighand;
2652 struct signal_struct *signal = current->signal;
2653 int signr;
2654
2655 clear_notify_signal();
2656 if (unlikely(task_work_pending(current)))
2657 task_work_run();
2658
2659 if (!task_sigpending(current))
2660 return false;
2661
2662 if (unlikely(uprobe_deny_signal()))
2663 return false;
2664
2665 /*
2666 * Do this once, we can't return to user-mode if freezing() == T.
2667 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2668 * thus do not need another check after return.
2669 */
2670 try_to_freeze();
2671
2672 relock:
2673 spin_lock_irq(&sighand->siglock);
2674
2675 /*
2676 * Every stopped thread goes here after wakeup. Check to see if
2677 * we should notify the parent, prepare_signal(SIGCONT) encodes
2678 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2679 */
2680 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2681 int why;
2682
2683 if (signal->flags & SIGNAL_CLD_CONTINUED)
2684 why = CLD_CONTINUED;
2685 else
2686 why = CLD_STOPPED;
2687
2688 signal->flags &= ~SIGNAL_CLD_MASK;
2689
2690 spin_unlock_irq(&sighand->siglock);
2691
2692 /*
2693 * Notify the parent that we're continuing. This event is
2694 * always per-process and doesn't make a whole lot of sense
2695 * for ptracers, who shouldn't consume the state via
2696 * wait(2) either, but, for backward compatibility, notify
2697 * the ptracer of the group leader too unless it's gonna be
2698 * a duplicate.
2699 */
2700 read_lock(&tasklist_lock);
2701 do_notify_parent_cldstop(current, false, why);
2702
2703 if (ptrace_reparented(current->group_leader))
2704 do_notify_parent_cldstop(current->group_leader,
2705 true, why);
2706 read_unlock(&tasklist_lock);
2707
2708 goto relock;
2709 }
2710
2711 for (;;) {
2712 struct k_sigaction *ka;
2713 enum pid_type type;
2714
2715 /* Has this task already been marked for death? */
2716 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2717 signal->group_exec_task) {
2718 ksig->info.si_signo = signr = SIGKILL;
2719 sigdelset(&current->pending.signal, SIGKILL);
2720 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2721 &sighand->action[SIGKILL - 1]);
2722 recalc_sigpending();
2723 goto fatal;
2724 }
2725
2726 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2727 do_signal_stop(0))
2728 goto relock;
2729
2730 if (unlikely(current->jobctl &
2731 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2732 if (current->jobctl & JOBCTL_TRAP_MASK) {
2733 do_jobctl_trap();
2734 spin_unlock_irq(&sighand->siglock);
2735 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2736 do_freezer_trap();
2737
2738 goto relock;
2739 }
2740
2741 /*
2742 * If the task is leaving the frozen state, let's update
2743 * cgroup counters and reset the frozen bit.
2744 */
2745 if (unlikely(cgroup_task_frozen(current))) {
2746 spin_unlock_irq(&sighand->siglock);
2747 cgroup_leave_frozen(false);
2748 goto relock;
2749 }
2750
2751 /*
2752 * Signals generated by the execution of an instruction
2753 * need to be delivered before any other pending signals
2754 * so that the instruction pointer in the signal stack
2755 * frame points to the faulting instruction.
2756 */
2757 type = PIDTYPE_PID;
2758 signr = dequeue_synchronous_signal(&ksig->info);
2759 if (!signr)
2760 signr = dequeue_signal(current, &current->blocked,
2761 &ksig->info, &type);
2762
2763 if (!signr)
2764 break; /* will return 0 */
2765
2766 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2767 !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
2768 signr = ptrace_signal(signr, &ksig->info, type);
2769 if (!signr)
2770 continue;
2771 }
2772
2773 ka = &sighand->action[signr-1];
2774
2775 /* Trace actually delivered signals. */
2776 trace_signal_deliver(signr, &ksig->info, ka);
2777
2778 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2779 continue;
2780 if (ka->sa.sa_handler != SIG_DFL) {
2781 /* Run the handler. */
2782 ksig->ka = *ka;
2783
2784 if (ka->sa.sa_flags & SA_ONESHOT)
2785 ka->sa.sa_handler = SIG_DFL;
2786
2787 break; /* will return non-zero "signr" value */
2788 }
2789
2790 /*
2791 * Now we are doing the default action for this signal.
2792 */
2793 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2794 continue;
2795
2796 /*
2797 * Global init gets no signals it doesn't want.
2798 * Container-init gets no signals it doesn't want from same
2799 * container.
2800 *
2801 * Note that if global/container-init sees a sig_kernel_only()
2802 * signal here, the signal must have been generated internally
2803 * or must have come from an ancestor namespace. In either
2804 * case, the signal cannot be dropped.
2805 */
2806 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2807 !sig_kernel_only(signr))
2808 continue;
2809
2810 if (sig_kernel_stop(signr)) {
2811 /*
2812 * The default action is to stop all threads in
2813 * the thread group. The job control signals
2814 * do nothing in an orphaned pgrp, but SIGSTOP
2815 * always works. Note that siglock needs to be
2816 * dropped during the call to is_orphaned_pgrp()
2817 * because of lock ordering with tasklist_lock.
2818 * This allows an intervening SIGCONT to be posted.
2819 * We need to check for that and bail out if necessary.
2820 */
2821 if (signr != SIGSTOP) {
2822 spin_unlock_irq(&sighand->siglock);
2823
2824 /* signals can be posted during this window */
2825
2826 if (is_current_pgrp_orphaned())
2827 goto relock;
2828
2829 spin_lock_irq(&sighand->siglock);
2830 }
2831
2832 if (likely(do_signal_stop(ksig->info.si_signo))) {
2833 /* It released the siglock. */
2834 goto relock;
2835 }
2836
2837 /*
2838 * We didn't actually stop, due to a race
2839 * with SIGCONT or something like that.
2840 */
2841 continue;
2842 }
2843
2844 fatal:
2845 spin_unlock_irq(&sighand->siglock);
2846 if (unlikely(cgroup_task_frozen(current)))
2847 cgroup_leave_frozen(true);
2848
2849 /*
2850 * Anything else is fatal, maybe with a core dump.
2851 */
2852 current->flags |= PF_SIGNALED;
2853
2854 if (sig_kernel_coredump(signr)) {
2855 if (print_fatal_signals)
2856 print_fatal_signal(ksig->info.si_signo);
2857 proc_coredump_connector(current);
2858 /*
2859 * If it was able to dump core, this kills all
2860 * other threads in the group and synchronizes with
2861 * their demise. If we lost the race with another
2862 * thread getting here, it set group_exit_code
2863 * first and our do_group_exit call below will use
2864 * that value and ignore the one we pass it.
2865 */
2866 do_coredump(&ksig->info);
2867 }
2868
2869 /*
2870 * PF_IO_WORKER threads will catch and exit on fatal signals
2871 * themselves. They have cleanup that must be performed, so
2872 * we cannot call do_exit() on their behalf.
2873 */
2874 if (current->flags & PF_IO_WORKER)
2875 goto out;
2876
2877 /*
2878 * Death signals, no core dump.
2879 */
2880 do_group_exit(ksig->info.si_signo);
2881 /* NOTREACHED */
2882 }
2883 spin_unlock_irq(&sighand->siglock);
2884 out:
2885 ksig->sig = signr;
2886
2887 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2888 hide_si_addr_tag_bits(ksig);
2889
2890 return ksig->sig > 0;
2891 }
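
/*
 * Summary for callers: a true return means @ksig describes a signal for
 * which the caller must set up a user handler (or, for PF_IO_WORKER threads,
 * perform their own fatal-signal cleanup); a false return means nothing is
 * left to deliver and the task can simply resume user mode.
 */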
2892
2893 /**
2894 * signal_delivered - called after signal delivery to update blocked signals
2895 * @ksig: kernel signal struct
2896 * @stepping: nonzero if debugger single-step or block-step in use
2897 *
2898 * This function should be called when a signal has successfully been
2899 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2900 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2901 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2902 */
2903 static void signal_delivered(struct ksignal *ksig, int stepping)
2904 {
2905 sigset_t blocked;
2906
2907 /* A signal was successfully delivered, and the
2908 saved sigmask was stored on the signal frame,
2909 and will be restored by sigreturn. So we can
2910 simply clear the restore sigmask flag. */
2911 clear_restore_sigmask();
2912
2913 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2914 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2915 sigaddset(&blocked, ksig->sig);
2916 set_current_blocked(&blocked);
2917 if (current->sas_ss_flags & SS_AUTODISARM)
2918 sas_ss_reset(current);
2919 if (stepping)
2920 ptrace_notify(SIGTRAP, 0);
2921 }
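
/*
 * In effect the mask computed above is (a restatement of the code, not new
 * policy):
 *
 *	blocked' = blocked | ka.sa.sa_mask
 *	blocked' |= sigmask(sig)	unless SA_NODEFER was set
 *
 * which is the usual POSIX rule for masking while a handler runs.
 */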
2922
2923 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2924 {
2925 if (failed)
2926 force_sigsegv(ksig->sig);
2927 else
2928 signal_delivered(ksig, stepping);
2929 }
2930
2931 /*
2932 * It could be that complete_signal() picked us to notify about the
2933 * group-wide signal. Other threads should be notified now to take
2934 * the shared signals in @which since we will not.
2935 */
2936 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2937 {
2938 sigset_t retarget;
2939 struct task_struct *t;
2940
2941 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2942 if (sigisemptyset(&retarget))
2943 return;
2944
2945 t = tsk;
2946 while_each_thread(tsk, t) {
2947 if (t->flags & PF_EXITING)
2948 continue;
2949
2950 if (!has_pending_signals(&retarget, &t->blocked))
2951 continue;
2952 /* Remove the signals this thread can handle. */
2953 sigandsets(&retarget, &retarget, &t->blocked);
2954
2955 if (!task_sigpending(t))
2956 signal_wake_up(t, 0);
2957
2958 if (sigisemptyset(&retarget))
2959 break;
2960 }
2961 }
2962
2963 void exit_signals(struct task_struct *tsk)
2964 {
2965 int group_stop = 0;
2966 sigset_t unblocked;
2967
2968 /*
2969 * @tsk is about to have PF_EXITING set - lock out users which
2970 * expect stable threadgroup.
2971 */
2972 cgroup_threadgroup_change_begin(tsk);
2973
2974 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2975 tsk->flags |= PF_EXITING;
2976 cgroup_threadgroup_change_end(tsk);
2977 return;
2978 }
2979
2980 spin_lock_irq(&tsk->sighand->siglock);
2981 /*
2982 * From now this task is not visible for group-wide signals,
2983 * see wants_signal(), do_signal_stop().
2984 */
2985 tsk->flags |= PF_EXITING;
2986
2987 cgroup_threadgroup_change_end(tsk);
2988
2989 if (!task_sigpending(tsk))
2990 goto out;
2991
2992 unblocked = tsk->blocked;
2993 signotset(&unblocked);
2994 retarget_shared_pending(tsk, &unblocked);
2995
2996 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2997 task_participate_group_stop(tsk))
2998 group_stop = CLD_STOPPED;
2999 out:
3000 spin_unlock_irq(&tsk->sighand->siglock);
3001
3002 /*
3003 * If group stop has completed, deliver the notification. This
3004 * should always go to the real parent of the group leader.
3005 */
3006 if (unlikely(group_stop)) {
3007 read_lock(&tasklist_lock);
3008 do_notify_parent_cldstop(tsk, false, group_stop);
3009 read_unlock(&tasklist_lock);
3010 }
3011 }
3012
3013 /*
3014 * System call entry points.
3015 */
3016
3017 /**
3018 * sys_restart_syscall - restart a system call
3019 */
3020 SYSCALL_DEFINE0(restart_syscall)
3021 {
3022 struct restart_block *restart = &current->restart_block;
3023 return restart->fn(restart);
3024 }
3025
3026 long do_no_restart_syscall(struct restart_block *param)
3027 {
3028 return -EINTR;
3029 }
3030
3031 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3032 {
3033 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3034 sigset_t newblocked;
3035 /* A set of now blocked but previously unblocked signals. */
3036 sigandnsets(&newblocked, newset, &current->blocked);
3037 retarget_shared_pending(tsk, &newblocked);
3038 }
3039 tsk->blocked = *newset;
3040 recalc_sigpending();
3041 }
3042
3043 /**
3044 * set_current_blocked - change current->blocked mask
3045 * @newset: new mask
3046 *
3047 * It is wrong to change ->blocked directly, this helper should be used
3048 * to ensure the process can't miss a shared signal we are going to block.
3049 */
3050 void set_current_blocked(sigset_t *newset)
3051 {
3052 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3053 __set_current_blocked(newset);
3054 }
3055
3056 void __set_current_blocked(const sigset_t *newset)
3057 {
3058 struct task_struct *tsk = current;
3059
3060 /*
3061 * In case the signal mask hasn't changed, there is nothing we need
3062 * to do. The current->blocked shouldn't be modified by other task.
3063 */
3064 if (sigequalsets(&tsk->blocked, newset))
3065 return;
3066
3067 spin_lock_irq(&tsk->sighand->siglock);
3068 __set_task_blocked(tsk, newset);
3069 spin_unlock_irq(&tsk->sighand->siglock);
3070 }
3071
3072 /*
3073 * This is also useful for kernel threads that want to temporarily
3074 * (or permanently) block certain signals.
3075 *
3076 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3077 * interface happily blocks "unblockable" signals like SIGKILL
3078 * and friends.
3079 */
3080 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3081 {
3082 struct task_struct *tsk = current;
3083 sigset_t newset;
3084
3085 /* Lockless, only current can change ->blocked, never from irq */
3086 if (oldset)
3087 *oldset = tsk->blocked;
3088
3089 switch (how) {
3090 case SIG_BLOCK:
3091 sigorsets(&newset, &tsk->blocked, set);
3092 break;
3093 case SIG_UNBLOCK:
3094 sigandnsets(&newset, &tsk->blocked, set);
3095 break;
3096 case SIG_SETMASK:
3097 newset = *set;
3098 break;
3099 default:
3100 return -EINVAL;
3101 }
3102
3103 __set_current_blocked(&newset);
3104 return 0;
3105 }
3106 EXPORT_SYMBOL(sigprocmask);
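
/*
 * Illustrative in-kernel use (hypothetical caller, not taken from this file):
 * a kernel thread that wants to shut out asynchronous signals entirely can do
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 *
 * and, unlike the userspace syscall, this really does block SIGKILL/SIGSTOP,
 * as the comment above notes.
 */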
3107
3108 /*
3109 * The api helps set app-provided sigmasks.
3110 *
3111 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3112 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3113 *
3114 * Note that it does set_restore_sigmask() in advance, so it must be always
3115 * paired with restore_saved_sigmask_unless() before return from syscall.
3116 */
3117 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3118 {
3119 sigset_t kmask;
3120
3121 if (!umask)
3122 return 0;
3123 if (sigsetsize != sizeof(sigset_t))
3124 return -EINVAL;
3125 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3126 return -EFAULT;
3127
3128 set_restore_sigmask();
3129 current->saved_sigmask = current->blocked;
3130 set_current_blocked(&kmask);
3131
3132 return 0;
3133 }
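
/*
 * Sketch of the expected pairing in a ppoll-style syscall (simplified;
 * "do_the_wait" stands in for the real work and is not a real helper):
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_the_wait(...);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *
 * so the caller's original mask is restored now, unless a signal still has
 * to be delivered first with the temporary mask in place.
 */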
3134
3135 #ifdef CONFIG_COMPAT
3136 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3137 size_t sigsetsize)
3138 {
3139 sigset_t kmask;
3140
3141 if (!umask)
3142 return 0;
3143 if (sigsetsize != sizeof(compat_sigset_t))
3144 return -EINVAL;
3145 if (get_compat_sigset(&kmask, umask))
3146 return -EFAULT;
3147
3148 set_restore_sigmask();
3149 current->saved_sigmask = current->blocked;
3150 set_current_blocked(&kmask);
3151
3152 return 0;
3153 }
3154 #endif
3155
3156 /**
3157 * sys_rt_sigprocmask - change the list of currently blocked signals
3158 * @how: whether to add, remove, or set signals
3159 * @nset: new set of blocked signals, if non-null
3160 * @oset: previous value of signal mask if non-null
3161 * @sigsetsize: size of sigset_t type
3162 */
3163 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3164 sigset_t __user *, oset, size_t, sigsetsize)
3165 {
3166 sigset_t old_set, new_set;
3167 int error;
3168
3169 /* XXX: Don't preclude handling different sized sigset_t's. */
3170 if (sigsetsize != sizeof(sigset_t))
3171 return -EINVAL;
3172
3173 old_set = current->blocked;
3174
3175 if (nset) {
3176 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3177 return -EFAULT;
3178 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3179
3180 error = sigprocmask(how, &new_set, NULL);
3181 if (error)
3182 return error;
3183 }
3184
3185 if (oset) {
3186 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3187 return -EFAULT;
3188 }
3189
3190 return 0;
3191 }
3192
3193 #ifdef CONFIG_COMPAT
3194 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3195 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3196 {
3197 sigset_t old_set = current->blocked;
3198
3199 /* XXX: Don't preclude handling different sized sigset_t's. */
3200 if (sigsetsize != sizeof(sigset_t))
3201 return -EINVAL;
3202
3203 if (nset) {
3204 sigset_t new_set;
3205 int error;
3206 if (get_compat_sigset(&new_set, nset))
3207 return -EFAULT;
3208 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3209
3210 error = sigprocmask(how, &new_set, NULL);
3211 if (error)
3212 return error;
3213 }
3214 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3215 }
3216 #endif
3217
3218 static void do_sigpending(sigset_t *set)
3219 {
3220 spin_lock_irq(&current->sighand->siglock);
3221 sigorsets(set, &current->pending.signal,
3222 &current->signal->shared_pending.signal);
3223 spin_unlock_irq(&current->sighand->siglock);
3224
3225 /* Outside the lock because only this thread touches it. */
3226 sigandsets(set, &current->blocked, set);
3227 }
3228
3229 /**
3230 * sys_rt_sigpending - examine a pending signal that has been raised
3231 * while blocked
3232 * @uset: stores pending signals
3233 * @sigsetsize: size of sigset_t type or smaller
3234 */
3235 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3236 {
3237 sigset_t set;
3238
3239 if (sigsetsize > sizeof(*uset))
3240 return -EINVAL;
3241
3242 do_sigpending(&set);
3243
3244 if (copy_to_user(uset, &set, sigsetsize))
3245 return -EFAULT;
3246
3247 return 0;
3248 }
3249
3250 #ifdef CONFIG_COMPAT
3251 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3252 compat_size_t, sigsetsize)
3253 {
3254 sigset_t set;
3255
3256 if (sigsetsize > sizeof(*uset))
3257 return -EINVAL;
3258
3259 do_sigpending(&set);
3260
3261 return put_compat_sigset(uset, &set, sigsetsize);
3262 }
3263 #endif
3264
3265 static const struct {
3266 unsigned char limit, layout;
3267 } sig_sicodes[] = {
3268 [SIGILL] = { NSIGILL, SIL_FAULT },
3269 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3270 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3271 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3272 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3273 #if defined(SIGEMT)
3274 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3275 #endif
3276 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3277 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3278 [SIGSYS] = { NSIGSYS, SIL_SYS },
3279 };
3280
3281 static bool known_siginfo_layout(unsigned sig, int si_code)
3282 {
3283 if (si_code == SI_KERNEL)
3284 return true;
3285 else if ((si_code > SI_USER)) {
3286 if (sig_specific_sicodes(sig)) {
3287 if (si_code <= sig_sicodes[sig].limit)
3288 return true;
3289 }
3290 else if (si_code <= NSIGPOLL)
3291 return true;
3292 }
3293 else if (si_code >= SI_DETHREAD)
3294 return true;
3295 else if (si_code == SI_ASYNCNL)
3296 return true;
3297 return false;
3298 }
3299
3300 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3301 {
3302 enum siginfo_layout layout = SIL_KILL;
3303 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3304 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3305 (si_code <= sig_sicodes[sig].limit)) {
3306 layout = sig_sicodes[sig].layout;
3307 /* Handle the exceptions */
3308 if ((sig == SIGBUS) &&
3309 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3310 layout = SIL_FAULT_MCEERR;
3311 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3312 layout = SIL_FAULT_BNDERR;
3313 #ifdef SEGV_PKUERR
3314 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3315 layout = SIL_FAULT_PKUERR;
3316 #endif
3317 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3318 layout = SIL_FAULT_PERF_EVENT;
3319 else if (IS_ENABLED(CONFIG_SPARC) &&
3320 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3321 layout = SIL_FAULT_TRAPNO;
3322 else if (IS_ENABLED(CONFIG_ALPHA) &&
3323 ((sig == SIGFPE) ||
3324 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3325 layout = SIL_FAULT_TRAPNO;
3326 }
3327 else if (si_code <= NSIGPOLL)
3328 layout = SIL_POLL;
3329 } else {
3330 if (si_code == SI_TIMER)
3331 layout = SIL_TIMER;
3332 else if (si_code == SI_SIGIO)
3333 layout = SIL_POLL;
3334 else if (si_code < 0)
3335 layout = SIL_RT;
3336 }
3337 return layout;
3338 }
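
/*
 * A few concrete mappings, as an illustration of the logic above:
 * SIGSEGV/SEGV_MAPERR -> SIL_FAULT, SIGSEGV/SEGV_BNDERR -> SIL_FAULT_BNDERR,
 * SIGCHLD/CLD_EXITED -> SIL_CHLD, and any negative si_code such as SI_QUEUE
 * from sigqueue() -> SIL_RT.
 */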
3339
3340 static inline char __user *si_expansion(const siginfo_t __user *info)
3341 {
3342 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3343 }
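
/*
 * Background: the siginfo_t that user space sees is larger than struct
 * kernel_siginfo; SI_EXPANSION_SIZE is the difference, and si_expansion()
 * points at that trailing area so it can be cleared on copy-out and checked
 * to be all zero on copy-in (see the helpers below).
 */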
3344
3345 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3346 {
3347 char __user *expansion = si_expansion(to);
3348 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3349 return -EFAULT;
3350 if (clear_user(expansion, SI_EXPANSION_SIZE))
3351 return -EFAULT;
3352 return 0;
3353 }
3354
3355 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3356 const siginfo_t __user *from)
3357 {
3358 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3359 char __user *expansion = si_expansion(from);
3360 char buf[SI_EXPANSION_SIZE];
3361 int i;
3362 /*
3363 * An unknown si_code might need more than
3364 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3365 * extra bytes are 0. This guarantees copy_siginfo_to_user
3366 * will return this data to userspace exactly.
3367 */
3368 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3369 return -EFAULT;
3370 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3371 if (buf[i] != 0)
3372 return -E2BIG;
3373 }
3374 }
3375 return 0;
3376 }
3377
3378 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3379 const siginfo_t __user *from)
3380 {
3381 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3382 return -EFAULT;
3383 to->si_signo = signo;
3384 return post_copy_siginfo_from_user(to, from);
3385 }
3386
3387 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3388 {
3389 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3390 return -EFAULT;
3391 return post_copy_siginfo_from_user(to, from);
3392 }
3393
3394 #ifdef CONFIG_COMPAT
3395 /**
3396 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3397 * @to: compat siginfo destination
3398 * @from: kernel siginfo source
3399 *
3400 * Note: This function does not work properly for the SIGCHLD on x32, but
3401 * fortunately it doesn't have to. The only valid callers for this function are
3402 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3403 * The latter does not care because SIGCHLD will never cause a coredump.
3404 */
3405 void copy_siginfo_to_external32(struct compat_siginfo *to,
3406 const struct kernel_siginfo *from)
3407 {
3408 memset(to, 0, sizeof(*to));
3409
3410 to->si_signo = from->si_signo;
3411 to->si_errno = from->si_errno;
3412 to->si_code = from->si_code;
3413 switch(siginfo_layout(from->si_signo, from->si_code)) {
3414 case SIL_KILL:
3415 to->si_pid = from->si_pid;
3416 to->si_uid = from->si_uid;
3417 break;
3418 case SIL_TIMER:
3419 to->si_tid = from->si_tid;
3420 to->si_overrun = from->si_overrun;
3421 to->si_int = from->si_int;
3422 break;
3423 case SIL_POLL:
3424 to->si_band = from->si_band;
3425 to->si_fd = from->si_fd;
3426 break;
3427 case SIL_FAULT:
3428 to->si_addr = ptr_to_compat(from->si_addr);
3429 break;
3430 case SIL_FAULT_TRAPNO:
3431 to->si_addr = ptr_to_compat(from->si_addr);
3432 to->si_trapno = from->si_trapno;
3433 break;
3434 case SIL_FAULT_MCEERR:
3435 to->si_addr = ptr_to_compat(from->si_addr);
3436 to->si_addr_lsb = from->si_addr_lsb;
3437 break;
3438 case SIL_FAULT_BNDERR:
3439 to->si_addr = ptr_to_compat(from->si_addr);
3440 to->si_lower = ptr_to_compat(from->si_lower);
3441 to->si_upper = ptr_to_compat(from->si_upper);
3442 break;
3443 case SIL_FAULT_PKUERR:
3444 to->si_addr = ptr_to_compat(from->si_addr);
3445 to->si_pkey = from->si_pkey;
3446 break;
3447 case SIL_FAULT_PERF_EVENT:
3448 to->si_addr = ptr_to_compat(from->si_addr);
3449 to->si_perf_data = from->si_perf_data;
3450 to->si_perf_type = from->si_perf_type;
3451 to->si_perf_flags = from->si_perf_flags;
3452 break;
3453 case SIL_CHLD:
3454 to->si_pid = from->si_pid;
3455 to->si_uid = from->si_uid;
3456 to->si_status = from->si_status;
3457 to->si_utime = from->si_utime;
3458 to->si_stime = from->si_stime;
3459 break;
3460 case SIL_RT:
3461 to->si_pid = from->si_pid;
3462 to->si_uid = from->si_uid;
3463 to->si_int = from->si_int;
3464 break;
3465 case SIL_SYS:
3466 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3467 to->si_syscall = from->si_syscall;
3468 to->si_arch = from->si_arch;
3469 break;
3470 }
3471 }
3472
3473 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3474 const struct kernel_siginfo *from)
3475 {
3476 struct compat_siginfo new;
3477
3478 copy_siginfo_to_external32(&new, from);
3479 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3480 return -EFAULT;
3481 return 0;
3482 }
3483
3484 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3485 const struct compat_siginfo *from)
3486 {
3487 clear_siginfo(to);
3488 to->si_signo = from->si_signo;
3489 to->si_errno = from->si_errno;
3490 to->si_code = from->si_code;
3491 switch(siginfo_layout(from->si_signo, from->si_code)) {
3492 case SIL_KILL:
3493 to->si_pid = from->si_pid;
3494 to->si_uid = from->si_uid;
3495 break;
3496 case SIL_TIMER:
3497 to->si_tid = from->si_tid;
3498 to->si_overrun = from->si_overrun;
3499 to->si_int = from->si_int;
3500 break;
3501 case SIL_POLL:
3502 to->si_band = from->si_band;
3503 to->si_fd = from->si_fd;
3504 break;
3505 case SIL_FAULT:
3506 to->si_addr = compat_ptr(from->si_addr);
3507 break;
3508 case SIL_FAULT_TRAPNO:
3509 to->si_addr = compat_ptr(from->si_addr);
3510 to->si_trapno = from->si_trapno;
3511 break;
3512 case SIL_FAULT_MCEERR:
3513 to->si_addr = compat_ptr(from->si_addr);
3514 to->si_addr_lsb = from->si_addr_lsb;
3515 break;
3516 case SIL_FAULT_BNDERR:
3517 to->si_addr = compat_ptr(from->si_addr);
3518 to->si_lower = compat_ptr(from->si_lower);
3519 to->si_upper = compat_ptr(from->si_upper);
3520 break;
3521 case SIL_FAULT_PKUERR:
3522 to->si_addr = compat_ptr(from->si_addr);
3523 to->si_pkey = from->si_pkey;
3524 break;
3525 case SIL_FAULT_PERF_EVENT:
3526 to->si_addr = compat_ptr(from->si_addr);
3527 to->si_perf_data = from->si_perf_data;
3528 to->si_perf_type = from->si_perf_type;
3529 to->si_perf_flags = from->si_perf_flags;
3530 break;
3531 case SIL_CHLD:
3532 to->si_pid = from->si_pid;
3533 to->si_uid = from->si_uid;
3534 to->si_status = from->si_status;
3535 #ifdef CONFIG_X86_X32_ABI
3536 if (in_x32_syscall()) {
3537 to->si_utime = from->_sifields._sigchld_x32._utime;
3538 to->si_stime = from->_sifields._sigchld_x32._stime;
3539 } else
3540 #endif
3541 {
3542 to->si_utime = from->si_utime;
3543 to->si_stime = from->si_stime;
3544 }
3545 break;
3546 case SIL_RT:
3547 to->si_pid = from->si_pid;
3548 to->si_uid = from->si_uid;
3549 to->si_int = from->si_int;
3550 break;
3551 case SIL_SYS:
3552 to->si_call_addr = compat_ptr(from->si_call_addr);
3553 to->si_syscall = from->si_syscall;
3554 to->si_arch = from->si_arch;
3555 break;
3556 }
3557 return 0;
3558 }
3559
3560 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3561 const struct compat_siginfo __user *ufrom)
3562 {
3563 struct compat_siginfo from;
3564
3565 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3566 return -EFAULT;
3567
3568 from.si_signo = signo;
3569 return post_copy_siginfo_from_user32(to, &from);
3570 }
3571
3572 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3573 const struct compat_siginfo __user *ufrom)
3574 {
3575 struct compat_siginfo from;
3576
3577 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3578 return -EFAULT;
3579
3580 return post_copy_siginfo_from_user32(to, &from);
3581 }
3582 #endif /* CONFIG_COMPAT */
3583
3584 /**
3585 * do_sigtimedwait - wait for queued signals specified in @which
3586 * @which: queued signals to wait for
3587 * @info: if non-null, the signal's siginfo is returned here
3588 * @ts: upper bound on process time suspension
3589 */
3590 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3591 const struct timespec64 *ts)
3592 {
3593 ktime_t *to = NULL, timeout = KTIME_MAX;
3594 struct task_struct *tsk = current;
3595 sigset_t mask = *which;
3596 enum pid_type type;
3597 int sig, ret = 0;
3598
3599 if (ts) {
3600 if (!timespec64_valid(ts))
3601 return -EINVAL;
3602 timeout = timespec64_to_ktime(*ts);
3603 to = &timeout;
3604 }
3605
3606 /*
3607 * Invert the set of allowed signals to get those we want to block.
3608 */
3609 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3610 signotset(&mask);
3611
3612 spin_lock_irq(&tsk->sighand->siglock);
3613 sig = dequeue_signal(tsk, &mask, info, &type);
3614 if (!sig && timeout) {
3615 /*
3616 * None ready, temporarily unblock those we're interested in
3617 * while we are sleeping, so that we'll be awakened when
3618 * they arrive. Unblocking is always fine, we can avoid
3619 * set_current_blocked().
3620 */
3621 tsk->real_blocked = tsk->blocked;
3622 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3623 recalc_sigpending();
3624 spin_unlock_irq(&tsk->sighand->siglock);
3625
3626 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3627 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3628 HRTIMER_MODE_REL);
3629 spin_lock_irq(&tsk->sighand->siglock);
3630 __set_task_blocked(tsk, &tsk->real_blocked);
3631 sigemptyset(&tsk->real_blocked);
3632 sig = dequeue_signal(tsk, &mask, info, &type);
3633 }
3634 spin_unlock_irq(&tsk->sighand->siglock);
3635
3636 if (sig)
3637 return sig;
3638 return ret ? -EINTR : -EAGAIN;
3639 }
3640
3641 /**
3642 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3643 * in @uthese
3644 * @uthese: queued signals to wait for
3645 * @uinfo: if non-null, the signal's siginfo is returned here
3646 * @uts: upper bound on process time suspension
3647 * @sigsetsize: size of sigset_t type
3648 */
3649 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3650 siginfo_t __user *, uinfo,
3651 const struct __kernel_timespec __user *, uts,
3652 size_t, sigsetsize)
3653 {
3654 sigset_t these;
3655 struct timespec64 ts;
3656 kernel_siginfo_t info;
3657 int ret;
3658
3659 /* XXX: Don't preclude handling different sized sigset_t's. */
3660 if (sigsetsize != sizeof(sigset_t))
3661 return -EINVAL;
3662
3663 if (copy_from_user(&these, uthese, sizeof(these)))
3664 return -EFAULT;
3665
3666 if (uts) {
3667 if (get_timespec64(&ts, uts))
3668 return -EFAULT;
3669 }
3670
3671 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3672
3673 if (ret > 0 && uinfo) {
3674 if (copy_siginfo_to_user(uinfo, &info))
3675 ret = -EFAULT;
3676 }
3677
3678 return ret;
3679 }
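/*
 * Illustrative userspace sketch (not part of the kernel build): the usual
 * way to reach rt_sigtimedwait is the POSIX sigtimedwait() wrapper.
 * SIGUSR1 and the 5 second timeout below are arbitrary choices for the
 * example, and error handling is minimal.
 *
 *   #include <signal.h>
 *   #include <stdio.h>
 *   #include <time.h>
 *
 *   int main(void)
 *   {
 *           sigset_t set;
 *           siginfo_t info;
 *           struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *           sigemptyset(&set);
 *           sigaddset(&set, SIGUSR1);
 *           // Block the signal so it stays queued instead of running a
 *           // handler; sigtimedwait() then dequeues it synchronously.
 *           sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *           int sig = sigtimedwait(&set, &info, &ts);
 *           if (sig < 0)
 *                   perror("sigtimedwait");  // EAGAIN on timeout
 *           else
 *                   printf("got %d from pid %d\n", sig, (int)info.si_pid);
 *           return 0;
 *   }
 */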
3680
3681 #ifdef CONFIG_COMPAT_32BIT_TIME
3682 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3683 siginfo_t __user *, uinfo,
3684 const struct old_timespec32 __user *, uts,
3685 size_t, sigsetsize)
3686 {
3687 sigset_t these;
3688 struct timespec64 ts;
3689 kernel_siginfo_t info;
3690 int ret;
3691
3692 if (sigsetsize != sizeof(sigset_t))
3693 return -EINVAL;
3694
3695 if (copy_from_user(&these, uthese, sizeof(these)))
3696 return -EFAULT;
3697
3698 if (uts) {
3699 if (get_old_timespec32(&ts, uts))
3700 return -EFAULT;
3701 }
3702
3703 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3704
3705 if (ret > 0 && uinfo) {
3706 if (copy_siginfo_to_user(uinfo, &info))
3707 ret = -EFAULT;
3708 }
3709
3710 return ret;
3711 }
3712 #endif
3713
3714 #ifdef CONFIG_COMPAT
3715 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3716 struct compat_siginfo __user *, uinfo,
3717 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3718 {
3719 sigset_t s;
3720 struct timespec64 t;
3721 kernel_siginfo_t info;
3722 long ret;
3723
3724 if (sigsetsize != sizeof(sigset_t))
3725 return -EINVAL;
3726
3727 if (get_compat_sigset(&s, uthese))
3728 return -EFAULT;
3729
3730 if (uts) {
3731 if (get_timespec64(&t, uts))
3732 return -EFAULT;
3733 }
3734
3735 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3736
3737 if (ret > 0 && uinfo) {
3738 if (copy_siginfo_to_user32(uinfo, &info))
3739 ret = -EFAULT;
3740 }
3741
3742 return ret;
3743 }
3744
3745 #ifdef CONFIG_COMPAT_32BIT_TIME
3746 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3747 struct compat_siginfo __user *, uinfo,
3748 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3749 {
3750 sigset_t s;
3751 struct timespec64 t;
3752 kernel_siginfo_t info;
3753 long ret;
3754
3755 if (sigsetsize != sizeof(sigset_t))
3756 return -EINVAL;
3757
3758 if (get_compat_sigset(&s, uthese))
3759 return -EFAULT;
3760
3761 if (uts) {
3762 if (get_old_timespec32(&t, uts))
3763 return -EFAULT;
3764 }
3765
3766 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3767
3768 if (ret > 0 && uinfo) {
3769 if (copy_siginfo_to_user32(uinfo, &info))
3770 ret = -EFAULT;
3771 }
3772
3773 return ret;
3774 }
3775 #endif
3776 #endif
3777
3778 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3779 {
3780 clear_siginfo(info);
3781 info->si_signo = sig;
3782 info->si_errno = 0;
3783 info->si_code = SI_USER;
3784 info->si_pid = task_tgid_vnr(current);
3785 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3786 }
3787
3788 /**
3789 * sys_kill - send a signal to a process
3790 * @pid: the PID of the process
3791 * @sig: signal to be sent
3792 */
3793 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3794 {
3795 struct kernel_siginfo info;
3796
3797 prepare_kill_siginfo(sig, &info);
3798
3799 return kill_something_info(sig, &info, pid);
3800 }
3801
3802 /*
3803 * Verify that the signaler and signalee either are in the same pid namespace
3804 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3805 * namespace.
3806 */
3807 static bool access_pidfd_pidns(struct pid *pid)
3808 {
3809 struct pid_namespace *active = task_active_pid_ns(current);
3810 struct pid_namespace *p = ns_of_pid(pid);
3811
3812 for (;;) {
3813 if (!p)
3814 return false;
3815 if (p == active)
3816 break;
3817 p = p->parent;
3818 }
3819
3820 return true;
3821 }
3822
3823 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3824 siginfo_t __user *info)
3825 {
3826 #ifdef CONFIG_COMPAT
3827 /*
3828 * Avoid hooking up compat syscalls and instead handle necessary
3829 * conversions here. Note, this is a stop-gap measure and should not be
3830 * considered a generic solution.
3831 */
3832 if (in_compat_syscall())
3833 return copy_siginfo_from_user32(
3834 kinfo, (struct compat_siginfo __user *)info);
3835 #endif
3836 return copy_siginfo_from_user(kinfo, info);
3837 }
3838
3839 static struct pid *pidfd_to_pid(const struct file *file)
3840 {
3841 struct pid *pid;
3842
3843 pid = pidfd_pid(file);
3844 if (!IS_ERR(pid))
3845 return pid;
3846
3847 return tgid_pidfd_to_pid(file);
3848 }
3849
3850 /**
3851 * sys_pidfd_send_signal - Signal a process through a pidfd
3852 * @pidfd: file descriptor of the process
3853 * @sig: signal to send
3854 * @info: signal info
3855 * @flags: future flags
3856 *
3857 * The syscall currently only signals via PIDTYPE_PID which covers
3858 * kill(<positive-pid>, <signal>). It does not signal threads or process
3859 * groups.
3860 * In order to extend the syscall to threads and process groups the @flags
3861 * argument should be used. In essence, the @flags argument will determine
3862 * what is signaled and not the file descriptor itself. In other words,
3863 * grouping is a property of the flags argument, not a property of the file
3864 * descriptor.
3865 *
3866 * Return: 0 on success, negative errno on failure
3867 */
3868 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3869 siginfo_t __user *, info, unsigned int, flags)
3870 {
3871 int ret;
3872 struct fd f;
3873 struct pid *pid;
3874 kernel_siginfo_t kinfo;
3875
3876 /* Enforce that flags are 0 until we add an extension. */
3877 if (flags)
3878 return -EINVAL;
3879
3880 f = fdget(pidfd);
3881 if (!f.file)
3882 return -EBADF;
3883
3884 /* Is this a pidfd? */
3885 pid = pidfd_to_pid(f.file);
3886 if (IS_ERR(pid)) {
3887 ret = PTR_ERR(pid);
3888 goto err;
3889 }
3890
3891 ret = -EINVAL;
3892 if (!access_pidfd_pidns(pid))
3893 goto err;
3894
3895 if (info) {
3896 ret = copy_siginfo_from_user_any(&kinfo, info);
3897 if (unlikely(ret))
3898 goto err;
3899
3900 ret = -EINVAL;
3901 if (unlikely(sig != kinfo.si_signo))
3902 goto err;
3903
3904 /* Only allow sending arbitrary signals to yourself. */
3905 ret = -EPERM;
3906 if ((task_pid(current) != pid) &&
3907 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3908 goto err;
3909 } else {
3910 prepare_kill_siginfo(sig, &kinfo);
3911 }
3912
3913 ret = kill_pid_info(sig, &kinfo, pid);
3914
3915 err:
3916 fdput(f);
3917 return ret;
3918 }
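/*
 * Illustrative userspace sketch (not part of the kernel build): libc
 * wrappers for the pidfd calls may be missing, so they are typically
 * reached via syscall(). Assumes headers that define SYS_pidfd_open
 * (Linux >= 5.3) and SYS_pidfd_send_signal (Linux >= 5.1).
 *
 *   #define _GNU_SOURCE
 *   #include <signal.h>
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *   #include <sys/syscall.h>
 *   #include <unistd.h>
 *
 *   int main(int argc, char **argv)
 *   {
 *           if (argc < 2)
 *                   return 1;
 *
 *           // A pidfd keeps referring to the same process even if the
 *           // numeric PID is later recycled.
 *           int pidfd = syscall(SYS_pidfd_open, (pid_t)atoi(argv[1]), 0);
 *           if (pidfd < 0) {
 *                   perror("pidfd_open");
 *                   return 1;
 *           }
 *           // info == NULL behaves like kill(pid, SIGTERM); flags must be 0.
 *           if (syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0) {
 *                   perror("pidfd_send_signal");
 *                   return 1;
 *           }
 *           close(pidfd);
 *           return 0;
 *   }
 */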
3919
3920 static int
3921 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3922 {
3923 struct task_struct *p;
3924 int error = -ESRCH;
3925
3926 rcu_read_lock();
3927 p = find_task_by_vpid(pid);
3928 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3929 error = check_kill_permission(sig, info, p);
3930 /*
3931 * The null signal is a permissions and process existence
3932 * probe. No signal is actually delivered.
3933 */
3934 if (!error && sig) {
3935 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3936 /*
3937 * If lock_task_sighand() failed we pretend the task
3938 * dies after receiving the signal. The window is tiny,
3939 * and the signal is private anyway.
3940 */
3941 if (unlikely(error == -ESRCH))
3942 error = 0;
3943 }
3944 }
3945 rcu_read_unlock();
3946
3947 return error;
3948 }
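/*
 * Illustrative userspace sketch (not part of the kernel build) of the
 * null-signal probe described in the comment above; process_exists() is
 * a hypothetical helper name used only for this example.
 *
 *   #include <errno.h>
 *   #include <signal.h>
 *   #include <stdbool.h>
 *
 *   static bool process_exists(pid_t pid)
 *   {
 *           if (kill(pid, 0) == 0)
 *                   return true;            // exists, and we may signal it
 *           return errno == EPERM;          // exists, but we lack permission
 *   }
 */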
3949
3950 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3951 {
3952 struct kernel_siginfo info;
3953
3954 clear_siginfo(&info);
3955 info.si_signo = sig;
3956 info.si_errno = 0;
3957 info.si_code = SI_TKILL;
3958 info.si_pid = task_tgid_vnr(current);
3959 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3960
3961 return do_send_specific(tgid, pid, sig, &info);
3962 }
3963
3964 /**
3965 * sys_tgkill - send signal to one specific thread
3966 * @tgid: the thread group ID of the thread
3967 * @pid: the PID of the thread
3968 * @sig: signal to be sent
3969 *
3970 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3971 * exists but no longer belongs to the target process. This check avoids
3972 * signaling the wrong task when a thread exits and its PID is reused.
3973 */
3974 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3975 {
3976 /* This is only valid for single tasks */
3977 if (pid <= 0 || tgid <= 0)
3978 return -EINVAL;
3979
3980 return do_tkill(tgid, pid, sig);
3981 }
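/*
 * Illustrative userspace sketch (not part of the kernel build):
 * signal_thread() is a hypothetical helper showing how a runtime directs
 * a signal at one thread of its own process; supplying the tgid lets the
 * kernel fail with -ESRCH if the tid has been recycled elsewhere.
 *
 *   #define _GNU_SOURCE
 *   #include <signal.h>
 *   #include <sys/syscall.h>
 *   #include <unistd.h>
 *
 *   static int signal_thread(pid_t tid, int sig)
 *   {
 *           return syscall(SYS_tgkill, getpid(), tid, sig);
 *   }
 */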
3982
3983 /**
3984 * sys_tkill - send signal to one specific task
3985 * @pid: the PID of the task
3986 * @sig: signal to be sent
3987 *
3988 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3989 */
3990 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3991 {
3992 /* This is only valid for single tasks */
3993 if (pid <= 0)
3994 return -EINVAL;
3995
3996 return do_tkill(0, pid, sig);
3997 }
3998
3999 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4000 {
4001 /* Not even root can pretend to send signals from the kernel.
4002 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4003 */
4004 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4005 (task_pid_vnr(current) != pid))
4006 return -EPERM;
4007
4008 /* POSIX.1b doesn't mention process groups. */
4009 return kill_proc_info(sig, info, pid);
4010 }
4011
4012 /**
4013 * sys_rt_sigqueueinfo - send signal information to a process
4014 * @pid: the PID of the thread
4015 * @sig: signal to be sent
4016 * @uinfo: signal info to be sent
4017 */
4018 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4019 siginfo_t __user *, uinfo)
4020 {
4021 kernel_siginfo_t info;
4022 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4023 if (unlikely(ret))
4024 return ret;
4025 return do_rt_sigqueueinfo(pid, sig, &info);
4026 }
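/*
 * Illustrative userspace sketch (not part of the kernel build): the usual
 * entry point is sigqueue(3), which queues a signal with a payload and
 * lets libc fill siginfo with si_code = SI_QUEUE (a negative code), so it
 * passes the impersonation check in do_rt_sigqueueinfo(). notify() is a
 * hypothetical helper name used only for this example.
 *
 *   #include <signal.h>
 *   #include <stdio.h>
 *
 *   static int notify(pid_t pid, int value)
 *   {
 *           union sigval sv = { .sival_int = value };
 *
 *           if (sigqueue(pid, SIGRTMIN, sv) < 0) {
 *                   perror("sigqueue");
 *                   return -1;
 *           }
 *           return 0;
 *   }
 */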
4027
4028 #ifdef CONFIG_COMPAT
4029 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4030 compat_pid_t, pid,
4031 int, sig,
4032 struct compat_siginfo __user *, uinfo)
4033 {
4034 kernel_siginfo_t info;
4035 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4036 if (unlikely(ret))
4037 return ret;
4038 return do_rt_sigqueueinfo(pid, sig, &info);
4039 }
4040 #endif
4041
4042 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4043 {
4044 /* This is only valid for single tasks */
4045 if (pid <= 0 || tgid <= 0)
4046 return -EINVAL;
4047
4048 /* Not even root can pretend to send signals from the kernel.
4049 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4050 */
4051 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4052 (task_pid_vnr(current) != pid))
4053 return -EPERM;
4054
4055 return do_send_specific(tgid, pid, sig, info);
4056 }
4057
4058 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4059 siginfo_t __user *, uinfo)
4060 {
4061 kernel_siginfo_t info;
4062 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4063 if (unlikely(ret))
4064 return ret;
4065 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4066 }
4067
4068 #ifdef CONFIG_COMPAT
4069 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4070 compat_pid_t, tgid,
4071 compat_pid_t, pid,
4072 int, sig,
4073 struct compat_siginfo __user *, uinfo)
4074 {
4075 kernel_siginfo_t info;
4076 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4077 if (unlikely(ret))
4078 return ret;
4079 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4080 }
4081 #endif
4082
4083 /*
4084 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4085 */
4086 void kernel_sigaction(int sig, __sighandler_t action)
4087 {
4088 spin_lock_irq(&current->sighand->siglock);
4089 current->sighand->action[sig - 1].sa.sa_handler = action;
4090 if (action == SIG_IGN) {
4091 sigset_t mask;
4092
4093 sigemptyset(&mask);
4094 sigaddset(&mask, sig);
4095
4096 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4097 flush_sigqueue_mask(&mask, &current->pending);
4098 recalc_sigpending();
4099 }
4100 spin_unlock_irq(&current->sighand->siglock);
4101 }
4102 EXPORT_SYMBOL(kernel_sigaction);
4103
4104 void __weak sigaction_compat_abi(struct k_sigaction *act,
4105 struct k_sigaction *oact)
4106 {
4107 }
4108
4109 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4110 {
4111 struct task_struct *p = current, *t;
4112 struct k_sigaction *k;
4113 sigset_t mask;
4114
4115 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4116 return -EINVAL;
4117
4118 k = &p->sighand->action[sig-1];
4119
4120 spin_lock_irq(&p->sighand->siglock);
4121 if (k->sa.sa_flags & SA_IMMUTABLE) {
4122 spin_unlock_irq(&p->sighand->siglock);
4123 return -EINVAL;
4124 }
4125 if (oact)
4126 *oact = *k;
4127
4128 /*
4129 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4130 * e.g. by having an architecture use the bit in their uapi.
4131 */
4132 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4133
4134 /*
4135 * Clear unknown flag bits in order to allow userspace to detect missing
4136 * support for flag bits and to allow the kernel to use non-uapi bits
4137 * internally.
4138 */
4139 if (act)
4140 act->sa.sa_flags &= UAPI_SA_FLAGS;
4141 if (oact)
4142 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4143
4144 sigaction_compat_abi(act, oact);
4145
4146 if (act) {
4147 sigdelsetmask(&act->sa.sa_mask,
4148 sigmask(SIGKILL) | sigmask(SIGSTOP));
4149 *k = *act;
4150 /*
4151 * POSIX 3.3.1.3:
4152 * "Setting a signal action to SIG_IGN for a signal that is
4153 * pending shall cause the pending signal to be discarded,
4154 * whether or not it is blocked."
4155 *
4156 * "Setting a signal action to SIG_DFL for a signal that is
4157 * pending and whose default action is to ignore the signal
4158 * (for example, SIGCHLD), shall cause the pending signal to
4159 * be discarded, whether or not it is blocked"
4160 */
4161 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4162 sigemptyset(&mask);
4163 sigaddset(&mask, sig);
4164 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4165 for_each_thread(p, t)
4166 flush_sigqueue_mask(&mask, &t->pending);
4167 }
4168 }
4169
4170 spin_unlock_irq(&p->sighand->siglock);
4171 return 0;
4172 }
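/*
 * Illustrative userspace sketch (not part of the kernel build): because
 * unknown sa_flags bits are cleared above, userspace can probe for support
 * of a flag by installing it and reading the action back. A real probe
 * would save and restore the previous disposition; sa_flag_supported() is
 * a hypothetical helper name used only for this example.
 *
 *   #include <signal.h>
 *   #include <stdbool.h>
 *   #include <string.h>
 *
 *   static bool sa_flag_supported(int sig, int flag)
 *   {
 *           struct sigaction sa, old;
 *
 *           memset(&sa, 0, sizeof(sa));
 *           sa.sa_handler = SIG_DFL;
 *           sa.sa_flags = flag;
 *           if (sigaction(sig, &sa, NULL))
 *                   return false;
 *           if (sigaction(sig, NULL, &old))
 *                   return false;
 *           return (old.sa_flags & flag) != 0;  // bit survived => supported
 *   }
 */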
4173
4174 #ifdef CONFIG_DYNAMIC_SIGFRAME
4175 static inline void sigaltstack_lock(void)
4176 __acquires(&current->sighand->siglock)
4177 {
4178 spin_lock_irq(&current->sighand->siglock);
4179 }
4180
4181 static inline void sigaltstack_unlock(void)
4182 __releases(&current->sighand->siglock)
4183 {
4184 spin_unlock_irq(&current->sighand->siglock);
4185 }
4186 #else
4187 static inline void sigaltstack_lock(void) { }
4188 static inline void sigaltstack_unlock(void) { }
4189 #endif
4190
4191 static int
4192 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4193 size_t min_ss_size)
4194 {
4195 struct task_struct *t = current;
4196 int ret = 0;
4197
4198 if (oss) {
4199 memset(oss, 0, sizeof(stack_t));
4200 oss->ss_sp = (void __user *) t->sas_ss_sp;
4201 oss->ss_size = t->sas_ss_size;
4202 oss->ss_flags = sas_ss_flags(sp) |
4203 (current->sas_ss_flags & SS_FLAG_BITS);
4204 }
4205
4206 if (ss) {
4207 void __user *ss_sp = ss->ss_sp;
4208 size_t ss_size = ss->ss_size;
4209 unsigned ss_flags = ss->ss_flags;
4210 int ss_mode;
4211
4212 if (unlikely(on_sig_stack(sp)))
4213 return -EPERM;
4214
4215 ss_mode = ss_flags & ~SS_FLAG_BITS;
4216 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4217 ss_mode != 0))
4218 return -EINVAL;
4219
4220 /*
4221 * Return before taking any locks if no actual
4222 * sigaltstack changes were requested.
4223 */
4224 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4225 t->sas_ss_size == ss_size &&
4226 t->sas_ss_flags == ss_flags)
4227 return 0;
4228
4229 sigaltstack_lock();
4230 if (ss_mode == SS_DISABLE) {
4231 ss_size = 0;
4232 ss_sp = NULL;
4233 } else {
4234 if (unlikely(ss_size < min_ss_size))
4235 ret = -ENOMEM;
4236 if (!sigaltstack_size_valid(ss_size))
4237 ret = -ENOMEM;
4238 }
4239 if (!ret) {
4240 t->sas_ss_sp = (unsigned long) ss_sp;
4241 t->sas_ss_size = ss_size;
4242 t->sas_ss_flags = ss_flags;
4243 }
4244 sigaltstack_unlock();
4245 }
4246 return ret;
4247 }
4248
4249 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4250 {
4251 stack_t new, old;
4252 int err;
4253 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4254 return -EFAULT;
4255 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4256 current_user_stack_pointer(),
4257 MINSIGSTKSZ);
4258 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4259 err = -EFAULT;
4260 return err;
4261 }
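/*
 * Illustrative userspace sketch (not part of the kernel build): pairing
 * sigaltstack() with an SA_ONSTACK handler lets a SIGSEGV handler run even
 * after the normal stack has overflowed. Error handling is omitted;
 * install_altstack_handler() is a hypothetical helper name.
 *
 *   #include <signal.h>
 *   #include <stdlib.h>
 *   #include <string.h>
 *
 *   static void install_altstack_handler(void (*handler)(int))
 *   {
 *           stack_t ss;
 *           struct sigaction sa;
 *
 *           ss.ss_sp = malloc(SIGSTKSZ);    // dedicated stack for the handler
 *           ss.ss_size = SIGSTKSZ;
 *           ss.ss_flags = 0;
 *           sigaltstack(&ss, NULL);
 *
 *           memset(&sa, 0, sizeof(sa));
 *           sa.sa_handler = handler;
 *           sa.sa_flags = SA_ONSTACK;       // run this handler on the alternate stack
 *           sigemptyset(&sa.sa_mask);
 *           sigaction(SIGSEGV, &sa, NULL);
 *   }
 */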
4262
4263 int restore_altstack(const stack_t __user *uss)
4264 {
4265 stack_t new;
4266 if (copy_from_user(&new, uss, sizeof(stack_t)))
4267 return -EFAULT;
4268 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4269 MINSIGSTKSZ);
4270 /* squash all but EFAULT for now */
4271 return 0;
4272 }
4273
4274 int __save_altstack(stack_t __user *uss, unsigned long sp)
4275 {
4276 struct task_struct *t = current;
4277 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4278 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4279 __put_user(t->sas_ss_size, &uss->ss_size);
4280 return err;
4281 }
4282
4283 #ifdef CONFIG_COMPAT
4284 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4285 compat_stack_t __user *uoss_ptr)
4286 {
4287 stack_t uss, uoss;
4288 int ret;
4289
4290 if (uss_ptr) {
4291 compat_stack_t uss32;
4292 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4293 return -EFAULT;
4294 uss.ss_sp = compat_ptr(uss32.ss_sp);
4295 uss.ss_flags = uss32.ss_flags;
4296 uss.ss_size = uss32.ss_size;
4297 }
4298 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4299 compat_user_stack_pointer(),
4300 COMPAT_MINSIGSTKSZ);
4301 if (ret >= 0 && uoss_ptr) {
4302 compat_stack_t old;
4303 memset(&old, 0, sizeof(old));
4304 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4305 old.ss_flags = uoss.ss_flags;
4306 old.ss_size = uoss.ss_size;
4307 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4308 ret = -EFAULT;
4309 }
4310 return ret;
4311 }
4312
4313 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4314 const compat_stack_t __user *, uss_ptr,
4315 compat_stack_t __user *, uoss_ptr)
4316 {
4317 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4318 }
4319
4320 int compat_restore_altstack(const compat_stack_t __user *uss)
4321 {
4322 int err = do_compat_sigaltstack(uss, NULL);
4323 /* squash all but -EFAULT for now */
4324 return err == -EFAULT ? err : 0;
4325 }
4326
4327 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4328 {
4329 int err;
4330 struct task_struct *t = current;
4331 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4332 &uss->ss_sp) |
4333 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4334 __put_user(t->sas_ss_size, &uss->ss_size);
4335 return err;
4336 }
4337 #endif
4338
4339 #ifdef __ARCH_WANT_SYS_SIGPENDING
4340
4341 /**
4342 * sys_sigpending - examine pending signals
4343 * @uset: where mask of pending signal is returned
4344 */
4345 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4346 {
4347 sigset_t set;
4348
4349 if (sizeof(old_sigset_t) > sizeof(*uset))
4350 return -EINVAL;
4351
4352 do_sigpending(&set);
4353
4354 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4355 return -EFAULT;
4356
4357 return 0;
4358 }
4359
4360 #ifdef CONFIG_COMPAT
4361 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4362 {
4363 sigset_t set;
4364
4365 do_sigpending(&set);
4366
4367 return put_user(set.sig[0], set32);
4368 }
4369 #endif
4370
4371 #endif
4372
4373 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4374 /**
4375 * sys_sigprocmask - examine and change blocked signals
4376 * @how: whether to add, remove, or set signals
4377 * @nset: signals to add or remove (if non-null)
4378 * @oset: previous value of signal mask if non-null
4379 *
4380 * Some platforms have their own version with special arguments;
4381 * others support only sys_rt_sigprocmask.
4382 */
4383
4384 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4385 old_sigset_t __user *, oset)
4386 {
4387 old_sigset_t old_set, new_set;
4388 sigset_t new_blocked;
4389
4390 old_set = current->blocked.sig[0];
4391
4392 if (nset) {
4393 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4394 return -EFAULT;
4395
4396 new_blocked = current->blocked;
4397
4398 switch (how) {
4399 case SIG_BLOCK:
4400 sigaddsetmask(&new_blocked, new_set);
4401 break;
4402 case SIG_UNBLOCK:
4403 sigdelsetmask(&new_blocked, new_set);
4404 break;
4405 case SIG_SETMASK:
4406 new_blocked.sig[0] = new_set;
4407 break;
4408 default:
4409 return -EINVAL;
4410 }
4411
4412 set_current_blocked(&new_blocked);
4413 }
4414
4415 if (oset) {
4416 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4417 return -EFAULT;
4418 }
4419
4420 return 0;
4421 }
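/*
 * Illustrative userspace sketch (not part of the kernel build): the classic
 * block/restore pattern. glibc's sigprocmask() normally goes through
 * rt_sigprocmask(); the legacy syscall above only manipulates the first
 * word of the blocked mask. with_sigint_blocked() is a hypothetical helper
 * name used only for this example.
 *
 *   #include <signal.h>
 *
 *   static void with_sigint_blocked(void (*fn)(void))
 *   {
 *           sigset_t set, old;
 *
 *           sigemptyset(&set);
 *           sigaddset(&set, SIGINT);
 *           sigprocmask(SIG_BLOCK, &set, &old);   // add SIGINT to the mask
 *           fn();                                 // SIGINT stays pending here
 *           sigprocmask(SIG_SETMASK, &old, NULL); // restore the previous mask
 *   }
 */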
4422 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4423
4424 #ifndef CONFIG_ODD_RT_SIGACTION
4425 /**
4426 * sys_rt_sigaction - alter an action taken by a process
4427 * @sig: signal to be sent
4428 * @act: new sigaction
4429 * @oact: used to save the previous sigaction
4430 * @sigsetsize: size of sigset_t type
4431 */
4432 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4433 const struct sigaction __user *, act,
4434 struct sigaction __user *, oact,
4435 size_t, sigsetsize)
4436 {
4437 struct k_sigaction new_sa, old_sa;
4438 int ret;
4439
4440 /* XXX: Don't preclude handling different sized sigset_t's. */
4441 if (sigsetsize != sizeof(sigset_t))
4442 return -EINVAL;
4443
4444 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4445 return -EFAULT;
4446
4447 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4448 if (ret)
4449 return ret;
4450
4451 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4452 return -EFAULT;
4453
4454 return 0;
4455 }
4456 #ifdef CONFIG_COMPAT
4457 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4458 const struct compat_sigaction __user *, act,
4459 struct compat_sigaction __user *, oact,
4460 compat_size_t, sigsetsize)
4461 {
4462 struct k_sigaction new_ka, old_ka;
4463 #ifdef __ARCH_HAS_SA_RESTORER
4464 compat_uptr_t restorer;
4465 #endif
4466 int ret;
4467
4468 /* XXX: Don't preclude handling different sized sigset_t's. */
4469 if (sigsetsize != sizeof(compat_sigset_t))
4470 return -EINVAL;
4471
4472 if (act) {
4473 compat_uptr_t handler;
4474 ret = get_user(handler, &act->sa_handler);
4475 new_ka.sa.sa_handler = compat_ptr(handler);
4476 #ifdef __ARCH_HAS_SA_RESTORER
4477 ret |= get_user(restorer, &act->sa_restorer);
4478 new_ka.sa.sa_restorer = compat_ptr(restorer);
4479 #endif
4480 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4481 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4482 if (ret)
4483 return -EFAULT;
4484 }
4485
4486 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4487 if (!ret && oact) {
4488 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4489 &oact->sa_handler);
4490 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4491 sizeof(oact->sa_mask));
4492 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4493 #ifdef __ARCH_HAS_SA_RESTORER
4494 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4495 &oact->sa_restorer);
4496 #endif
4497 }
4498 return ret;
4499 }
4500 #endif
4501 #endif /* !CONFIG_ODD_RT_SIGACTION */
4502
4503 #ifdef CONFIG_OLD_SIGACTION
4504 SYSCALL_DEFINE3(sigaction, int, sig,
4505 const struct old_sigaction __user *, act,
4506 struct old_sigaction __user *, oact)
4507 {
4508 struct k_sigaction new_ka, old_ka;
4509 int ret;
4510
4511 if (act) {
4512 old_sigset_t mask;
4513 if (!access_ok(act, sizeof(*act)) ||
4514 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4515 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4516 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4517 __get_user(mask, &act->sa_mask))
4518 return -EFAULT;
4519 #ifdef __ARCH_HAS_KA_RESTORER
4520 new_ka.ka_restorer = NULL;
4521 #endif
4522 siginitset(&new_ka.sa.sa_mask, mask);
4523 }
4524
4525 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4526
4527 if (!ret && oact) {
4528 if (!access_ok(oact, sizeof(*oact)) ||
4529 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4530 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4531 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4532 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4533 return -EFAULT;
4534 }
4535
4536 return ret;
4537 }
4538 #endif
4539 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4540 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4541 const struct compat_old_sigaction __user *, act,
4542 struct compat_old_sigaction __user *, oact)
4543 {
4544 struct k_sigaction new_ka, old_ka;
4545 int ret;
4546 compat_old_sigset_t mask;
4547 compat_uptr_t handler, restorer;
4548
4549 if (act) {
4550 if (!access_ok(act, sizeof(*act)) ||
4551 __get_user(handler, &act->sa_handler) ||
4552 __get_user(restorer, &act->sa_restorer) ||
4553 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4554 __get_user(mask, &act->sa_mask))
4555 return -EFAULT;
4556
4557 #ifdef __ARCH_HAS_KA_RESTORER
4558 new_ka.ka_restorer = NULL;
4559 #endif
4560 new_ka.sa.sa_handler = compat_ptr(handler);
4561 new_ka.sa.sa_restorer = compat_ptr(restorer);
4562 siginitset(&new_ka.sa.sa_mask, mask);
4563 }
4564
4565 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4566
4567 if (!ret && oact) {
4568 if (!access_ok(oact, sizeof(*oact)) ||
4569 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4570 &oact->sa_handler) ||
4571 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4572 &oact->sa_restorer) ||
4573 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4574 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4575 return -EFAULT;
4576 }
4577 return ret;
4578 }
4579 #endif
4580
4581 #ifdef CONFIG_SGETMASK_SYSCALL
4582
4583 /*
4584 * For backwards compatibility. Functionality superseded by sigprocmask.
4585 */
4586 SYSCALL_DEFINE0(sgetmask)
4587 {
4588 /* SMP safe */
4589 return current->blocked.sig[0];
4590 }
4591
4592 SYSCALL_DEFINE1(ssetmask, int, newmask)
4593 {
4594 int old = current->blocked.sig[0];
4595 sigset_t newset;
4596
4597 siginitset(&newset, newmask);
4598 set_current_blocked(&newset);
4599
4600 return old;
4601 }
4602 #endif /* CONFIG_SGETMASK_SYSCALL */
4603
4604 #ifdef __ARCH_WANT_SYS_SIGNAL
4605 /*
4606 * For backwards compatibility. Functionality superseded by sigaction.
4607 */
4608 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4609 {
4610 struct k_sigaction new_sa, old_sa;
4611 int ret;
4612
4613 new_sa.sa.sa_handler = handler;
4614 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4615 sigemptyset(&new_sa.sa.sa_mask);
4616
4617 ret = do_sigaction(sig, &new_sa, &old_sa);
4618
4619 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4620 }
4621 #endif /* __ARCH_WANT_SYS_SIGNAL */
4622
4623 #ifdef __ARCH_WANT_SYS_PAUSE
4624
4625 SYSCALL_DEFINE0(pause)
4626 {
4627 while (!signal_pending(current)) {
4628 __set_current_state(TASK_INTERRUPTIBLE);
4629 schedule();
4630 }
4631 return -ERESTARTNOHAND;
4632 }
4633
4634 #endif
4635
4636 static int sigsuspend(sigset_t *set)
4637 {
4638 current->saved_sigmask = current->blocked;
4639 set_current_blocked(set);
4640
4641 while (!signal_pending(current)) {
4642 __set_current_state(TASK_INTERRUPTIBLE);
4643 schedule();
4644 }
4645 set_restore_sigmask();
4646 return -ERESTARTNOHAND;
4647 }
4648
4649 /**
4650 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4651 * until a signal is received
4652 * @unewset: new signal mask value
4653 * @sigsetsize: size of sigset_t type
4654 */
4655 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4656 {
4657 sigset_t newset;
4658
4659 /* XXX: Don't preclude handling different sized sigset_t's. */
4660 if (sigsetsize != sizeof(sigset_t))
4661 return -EINVAL;
4662
4663 if (copy_from_user(&newset, unewset, sizeof(newset)))
4664 return -EFAULT;
4665 return sigsuspend(&newset);
4666 }
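/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * race-free "block, test, sigsuspend" pattern. The handler that sets
 * got_signal is omitted; the point is that the mask swap and the sleep
 * happen atomically in the kernel, so a signal arriving between the flag
 * test and the wait is not lost.
 *
 *   #include <signal.h>
 *
 *   static volatile sig_atomic_t got_signal;
 *
 *   static void wait_for_signal(int sig)
 *   {
 *           sigset_t block, old;
 *
 *           sigemptyset(&block);
 *           sigaddset(&block, sig);
 *           sigprocmask(SIG_BLOCK, &block, &old); // sig can't fire during the test
 *
 *           while (!got_signal)
 *                   sigsuspend(&old);             // atomically unblock sig and sleep
 *
 *           sigprocmask(SIG_SETMASK, &old, NULL); // restore the original mask
 *   }
 */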
4667
4668 #ifdef CONFIG_COMPAT
4669 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4670 {
4671 sigset_t newset;
4672
4673 /* XXX: Don't preclude handling different sized sigset_t's. */
4674 if (sigsetsize != sizeof(sigset_t))
4675 return -EINVAL;
4676
4677 if (get_compat_sigset(&newset, unewset))
4678 return -EFAULT;
4679 return sigsuspend(&newset);
4680 }
4681 #endif
4682
4683 #ifdef CONFIG_OLD_SIGSUSPEND
4684 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4685 {
4686 sigset_t blocked;
4687 siginitset(&blocked, mask);
4688 return sigsuspend(&blocked);
4689 }
4690 #endif
4691 #ifdef CONFIG_OLD_SIGSUSPEND3
4692 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4693 {
4694 sigset_t blocked;
4695 siginitset(&blocked, mask);
4696 return sigsuspend(&blocked);
4697 }
4698 #endif
4699
4700 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4701 {
4702 return NULL;
4703 }
4704
4705 static inline void siginfo_buildtime_checks(void)
4706 {
4707 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4708
4709 /* Verify the offsets in the two siginfos match */
4710 #define CHECK_OFFSET(field) \
4711 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4712
4713 /* kill */
4714 CHECK_OFFSET(si_pid);
4715 CHECK_OFFSET(si_uid);
4716
4717 /* timer */
4718 CHECK_OFFSET(si_tid);
4719 CHECK_OFFSET(si_overrun);
4720 CHECK_OFFSET(si_value);
4721
4722 /* rt */
4723 CHECK_OFFSET(si_pid);
4724 CHECK_OFFSET(si_uid);
4725 CHECK_OFFSET(si_value);
4726
4727 /* sigchld */
4728 CHECK_OFFSET(si_pid);
4729 CHECK_OFFSET(si_uid);
4730 CHECK_OFFSET(si_status);
4731 CHECK_OFFSET(si_utime);
4732 CHECK_OFFSET(si_stime);
4733
4734 /* sigfault */
4735 CHECK_OFFSET(si_addr);
4736 CHECK_OFFSET(si_trapno);
4737 CHECK_OFFSET(si_addr_lsb);
4738 CHECK_OFFSET(si_lower);
4739 CHECK_OFFSET(si_upper);
4740 CHECK_OFFSET(si_pkey);
4741 CHECK_OFFSET(si_perf_data);
4742 CHECK_OFFSET(si_perf_type);
4743 CHECK_OFFSET(si_perf_flags);
4744
4745 /* sigpoll */
4746 CHECK_OFFSET(si_band);
4747 CHECK_OFFSET(si_fd);
4748
4749 /* sigsys */
4750 CHECK_OFFSET(si_call_addr);
4751 CHECK_OFFSET(si_syscall);
4752 CHECK_OFFSET(si_arch);
4753 #undef CHECK_OFFSET
4754
4755 /* usb asyncio */
4756 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4757 offsetof(struct siginfo, si_addr));
4758 if (sizeof(int) == sizeof(void __user *)) {
4759 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4760 sizeof(void __user *));
4761 } else {
4762 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4763 sizeof_field(struct siginfo, si_uid)) !=
4764 sizeof(void __user *));
4765 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4766 offsetof(struct siginfo, si_uid));
4767 }
4768 #ifdef CONFIG_COMPAT
4769 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4770 offsetof(struct compat_siginfo, si_addr));
4771 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4772 sizeof(compat_uptr_t));
4773 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4774 sizeof_field(struct siginfo, si_pid));
4775 #endif
4776 }
4777
4778 void __init signals_init(void)
4779 {
4780 siginfo_buildtime_checks();
4781
4782 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4783 }
4784
4785 #ifdef CONFIG_KGDB_KDB
4786 #include <linux/kdb.h>
4787 /*
4788 * kdb_send_sig - Allows kdb to send signals without exposing
4789 * signal internals. This function checks if the required locks are
4790 * available before calling the main signal code, to avoid kdb
4791 * deadlocks.
4792 */
4793 void kdb_send_sig(struct task_struct *t, int sig)
4794 {
4795 static struct task_struct *kdb_prev_t;
4796 int new_t, ret;
4797 if (!spin_trylock(&t->sighand->siglock)) {
4798 kdb_printf("Can't do kill command now.\n"
4799 "The sigmask lock is held somewhere else in "
4800 "kernel, try again later\n");
4801 return;
4802 }
4803 new_t = kdb_prev_t != t;
4804 kdb_prev_t = t;
4805 if (!task_is_running(t) && new_t) {
4806 spin_unlock(&t->sighand->siglock);
4807 kdb_printf("Process is not RUNNING, sending a signal from "
4808 "kdb risks deadlock\n"
4809 "on the run queue locks. "
4810 "The signal has _not_ been sent.\n"
4811 "Reissue the kill command if you want to risk "
4812 "the deadlock.\n");
4813 return;
4814 }
4815 ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4816 spin_unlock(&t->sighand->siglock);
4817 if (ret)
4818 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4819 sig, t->pid);
4820 else
4821 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4822 }
4823 #endif /* CONFIG_KGDB_KDB */
4824