// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *              Changes to use preallocated sigqueue structures
 *              to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
        return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
        /* Is it explicitly or implicitly ignored? */
        return handler == SIG_IGN ||
               (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
        void __user *handler;

        handler = sig_handler(t, sig);

        /* SIGKILL and SIGSTOP may not be sent to the global init */
        if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
                return true;

        if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
            handler == SIG_DFL && !(force && sig_kernel_only(sig)))
                return true;

        /* Only allow kernel generated signals to this kthread */
        if (unlikely((t->flags & PF_KTHREAD) &&
                     (handler == SIG_KTHREAD_KERNEL) && !force))
                return true;

        return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return false;

        /*
         * Tracers may want to know about even ignored signals, unless it
         * is SIGKILL, which can't be reported anyway but can be ignored
         * by a SIGNAL_UNKILLABLE task.
         */
        if (t->ptrace && sig != SIGKILL)
                return false;

        return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
        if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked) ||
            cgroup_task_frozen(t)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return true;
        }

        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here; only callers that know it is safe
         * to clear it do so.
         */
        return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
        if (!recalc_sigpending_tsk(current) && !freezing(current) &&
            !klp_patch_pending(current))
                clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
        /* Have any signals or users of TIF_SIGPENDING been delayed
         * until after fork?
         */
        spin_lock_irq(&current->sighand->siglock);
        set_tsk_thread_flag(current, TIF_SIGPENDING);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
        (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
         sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;

        /*
         * Handle the first word specially: it contains the
         * synchronous signals that need to be dequeued first.
         */
        x = *s &~ *m;
        if (x) {
                if (x & SYNCHRONOUS_MASK)
                        x &= SYNCHRONOUS_MASK;
                sig = ffz(~x) + 1;
                return sig;
        }

        switch (_NSIG_WORDS) {
        default:
                for (i = 1; i < _NSIG_WORDS; ++i) {
                        x = *++s &~ *++m;
                        if (!x)
                                continue;
                        sig = ffz(~x) + i*_NSIG_BPW + 1;
                        break;
                }
                break;

        case 2:
                x = s[1] &~ m[1];
                if (!x)
                        break;
                sig = ffz(~x) + _NSIG_BPW + 1;
                break;

        case 1:
                /* Nothing to do */
                break;
        }

        return sig;
}
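
/*
 * Illustrative sketch (not part of the original file; assumes the caller
 * already holds the relevant siglock): the SYNCHRONOUS_MASK filtering
 * above makes a pending fault signal win over a lower-numbered
 * asynchronous signal in the same word. On most architectures
 * SIGUSR1 == 10 and SIGSEGV == 11, so:
 *
 *      sigset_t none;
 *
 *      sigemptyset(&none);
 *      sigaddset(&pending->signal, SIGUSR1);
 *      sigaddset(&pending->signal, SIGSEGV);
 *      WARN_ON(next_signal(pending, &none) != SIGSEGV);
 */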

static inline void print_dropped_signal(int sig)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!print_fatal_signals)
                return;

        if (!__ratelimit(&ratelimit_state))
                return;

        pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
                current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set the bits in @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
        BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
                        JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
        BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

        if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
                return false;

        if (mask & JOBCTL_STOP_SIGMASK)
                task->jobctl &= ~JOBCTL_STOP_SIGMASK;

        task->jobctl |= mask;
        return true;
}
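
/*
 * Illustrative usage sketch (not part of the original file; it is
 * modelled on ptrace_trap_notify() further down and must run with the
 * target's sighand->siglock held): a caller could request a trap and
 * wake the tracee only when the request was not discarded because the
 * task is dying.
 *
 *      if (task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY))
 *              ptrace_signal_wake_up(t, true);
 */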

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
        if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
                task->jobctl &= ~JOBCTL_TRAPPING;
                smp_mb();       /* advised by wake_up_bit() */
                wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
        }
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
        BUG_ON(mask & ~JOBCTL_PENDING_MASK);

        if (mask & JOBCTL_STOP_PENDING)
                mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

        task->jobctl &= ~mask;

        if (!(task->jobctl & JOBCTL_PENDING_MASK))
                task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
        struct signal_struct *sig = task->signal;
        bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

        WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

        task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

        if (!consume)
                return false;

        if (!WARN_ON_ONCE(sig->group_stop_count == 0))
                sig->group_stop_count--;

        /*
         * Tell the caller to notify completion iff we are entering into a
         * fresh group stop.  Read comment in do_signal_stop() for details.
         */
        if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
                signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
                return true;
        }
        return false;
}

void task_join_group_stop(struct task_struct *task)
{
        unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
        struct signal_struct *sig = current->signal;

        if (sig->group_stop_count) {
                sig->group_stop_count++;
                mask |= JOBCTL_STOP_CONSUME;
        } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
                return;

        /* Have the new thread join an on-going signal group stop */
        task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
        struct sigqueue *q = NULL;
        struct user_struct *user;
        int sigpending;

        /*
         * Protect access to @t credentials. This can go away when all
         * callers hold rcu read lock.
         *
         * NOTE! A pending signal will hold on to the user refcount,
         * and we get/put the refcount only when the sigpending count
         * changes from/to zero.
         */
        rcu_read_lock();
        user = __task_cred(t)->user;
        sigpending = atomic_inc_return(&user->sigpending);
        if (sigpending == 1)
                get_uid(user);
        rcu_read_unlock();

        if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        } else {
                print_dropped_signal(sig);
        }

        if (unlikely(q == NULL)) {
                if (atomic_dec_and_test(&user->sigpending))
                        free_uid(user);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
                q->user = user;
        }

        return q;
}
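
/*
 * Illustrative caller sketch (not part of the original file; it mirrors
 * __send_signal() below): signal-sending context runs under the siglock
 * with interrupts disabled, so the allocation must not sleep.
 *
 *      q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
 *      if (q)
 *              list_add_tail(&q->list, &pending->list);
 */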

static void __sigqueue_free(struct sigqueue *q)
{
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        if (atomic_dec_and_test(&q->user->sigpending))
                free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q;

        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue, list);
                list_del_init(&q->list);
                __sigqueue_free(q);
        }
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
        sigset_t signal, retain;
        struct sigqueue *q, *n;

        signal = pending->signal;
        sigemptyset(&retain);

        list_for_each_entry_safe(q, n, &pending->list, list) {
                int sig = q->info.si_signo;

                if (likely(q->info.si_code != SI_TIMER)) {
                        sigaddset(&retain, sig);
                } else {
                        sigdelset(&signal, sig);
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }

        sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
        struct task_struct *tsk = current;
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        __flush_itimer_signals(&tsk->pending);
        __flush_itimer_signals(&tsk->signal->shared_pending);
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
        int i;

        for (i = 0; i < _NSIG; ++i)
                t->sighand->action[i].sa.sa_handler = SIG_IGN;

        flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
        int i;
        struct k_sigaction *ka = &t->sighand->action[0];
        for (i = _NSIG ; i != 0 ; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
                ka->sa.sa_restorer = NULL;
#endif
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
        void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
        if (is_global_init(tsk))
                return true;

        if (handler != SIG_IGN && handler != SIG_DFL)
                return false;

        /* if ptraced, let the tracer determine */
        return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
                           bool *resched_timer)
{
        struct sigqueue *q, *first = NULL;

        /*
         * Collect the siginfo appropriate to this signal.  Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first)
                                goto still_pending;
                        first = q;
                }
        }

        sigdelset(&list->signal, sig);

        if (first) {
still_pending:
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);

                *resched_timer =
                        (first->flags & SIGQUEUE_PREALLOC) &&
                        (info->si_code == SI_TIMER) &&
                        (info->si_sys_private);

                __sigqueue_free(first);
        } else {
                /*
                 * Ok, it wasn't in the queue.  This must be
                 * a fast-pathed signal or we must have been
                 * out of queue space.  So zero out the info.
                 */
                clear_siginfo(info);
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = SI_USER;
                info->si_pid = 0;
                info->si_uid = 0;
        }
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                            kernel_siginfo_t *info, bool *resched_timer)
{
        int sig = next_signal(pending, mask);

        if (sig)
                collect_signal(sig, pending, info, resched_timer);
        return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
        bool resched_timer = false;
        int signr;

        /* We only dequeue private signals from ourselves; we don't let
         * signalfd steal them
         */
        signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
                /*
                 * itimer signal ?
                 *
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case. This is
                 * compliant with the old way of self-restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once. Changing the restart behaviour to
                 * restart the timer in the signal dequeue path is
                 * reducing the timer noise on heavily loaded !highres
                 * systems too.
                 */
                if (unlikely(signr == SIGALRM)) {
                        struct hrtimer *tmr = &tsk->signal->real_timer;

                        if (!hrtimer_is_queued(tmr) &&
                            tsk->signal->it_real_incr != 0) {
                                hrtimer_forward(tmr, tmr->base->get_time(),
                                                tsk->signal->it_real_incr);
                                hrtimer_restart(tmr);
                        }
                }
#endif
        }

        recalc_sigpending();
        if (!signr)
                return 0;

        if (unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL).  So those cases clear this
                 * shared flag after we've set it.  Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled.  That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                current->jobctl |= JOBCTL_STOP_DEQUEUED;
        }
#ifdef CONFIG_POSIX_TIMERS
        if (resched_timer) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                posixtimer_rearm(info);
                spin_lock(&tsk->sighand->siglock);

                /* Don't expose the si_sys_private value to userspace */
                info->si_sys_private = 0;
        }
#endif
        return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
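
/*
 * Illustrative caller sketch (not part of the original file): this is
 * the pattern signalfd-style readers use. Note that @mask here is the
 * set of signals the caller wants to *skip*, not the set it waits for.
 *
 *      spin_lock_irq(&current->sighand->siglock);
 *      signr = dequeue_signal(current, &mask, &info);
 *      spin_unlock_irq(&current->sighand->siglock);
 */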

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
        struct task_struct *tsk = current;
        struct sigpending *pending = &tsk->pending;
        struct sigqueue *q, *sync = NULL;

        /*
         * Might a synchronous signal be in the queue?
         */
        if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
                return 0;

        /*
         * Return the first synchronous signal in the queue.
         */
        list_for_each_entry(q, &pending->list, list) {
                /* Synchronous signals have a positive si_code */
                if ((q->info.si_code > SI_USER) &&
                    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
                        sync = q;
                        goto next;
                }
        }
        return 0;
next:
        /*
         * Check if there is another siginfo for the same signal.
         */
        list_for_each_entry_continue(q, &pending->list, list) {
                if (q->info.si_signo == sync->info.si_signo)
                        goto still_pending;
        }

        sigdelset(&pending->signal, sync->info.si_signo);
        recalc_sigpending();
still_pending:
        list_del_init(&sync->list);
        copy_siginfo(info, &sync->info);
        __sigqueue_free(sync);
        return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
        set_tsk_thread_flag(t, TIF_SIGPENDING);
        /*
         * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case.  We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
                kick_process(t);
}
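
/*
 * For reference (not part of the original file): the signal_wake_up()
 * and ptrace_signal_wake_up() calls used throughout this file are thin
 * static-inline wrappers from <linux/sched/signal.h>, roughly:
 *
 *      static inline void signal_wake_up(struct task_struct *t, bool fatal)
 *      {
 *              signal_wake_up_state(t, fatal ? TASK_WAKEKILL : 0);
 *      }
 *      static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
 *      {
 *              signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
 *      }
 */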

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
        struct sigqueue *q, *n;
        sigset_t m;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return;

        sigandnsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
        return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
        return info == SEND_SIG_NOINFO ||
                (!is_si_special(info) && SI_FROMUSER(info));
}
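
/*
 * For reference (not part of the original file): the pointer comparison
 * in is_si_special() works because the special markers are the two
 * lowest pointer values, defined in <linux/sched/signal.h> roughly as:
 *
 *      #define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
 *      #define SEND_SIG_PRIV   ((struct kernel_siginfo *) 1)
 */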

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
        const struct cred *cred = current_cred();
        const struct cred *tcred = __task_cred(t);

        return uid_eq(cred->euid, tcred->suid) ||
               uid_eq(cred->euid, tcred->uid) ||
               uid_eq(cred->uid, tcred->suid) ||
               uid_eq(cred->uid, tcred->uid) ||
               ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
                                 struct task_struct *t)
{
        struct pid *sid;
        int error;

        if (!valid_signal(sig))
                return -EINVAL;

        if (!si_fromuser(info))
                return 0;

        error = audit_signal_info(sig, t); /* Let audit system see the signal */
        if (error)
                return error;

        if (!same_thread_group(current, t) &&
            !kill_ok_by_cred(t)) {
                switch (sig) {
                case SIGCONT:
                        sid = task_session(t);
                        /*
                         * We don't return the error if sid == NULL. The
                         * task was unhashed, the caller must notice this.
                         */
                        if (!sid || sid == task_session(current))
                                break;
                        fallthrough;
                default:
                        return -EPERM;
                }
        }

        return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap
 * will eventually be taken without returning to userland, after the
 * existing traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
        WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
        assert_spin_locked(&t->sighand->siglock);

        task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
        ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;
        sigset_t flush;

        if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
                if (!(signal->flags & SIGNAL_GROUP_EXIT))
                        return sig == SIGKILL;
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
        } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
                siginitset(&flush, sigmask(SIGCONT));
                flush_sigqueue_mask(&flush, &signal->shared_pending);
                for_each_thread(p, t)
                        flush_sigqueue_mask(&flush, &t->pending);
        } else if (sig == SIGCONT) {
                unsigned int why;
                /*
                 * Remove all stop signals from all queues, wake all threads.
                 */
                siginitset(&flush, SIG_KERNEL_STOP_MASK);
                flush_sigqueue_mask(&flush, &signal->shared_pending);
                for_each_thread(p, t) {
                        flush_sigqueue_mask(&flush, &t->pending);
                        task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
                        if (likely(!(t->ptrace & PT_SEIZED)))
                                wake_up_state(t, __TASK_STOPPED);
                        else
                                ptrace_trap_notify(t);
                }

                /*
                 * Notify the parent with CLD_CONTINUED if we were stopped.
                 *
                 * If we were in the middle of a group stop, we pretend it
                 * was already finished, and then continued.  Since SIGCHLD
                 * doesn't queue, we report only CLD_STOPPED, as if the next
                 * CLD_CONTINUED was dropped.
                 */
                why = 0;
                if (signal->flags & SIGNAL_STOP_STOPPED)
                        why |= SIGNAL_CLD_CONTINUED;
                else if (signal->group_stop_count)
                        why |= SIGNAL_CLD_STOPPED;

                if (why) {
                        /*
                         * The first thread which returns from do_signal_stop()
                         * will take ->siglock, notice SIGNAL_CLD_MASK, and
                         * notify its parent.  See get_signal().
                         */
                        signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
                }
        }

        return !sig_ignored(p, sig, force);
}
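
/*
 * Illustrative consequence of the SIGCONT handling above (not part of
 * the original file): a stopped process resumes even if it blocks or
 * ignores SIGCONT, because the wakeup happens here at generation time.
 * From userspace:
 *
 *      kill(pid, SIGSTOP);
 *      kill(pid, SIGCONT);     // target resumes in prepare_signal()
 *
 * prepare_signal() then only decides whether SIGCONT itself should
 * also be queued for delivery.
 */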

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
        if (sigismember(&p->blocked, sig))
                return false;

        if (p->flags & PF_EXITING)
                return false;

        if (sig == SIGKILL)
                return true;

        if (task_is_stopped_or_traced(p))
                return false;

        return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if ((type == PIDTYPE_PID) || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = signal->curr_target;
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                signal->curr_target = t;
        }

        /*
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) &&
            !(signal->flags & SIGNAL_GROUP_EXIT) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL || !p->ptrace)) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        signal->flags = SIGNAL_GROUP_EXIT;
                        signal->group_exit_code = sig;
                        signal->group_stop_count = 0;
                        t = p;
                        do {
                                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
                        return;
                }
        }

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);
        return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
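
/*
 * Illustrative example (not part of the original file): legacy (non-RT)
 * signals are represented by a single pending bit, so two sends that
 * land before the target dequeues coalesce into one delivery:
 *
 *      kill(pid, SIGUSR1);
 *      kill(pid, SIGUSR1);     // coalesced while still pending
 *
 * Real-time signals (>= SIGRTMIN) queue one entry per send instead,
 * subject to RLIMIT_SIGPENDING.
 */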

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
                         enum pid_type type, bool force)
{
        struct sigpending *pending;
        struct sigqueue *q;
        int override_rlimit;
        int ret = 0, result;

        assert_spin_locked(&t->sighand->siglock);

        result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t, force))
                goto ret;

        pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
        result = TRACE_SIGNAL_ALREADY_PENDING;
        if (legacy_queue(pending, sig))
                goto ret;

        result = TRACE_SIGNAL_DELIVERED;
        /*
         * Skip useless siginfo allocation for SIGKILL and kernel threads.
         */
        if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
                goto out_set;

        /*
         * Real-time signals must be queued if sent by sigqueue, or
         * some other real-time mechanism.  It is implementation
         * defined whether kill() does so.  We attempt to do so, on
         * the principle of least surprise, but since kill is not
         * allowed to fail with EAGAIN when low on memory we just
         * make sure at least one signal gets delivered and don't
         * pass on the info struct.
         */
        if (sig < SIGRTMIN)
                override_rlimit = (is_si_special(info) || info->si_code >= 0);
        else
                override_rlimit = 0;

        q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        clear_siginfo(&q->info);
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = task_tgid_nr_ns(current,
                                                        task_active_pid_ns(t));
                        rcu_read_lock();
                        q->info.si_uid =
                                from_kuid_munged(task_cred_xxx(t, user_ns),
                                                 current_uid());
                        rcu_read_unlock();
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        clear_siginfo(&q->info);
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        break;
                }
        } else if (!is_si_special(info) &&
                   sig >= SIGRTMIN && info->si_code != SI_USER) {
                /*
                 * Queue overflow, abort.  We may abort if the
                 * signal was rt and sent by user using something
                 * other than kill().
                 */
                result = TRACE_SIGNAL_OVERFLOW_FAIL;
                ret = -EAGAIN;
                goto ret;
        } else {
                /*
                 * This is a silent loss of information.  We still
                 * send the signal, but the *info bits are lost.
                 */
                result = TRACE_SIGNAL_LOSE_INFO;
        }

out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);

        /* Let multiprocess signals appear after on-going forks */
        if (type > PIDTYPE_TGID) {
                struct multiprocess_signals *delayed;
                hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
                        sigset_t *signal = &delayed->signal;
                        /* Can't queue both a stop and a continue signal */
                        if (sig == SIGCONT)
                                sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
                        else if (sig_kernel_stop(sig))
                                sigdelset(signal, SIGCONT);
                        sigaddset(signal, sig);
                }
        }

        complete_signal(sig, t, type);
ret:
        trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
        return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
        bool ret = false;
        switch (siginfo_layout(info->si_signo, info->si_code)) {
        case SIL_KILL:
        case SIL_CHLD:
        case SIL_RT:
                ret = true;
                break;
        case SIL_TIMER:
        case SIL_POLL:
        case SIL_FAULT:
        case SIL_FAULT_MCEERR:
        case SIL_FAULT_BNDERR:
        case SIL_FAULT_PKUERR:
        case SIL_SYS:
                ret = false;
                break;
        }
        return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
                       enum pid_type type)
{
        /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
        bool force = false;

        if (info == SEND_SIG_NOINFO) {
                /* Force if sent from an ancestor pid namespace */
                force = !task_pid_nr_ns(current, task_active_pid_ns(t));
        } else if (info == SEND_SIG_PRIV) {
                /* Don't ignore kernel generated signals */
                force = true;
        } else if (has_si_pid_and_uid(info)) {
                /* SIGKILL and SIGSTOP are special, or the siginfo has ids */
                struct user_namespace *t_user_ns;

                rcu_read_lock();
                t_user_ns = task_cred_xxx(t, user_ns);
                if (current_user_ns() != t_user_ns) {
                        kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
                        info->si_uid = from_kuid_munged(t_user_ns, uid);
                }
                rcu_read_unlock();

                /* A kernel generated signal? */
                force = (info->si_code == SI_KERNEL);

                /* From an ancestor pid namespace? */
                if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
                        info->si_pid = 0;
                        force = true;
                }
        }
        return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
        struct pt_regs *regs = signal_pt_regs();
        pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
        pr_info("code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
                        unsigned char insn;

                        if (get_user(insn, (unsigned char *)(regs->ip + i)))
                                break;
                        pr_cont("%02x ", insn);
                }
        }
        pr_cont("\n");
#endif
        preempt_disable();
        show_regs(regs);
        preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
        get_option(&str, &print_fatal_signals);

        return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
        return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
                     enum pid_type type)
{
        unsigned long flags;
        int ret = -ESRCH;

        if (lock_task_sighand(p, &flags)) {
                ret = send_signal(sig, info, p, type);
                unlock_task_sighand(p, &flags);
        }

        return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGVs etc., for example;
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;
        int sig = info->si_signo;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
        if (blocked || ignored) {
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);
                }
        }
        /*
         * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
         * debugging to leave init killable.
         */
        if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = send_signal(sig, info, t, PIDTYPE_PID);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

        return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
        return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
        struct task_struct *t = p;
        int count = 0;

        p->signal->group_stop_count = 0;

        while_each_thread(p, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                count++;

                /* Don't bother with already dead threads */
                if (t->exit_state)
                        continue;
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }

        return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                           unsigned long *flags)
{
        struct sighand_struct *sighand;

        rcu_read_lock();
        for (;;) {
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL))
                        break;

                /*
                 * This sighand can be already freed and even reused, but
                 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
                 * initializes ->siglock: this slab can't go away, it has
                 * the same object type, ->siglock can't be reinitialized.
                 *
                 * We need to ensure that tsk->sighand is still the same
                 * after we take the lock, we can race with de_thread() or
                 * __exit_signal().  In the latter case the next iteration
                 * must see ->sighand == NULL.
                 */
                spin_lock_irqsave(&sighand->siglock, *flags);
                if (likely(sighand == rcu_access_pointer(tsk->sighand)))
                        break;
                spin_unlock_irqrestore(&sighand->siglock, *flags);
        }
        rcu_read_unlock();

        return sighand;
}
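
/*
 * Typical usage (see do_send_sig_info() above): callers go through the
 * lock_task_sighand() wrapper from <linux/sched/signal.h>, which fails
 * cleanly if the task is being released:
 *
 *      if (lock_task_sighand(p, &flags)) {
 *              ... p->sighand and p->signal are stable here ...
 *              unlock_task_sighand(p, &flags);
 *      }
 */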

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
                        struct task_struct *p, enum pid_type type)
{
        int ret;

        rcu_read_lock();
        ret = check_kill_permission(sig, info, p);
        rcu_read_unlock();

        if (!ret && sig)
                ret = do_send_sig_info(sig, info, p, type);

        return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
        struct task_struct *p = NULL;
        int retval, success;

        success = 0;
        retval = -ESRCH;
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
                success |= !err;
                retval = err;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
        int error = -ESRCH;
        struct task_struct *p;

        for (;;) {
                rcu_read_lock();
                p = pid_task(pid, PIDTYPE_PID);
                if (p)
                        error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
                rcu_read_unlock();
                if (likely(!p || error != -ESRCH))
                        return error;

                /*
                 * The task was unhashed in between, try again.  If it
                 * is dead, pid_task() will return NULL, if we race with
                 * de_thread() it will find the new leader.
                 */
        }
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
        int error;
        rcu_read_lock();
        error = kill_pid_info(sig, info, find_vpid(pid));
        rcu_read_unlock();
        return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
                                     struct task_struct *target)
{
        const struct cred *pcred = __task_cred(target);

        return uid_eq(cred->euid, pcred->suid) ||
               uid_eq(cred->euid, pcred->uid) ||
               uid_eq(cred->uid, pcred->suid) ||
               uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *      kernel_pid_t    si_pid;
 *      kernel_uid32_t  si_uid;
 *      sigval_t        si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *      void __user     *si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than they would appear in a 32bit pointer, so
 * userspace will not see the address it was expecting for its
 * completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
                         struct pid *pid, const struct cred *cred)
{
        struct kernel_siginfo info;
        struct task_struct *p;
        unsigned long flags;
        int ret = -EINVAL;

        if (!valid_signal(sig))
                return ret;

        clear_siginfo(&info);
        info.si_signo = sig;
        info.si_errno = errno;
        info.si_code = SI_ASYNCIO;
        *((sigval_t *)&info.si_pid) = addr;

        rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
        if (!kill_as_cred_perm(cred, p)) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = security_task_kill(p, &info, sig, cred);
        if (ret)
                goto out_unlock;

        if (sig) {
                if (lock_task_sighand(p, &flags)) {
                        ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
                        unlock_task_sighand(p, &flags);
                } else
                        ret = -ESRCH;
        }
out_unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
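
/*
 * Illustrative caller sketch (not part of the original file; names are
 * hypothetical, loosely modelled on the USB devio code): a 64bit kernel
 * serving a 32bit process is expected to place the pointer in
 * sival_int, as the comment above kill_pid_usb_asyncio() requires.
 *
 *      sigval_t addr;
 *
 *      if (in_compat_syscall())
 *              addr.sival_int = ptr_to_compat(userurb);
 *      else
 *              addr.sival_ptr = userurb;
 *      kill_pid_usb_asyncio(signr, errno, addr, pid, cred);
 */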

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
        int ret;

        if (pid > 0)
                return kill_proc_info(sig, info, pid);

        /* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
        if (pid == INT_MIN)
                return -ESRCH;

        read_lock(&tasklist_lock);
        if (pid != -1) {
                ret = __kill_pgrp_info(sig, info,
                                pid ? find_vpid(-pid) : task_pgrp(current));
        } else {
                int retval = 0, count = 0;
                struct task_struct *p;

                for_each_process(p) {
                        if (task_pid_vnr(p) > 1 &&
                            !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p,
                                                              PIDTYPE_MAX);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                ret = count ? retval : -ESRCH;
        }
        read_unlock(&tasklist_lock);

        return ret;
}
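
/*
 * Summary of the pid interpretation implemented above, mirroring
 * kill(2):
 *
 *      pid >  0        signal the process with that pid
 *      pid == 0        signal every process in the caller's process group
 *      pid == -1       signal every process the caller has permission to
 *                      signal, except pid 1 and the caller's own group
 *      pid < -1        signal every process in the process group -pid
 */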

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
        /*
         * Make sure legacy kernel users don't send in bad values
         * (normal paths check this in check_kill_permission).
         */
        if (!valid_signal(sig))
                return -EINVAL;

        return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
        struct kernel_siginfo info;

        clear_siginfo(&info);
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_KERNEL;
        info.si_pid = 0;
        info.si_uid = 0;
        force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV.  And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
        struct task_struct *p = current;

        if (sig == SIGSEGV) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
                p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
        force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
        ___ARCH_SI_TRAPNO(int trapno)
        ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
        , struct task_struct *t)
{
        struct kernel_siginfo info;

        clear_siginfo(&info);
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code  = code;
        info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
        info.si_trapno = trapno;
#endif
#ifdef __ia64__
        info.si_imm = imm;
        info.si_flags = flags;
        info.si_isr = isr;
#endif
        return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
        ___ARCH_SI_TRAPNO(int trapno)
        ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
        return force_sig_fault_to_task(sig, code, addr
                                       ___ARCH_SI_TRAPNO(trapno)
                                       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
        ___ARCH_SI_TRAPNO(int trapno)
        ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
        , struct task_struct *t)
{
        struct kernel_siginfo info;

        clear_siginfo(&info);
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code  = code;
        info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
        info.si_trapno = trapno;
#endif
#ifdef __ia64__
        info.si_imm = imm;
        info.si_flags = flags;
        info.si_isr = isr;
#endif
        return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
        struct kernel_siginfo info;

        WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
        clear_siginfo(&info);
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = code;
        info.si_addr = addr;
        info.si_addr_lsb = lsb;
        return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
        struct kernel_siginfo info;

        WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
        clear_siginfo(&info);
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = code;
        info.si_addr = addr;
        info.si_addr_lsb = lsb;
        return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
        struct kernel_siginfo info;

        clear_siginfo(&info);
        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        info.si_code  = SEGV_BNDERR;
        info.si_addr  = addr;
        info.si_lower = lower;
        info.si_upper = upper;
        return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
        struct kernel_siginfo info;

        clear_siginfo(&info);
        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        info.si_code  = SEGV_PKUERR;
        info.si_addr  = addr;
        info.si_pkey  = pkey;
        return force_sig_info(&info);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
        struct kernel_siginfo info;

        clear_siginfo(&info);
        info.si_signo = SIGTRAP;
        info.si_errno = errno;
        info.si_code  = TRAP_HWBKPT;
        info.si_addr  = addr;
        return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
        int ret;

        read_lock(&tasklist_lock);
        ret = __kill_pgrp_info(sig, __si_special(priv), pid);
        read_unlock(&tasklist_lock);

        return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
        return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
1798
1799 /*
1800 * These functions support sending signals using preallocated sigqueue
1801 * structures. This is needed "because realtime applications cannot
1802 * afford to lose notifications of asynchronous events, like timer
1803 * expirations or I/O completions". In the case of POSIX Timers
1804 * we allocate the sigqueue structure from the timer_create. If this
1805 * allocation fails we are able to report the failure to the application
1806 * with an EAGAIN error.
1807 */
sigqueue_alloc(void)1808 struct sigqueue *sigqueue_alloc(void)
1809 {
1810 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1811
1812 if (q)
1813 q->flags |= SIGQUEUE_PREALLOC;
1814
1815 return q;
1816 }
1817
1818 void sigqueue_free(struct sigqueue *q)
1819 {
1820 unsigned long flags;
1821 spinlock_t *lock = &current->sighand->siglock;
1822
1823 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1824 /*
1825 * We must hold ->siglock while testing q->list
1826 * to serialize with collect_signal() or with
1827 * __exit_signal()->flush_sigqueue().
1828 */
1829 spin_lock_irqsave(lock, flags);
1830 q->flags &= ~SIGQUEUE_PREALLOC;
1831 /*
1832 * If it is queued it will be freed when dequeued,
1833 * like the "regular" sigqueue.
1834 */
1835 if (!list_empty(&q->list))
1836 q = NULL;
1837 spin_unlock_irqrestore(lock, flags);
1838
1839 if (q)
1840 __sigqueue_free(q);
1841 }
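/*
 * Hedged sketch of the preallocation flow described above; the real
 * callers live in kernel/time/posix-timers.c and the error handling
 * here is only illustrative:
 *
 *	q = sigqueue_alloc();		// at timer_create(); on failure,
 *	if (!q)				// report -EAGAIN to the application
 *		return -EAGAIN;
 *	...
 *	send_sigqueue(q, pid, type);	// at each expiry: never allocates,
 *					// bumps si_overrun if still queued
 *	...
 *	sigqueue_free(q);		// at timer deletion
 */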
1842
1843 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1844 {
1845 int sig = q->info.si_signo;
1846 struct sigpending *pending;
1847 struct task_struct *t;
1848 unsigned long flags;
1849 int ret, result;
1850
1851 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1852
1853 ret = -1;
1854 rcu_read_lock();
1855 t = pid_task(pid, type);
1856 if (!t || !likely(lock_task_sighand(t, &flags)))
1857 goto ret;
1858
1859 ret = 1; /* the signal is ignored */
1860 result = TRACE_SIGNAL_IGNORED;
1861 if (!prepare_signal(sig, t, false))
1862 goto out;
1863
1864 ret = 0;
1865 if (unlikely(!list_empty(&q->list))) {
1866 /*
1867 * If an SI_TIMER entry is already queued, just increment
1868 * the overrun count.
1869 */
1870 BUG_ON(q->info.si_code != SI_TIMER);
1871 q->info.si_overrun++;
1872 result = TRACE_SIGNAL_ALREADY_PENDING;
1873 goto out;
1874 }
1875 q->info.si_overrun = 0;
1876
1877 signalfd_notify(t, sig);
1878 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1879 list_add_tail(&q->list, &pending->list);
1880 sigaddset(&pending->signal, sig);
1881 complete_signal(sig, t, type);
1882 result = TRACE_SIGNAL_DELIVERED;
1883 out:
1884 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1885 unlock_task_sighand(t, &flags);
1886 ret:
1887 rcu_read_unlock();
1888 return ret;
1889 }
1890
1891 static void do_notify_pidfd(struct task_struct *task)
1892 {
1893 struct pid *pid;
1894
1895 WARN_ON(task->exit_state == 0);
1896 pid = task_pid(task);
1897 wake_up_all(&pid->wait_pidfd);
1898 }
1899
1900 /*
1901 * Let a parent know about the death of a child.
1902 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1903 *
1904 * Returns true if our parent ignored us and so we've switched to
1905 * self-reaping.
1906 */
1907 bool do_notify_parent(struct task_struct *tsk, int sig)
1908 {
1909 struct kernel_siginfo info;
1910 unsigned long flags;
1911 struct sighand_struct *psig;
1912 bool autoreap = false;
1913 u64 utime, stime;
1914
1915 WARN_ON_ONCE(sig == -1);
1916
1917 /* do_notify_parent_cldstop should have been called instead. */
1918 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
1919
1920 WARN_ON_ONCE(!tsk->ptrace &&
1921 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1922
1923 /* Wake up all pidfd waiters */
1924 do_notify_pidfd(tsk);
1925
1926 if (sig != SIGCHLD) {
1927 /*
1928 * This is only possible if parent == real_parent.
1929 * Check if it has changed security domain.
1930 */
1931 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
1932 sig = SIGCHLD;
1933 }
1934
1935 clear_siginfo(&info);
1936 info.si_signo = sig;
1937 info.si_errno = 0;
1938 /*
1939 * We are under tasklist_lock here so our parent is tied to
1940 * us and cannot change.
1941 *
1942 * task_active_pid_ns will always return the same pid namespace
1943 * until a task passes through release_task.
1944 *
1945 * write_lock() currently calls preempt_disable() which is the
1946 * same as rcu_read_lock(), but according to Oleg it is not
1947 * correct to rely on this.
1948 */
1949 rcu_read_lock();
1950 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1951 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1952 task_uid(tsk));
1953 rcu_read_unlock();
1954
1955 task_cputime(tsk, &utime, &stime);
1956 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1957 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1958
1959 info.si_status = tsk->exit_code & 0x7f;
1960 if (tsk->exit_code & 0x80)
1961 info.si_code = CLD_DUMPED;
1962 else if (tsk->exit_code & 0x7f)
1963 info.si_code = CLD_KILLED;
1964 else {
1965 info.si_code = CLD_EXITED;
1966 info.si_status = tsk->exit_code >> 8;
1967 }
1968
1969 psig = tsk->parent->sighand;
1970 spin_lock_irqsave(&psig->siglock, flags);
1971 if (!tsk->ptrace && sig == SIGCHLD &&
1972 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1973 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1974 /*
1975 * We are exiting and our parent doesn't care. POSIX.1
1976 * defines special semantics for setting SIGCHLD to SIG_IGN
1977 * or setting the SA_NOCLDWAIT flag: we should be reaped
1978 * automatically and not left for our parent's wait4 call.
1979 * Rather than having the parent do it as a magic kind of
1980 * signal handler, we just set this to tell do_exit that we
1981 * can be cleaned up without becoming a zombie. Note that
1982 * we still call __wake_up_parent in this case, because a
1983 * blocked sys_wait4 might now return -ECHILD.
1984 *
1985 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1986 * is implementation-defined: we do (if you don't want
1987 * it, just use SIG_IGN instead).
1988 */
1989 autoreap = true;
1990 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1991 sig = 0;
1992 }
1993 /*
1994 * Send with __send_signal as si_pid and si_uid are in the
1995 * parent's namespaces.
1996 */
1997 if (valid_signal(sig) && sig)
1998 __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
1999 __wake_up_parent(tsk, tsk->parent);
2000 spin_unlock_irqrestore(&psig->siglock, flags);
2001
2002 return autoreap;
2003 }
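/*
 * Userspace view of the autoreap rule above, as a hedged sketch: a
 * parent that sets SIGCHLD to SIG_IGN (or uses SA_NOCLDWAIT) gets no
 * zombies, and a blocked wait(2) fails with ECHILD instead:
 *
 *	signal(SIGCHLD, SIG_IGN);
 *	if (fork() == 0)
 *		_exit(0);		// child is reaped automatically
 *	// once the child is gone there is nothing to wait for:
 *	// wait(NULL) returns -1 with errno == ECHILD
 */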
2004
2005 /**
2006 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2007 * @tsk: task reporting the state change
2008 * @for_ptracer: the notification is for ptracer
2009 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2010 *
2011 * Notify @tsk's parent that the stopped/continued state has changed. If
2012 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2013 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2014 *
2015 * CONTEXT:
2016 * Must be called with tasklist_lock at least read locked.
2017 */
2018 static void do_notify_parent_cldstop(struct task_struct *tsk,
2019 bool for_ptracer, int why)
2020 {
2021 struct kernel_siginfo info;
2022 unsigned long flags;
2023 struct task_struct *parent;
2024 struct sighand_struct *sighand;
2025 u64 utime, stime;
2026
2027 if (for_ptracer) {
2028 parent = tsk->parent;
2029 } else {
2030 tsk = tsk->group_leader;
2031 parent = tsk->real_parent;
2032 }
2033
2034 clear_siginfo(&info);
2035 info.si_signo = SIGCHLD;
2036 info.si_errno = 0;
2037 /*
2038 * see comment in do_notify_parent() about the following 4 lines
2039 */
2040 rcu_read_lock();
2041 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2042 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2043 rcu_read_unlock();
2044
2045 task_cputime(tsk, &utime, &stime);
2046 info.si_utime = nsec_to_clock_t(utime);
2047 info.si_stime = nsec_to_clock_t(stime);
2048
2049 info.si_code = why;
2050 switch (why) {
2051 case CLD_CONTINUED:
2052 info.si_status = SIGCONT;
2053 break;
2054 case CLD_STOPPED:
2055 info.si_status = tsk->signal->group_exit_code & 0x7f;
2056 break;
2057 case CLD_TRAPPED:
2058 info.si_status = tsk->exit_code & 0x7f;
2059 break;
2060 default:
2061 BUG();
2062 }
2063
2064 sighand = parent->sighand;
2065 spin_lock_irqsave(&sighand->siglock, flags);
2066 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2067 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2068 __group_send_sig_info(SIGCHLD, &info, parent);
2069 /*
2070 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2071 */
2072 __wake_up_parent(tsk, parent);
2073 spin_unlock_irqrestore(&sighand->siglock, flags);
2074 }
2075
2076 static inline bool may_ptrace_stop(void)
2077 {
2078 if (!likely(current->ptrace))
2079 return false;
2080 /*
2081 * Are we in the middle of do_coredump?
2082 * If so, and our tracer is also part of the coredump, stopping
2083 * is a deadlock situation, and pointless because our tracer
2084 * is dead, so don't allow us to stop.
2085 * If SIGKILL was already sent before the caller unlocked
2086 * ->siglock we must see ->core_state != NULL. Otherwise it
2087 * is safe to enter schedule().
2088 *
2089 * This is almost outdated: a task with a pending SIGKILL can't
2090 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2091 * after SIGKILL was already dequeued.
2092 */
2093 if (unlikely(current->mm->core_state) &&
2094 unlikely(current->mm == current->parent->mm))
2095 return false;
2096
2097 return true;
2098 }
2099
2100
2101 /*
2102 * This must be called with current->sighand->siglock held.
2103 *
2104 * This should be the path for all ptrace stops.
2105 * We always set current->last_siginfo while stopped here.
2106 * That makes it a way to test a stopped process for
2107 * being ptrace-stopped vs being job-control-stopped.
2108 *
2109 * If we actually decide not to stop at all because the tracer
2110 * is gone, we keep current->exit_code unless clear_code.
2111 */
2112 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2113 __releases(&current->sighand->siglock)
2114 __acquires(&current->sighand->siglock)
2115 {
2116 bool gstop_done = false;
2117
2118 if (arch_ptrace_stop_needed(exit_code, info)) {
2119 /*
2120 * The arch code has something special to do before a
2121 * ptrace stop. This is allowed to block, e.g. for faults
2122 * on user stack pages. We can't keep the siglock while
2123 * calling arch_ptrace_stop, so we must release it now.
2124 * To preserve proper semantics, we must do this before
2125 * any signal bookkeeping like checking group_stop_count.
2126 */
2127 spin_unlock_irq(&current->sighand->siglock);
2128 arch_ptrace_stop(exit_code, info);
2129 spin_lock_irq(&current->sighand->siglock);
2130 }
2131
2132 /*
2133 * schedule() will not sleep if there is a pending signal that
2134 * can awaken the task.
2135 */
2136 set_special_state(TASK_TRACED);
2137
2138 /*
2139 * We're committing to trapping. TRACED should be visible before
2140 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2141 * Also, transition to TRACED and updates to ->jobctl should be
2142 * atomic with respect to siglock and should be done after the arch
2143 * hook as siglock is released and regrabbed across it.
2144 *
2145 * TRACER TRACEE
2146 *
2147 * ptrace_attach()
2148 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2149 * do_wait()
2150 * set_current_state() smp_wmb();
2151 * ptrace_do_wait()
2152 * wait_task_stopped()
2153 * task_stopped_code()
2154 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2155 */
2156 smp_wmb();
2157
2158 current->last_siginfo = info;
2159 current->exit_code = exit_code;
2160
2161 /*
2162 * If @why is CLD_STOPPED, we're trapping to participate in a group
2163 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2164 * across siglock relocks since INTERRUPT was scheduled, PENDING
2165 * could be clear now. We act as if SIGCONT is received after
2166 * TASK_TRACED is entered - ignore it.
2167 */
2168 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2169 gstop_done = task_participate_group_stop(current);
2170
2171 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2172 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2173 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2174 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2175
2176 /* entering a trap, clear TRAPPING */
2177 task_clear_jobctl_trapping(current);
2178
2179 spin_unlock_irq(&current->sighand->siglock);
2180 read_lock(&tasklist_lock);
2181 if (may_ptrace_stop()) {
2182 /*
2183 * Notify parents of the stop.
2184 *
2185 * While ptraced, there are two parents - the ptracer and
2186 * the real_parent of the group_leader. The ptracer should
2187 * know about every stop while the real parent is only
2188 * interested in the completion of group stop. The states
2189 * for the two don't interact with each other. Notify
2190 * separately unless they're gonna be duplicates.
2191 */
2192 do_notify_parent_cldstop(current, true, why);
2193 if (gstop_done && ptrace_reparented(current))
2194 do_notify_parent_cldstop(current, false, why);
2195
2196 /*
2197 * Don't want to allow preemption here, because
2198 * sys_ptrace() needs this task to be inactive.
2199 *
2200 * XXX: implement read_unlock_no_resched().
2201 */
2202 preempt_disable();
2203 read_unlock(&tasklist_lock);
2204 cgroup_enter_frozen();
2205 preempt_enable_no_resched();
2206 freezable_schedule();
2207 cgroup_leave_frozen(true);
2208 } else {
2209 /*
2210 * By the time we got the lock, our tracer went away.
2211 * Don't drop the lock yet, another tracer may come.
2212 *
2213 * If @gstop_done, the ptracer went away between group stop
2214 * completion and here. During detach, it would have set
2215 * JOBCTL_STOP_PENDING on us and we'll re-enter
2216 * TASK_STOPPED in do_signal_stop() on return, so notifying
2217 * the real parent of the group stop completion is enough.
2218 */
2219 if (gstop_done)
2220 do_notify_parent_cldstop(current, false, why);
2221
2222 /* tasklist protects us from ptrace_freeze_traced() */
2223 __set_current_state(TASK_RUNNING);
2224 if (clear_code)
2225 current->exit_code = 0;
2226 read_unlock(&tasklist_lock);
2227 }
2228
2229 /*
2230 * We are back. Now reacquire the siglock before touching
2231 * last_siginfo, so that we are sure to have synchronized with
2232 * any signal-sending on another CPU that wants to examine it.
2233 */
2234 spin_lock_irq(&current->sighand->siglock);
2235 current->last_siginfo = NULL;
2236
2237 /* LISTENING can be set only during STOP traps, clear it */
2238 current->jobctl &= ~JOBCTL_LISTENING;
2239
2240 /*
2241 * Queued signals ignored us while we were stopped for tracing.
2242 * So check for any that we should take before resuming user mode.
2243 * This sets TIF_SIGPENDING, but never clears it.
2244 */
2245 recalc_sigpending_tsk(current);
2246 }
2247
2248 static void ptrace_do_notify(int signr, int exit_code, int why)
2249 {
2250 kernel_siginfo_t info;
2251
2252 clear_siginfo(&info);
2253 info.si_signo = signr;
2254 info.si_code = exit_code;
2255 info.si_pid = task_pid_vnr(current);
2256 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2257
2258 /* Let the debugger run. */
2259 ptrace_stop(exit_code, why, 1, &info);
2260 }
2261
2262 void ptrace_notify(int exit_code)
2263 {
2264 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2265 if (unlikely(current->task_works))
2266 task_work_run();
2267
2268 spin_lock_irq(&current->sighand->siglock);
2269 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2270 spin_unlock_irq(&current->sighand->siglock);
2271 }
2272
2273 /**
2274 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2275 * @signr: signr causing group stop if initiating
2276 *
2277 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2278 * and participate in it. If already set, participate in the existing
2279 * group stop. If participated in a group stop (and thus slept), %true is
2280 * returned with siglock released.
2281 *
2282 * If ptraced, this function doesn't handle stop itself. Instead,
2283 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2284 * untouched. The caller must ensure that INTERRUPT trap handling takes
2285 * place afterwards.
2286 *
2287 * CONTEXT:
2288 * Must be called with @current->sighand->siglock held, which is released
2289 * on %true return.
2290 *
2291 * RETURNS:
2292 * %false if group stop is already cancelled or ptrace trap is scheduled.
2293 * %true if participated in group stop.
2294 */
2295 static bool do_signal_stop(int signr)
2296 __releases(&current->sighand->siglock)
2297 {
2298 struct signal_struct *sig = current->signal;
2299
2300 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2301 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2302 struct task_struct *t;
2303
2304 /* signr will be recorded in task->jobctl for retries */
2305 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2306
2307 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2308 unlikely(signal_group_exit(sig)))
2309 return false;
2310 /*
2311 * There is no group stop already in progress. We must
2312 * initiate one now.
2313 *
2314 * While ptraced, a task may be resumed while group stop is
2315 * still in effect and then receive a stop signal and
2316 * initiate another group stop. This deviates from the
2317 * usual behavior as two consecutive stop signals can't
2318 * cause two group stops when !ptraced. That is why we
2319 * also check !task_is_stopped(t) below.
2320 *
2321 * The condition can be distinguished by testing whether
2322 * SIGNAL_STOP_STOPPED is already set. Don't generate
2323 * group_exit_code in such case.
2324 *
2325 * This is not necessary for SIGNAL_STOP_CONTINUED because
2326 * an intervening stop signal is required to cause two
2327 * continued events regardless of ptrace.
2328 */
2329 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2330 sig->group_exit_code = signr;
2331
2332 sig->group_stop_count = 0;
2333
2334 if (task_set_jobctl_pending(current, signr | gstop))
2335 sig->group_stop_count++;
2336
2337 t = current;
2338 while_each_thread(current, t) {
2339 /*
2340 * Setting state to TASK_STOPPED for a group
2341 * stop is always done with the siglock held,
2342 * so this check has no races.
2343 */
2344 if (!task_is_stopped(t) &&
2345 task_set_jobctl_pending(t, signr | gstop)) {
2346 sig->group_stop_count++;
2347 if (likely(!(t->ptrace & PT_SEIZED)))
2348 signal_wake_up(t, 0);
2349 else
2350 ptrace_trap_notify(t);
2351 }
2352 }
2353 }
2354
2355 if (likely(!current->ptrace)) {
2356 int notify = 0;
2357
2358 /*
2359 * If there are no other threads in the group, or if there
2360 * is a group stop in progress and we are the last to stop,
2361 * report to the parent.
2362 */
2363 if (task_participate_group_stop(current))
2364 notify = CLD_STOPPED;
2365
2366 set_special_state(TASK_STOPPED);
2367 spin_unlock_irq(&current->sighand->siglock);
2368
2369 /*
2370 * Notify the parent of the group stop completion. Because
2371 * we're not holding either the siglock or tasklist_lock
2372 * here, ptracer may attach in between; however, this is for
2373 * group stop and should always be delivered to the real
2374 * parent of the group leader. The new ptracer will get
2375 * its notification when this task transitions into
2376 * TASK_TRACED.
2377 */
2378 if (notify) {
2379 read_lock(&tasklist_lock);
2380 do_notify_parent_cldstop(current, false, notify);
2381 read_unlock(&tasklist_lock);
2382 }
2383
2384 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2385 cgroup_enter_frozen();
2386 freezable_schedule();
2387 return true;
2388 } else {
2389 /*
2390 * While ptraced, group stop is handled by STOP trap.
2391 * Schedule it and let the caller deal with it.
2392 */
2393 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2394 return false;
2395 }
2396 }
2397
2398 /**
2399 * do_jobctl_trap - take care of ptrace jobctl traps
2400 *
2401 * When PT_SEIZED, it's used for both group stop and explicit
2402 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2403 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2404 * the stop signal; otherwise, %SIGTRAP.
2405 *
2406 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2407 * number as exit_code and no siginfo.
2408 *
2409 * CONTEXT:
2410 * Must be called with @current->sighand->siglock held, which may be
2411 * released and re-acquired before returning with intervening sleep.
2412 */
2413 static void do_jobctl_trap(void)
2414 {
2415 struct signal_struct *signal = current->signal;
2416 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2417
2418 if (current->ptrace & PT_SEIZED) {
2419 if (!signal->group_stop_count &&
2420 !(signal->flags & SIGNAL_STOP_STOPPED))
2421 signr = SIGTRAP;
2422 WARN_ON_ONCE(!signr);
2423 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2424 CLD_STOPPED);
2425 } else {
2426 WARN_ON_ONCE(!signr);
2427 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2428 current->exit_code = 0;
2429 }
2430 }
2431
2432 /**
2433 * do_freezer_trap - handle the freezer jobctl trap
2434 *
2435 * Puts the task into the frozen state, provided the task is not about
2436 * to quit; in that case JOBCTL_TRAP_FREEZE is dropped.
2437 *
2438 * CONTEXT:
2439 * Must be called with @current->sighand->siglock held,
2440 * which is always released before returning.
2441 */
2442 static void do_freezer_trap(void)
2443 __releases(&current->sighand->siglock)
2444 {
2445 /*
2446 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
2447 * return so the caller loops again and gives them a chance to be
2448 * handled. In any case, control returns to the caller's loop.
2449 */
2450 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2451 JOBCTL_TRAP_FREEZE) {
2452 spin_unlock_irq(&current->sighand->siglock);
2453 return;
2454 }
2455
2456 /*
2457 * Now we're sure that there is no pending fatal signal and no
2458 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2459 * immediately (if there is a non-fatal signal pending), and
2460 * put the task into sleep.
2461 */
2462 __set_current_state(TASK_INTERRUPTIBLE);
2463 clear_thread_flag(TIF_SIGPENDING);
2464 spin_unlock_irq(&current->sighand->siglock);
2465 cgroup_enter_frozen();
2466 freezable_schedule();
2467 }
2468
2469 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2470 {
2471 /*
2472 * We do not check sig_kernel_stop(signr) but set this marker
2473 * unconditionally because we do not know whether debugger will
2474 * change signr. This flag has no meaning unless we are going
2475 * to stop after return from ptrace_stop(). In this case it will
2476 * be checked in do_signal_stop(), we should only stop if it was
2477 * not cleared by SIGCONT while we were sleeping. See also the
2478 * comment in dequeue_signal().
2479 */
2480 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2481 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2482
2483 /* We're back. Did the debugger cancel the sig? */
2484 signr = current->exit_code;
2485 if (signr == 0)
2486 return signr;
2487
2488 current->exit_code = 0;
2489
2490 /*
2491 * Update the siginfo structure if the signal has
2492 * changed. If the debugger wanted something
2493 * specific in the siginfo structure then it should
2494 * have updated *info via PTRACE_SETSIGINFO.
2495 */
2496 if (signr != info->si_signo) {
2497 clear_siginfo(info);
2498 info->si_signo = signr;
2499 info->si_errno = 0;
2500 info->si_code = SI_USER;
2501 rcu_read_lock();
2502 info->si_pid = task_pid_vnr(current->parent);
2503 info->si_uid = from_kuid_munged(current_user_ns(),
2504 task_uid(current->parent));
2505 rcu_read_unlock();
2506 }
2507
2508 /* If the (new) signal is now blocked, requeue it. */
2509 if (sigismember(&current->blocked, signr)) {
2510 send_signal(signr, info, current, PIDTYPE_PID);
2511 signr = 0;
2512 }
2513
2514 return signr;
2515 }
2516
2517 bool get_signal(struct ksignal *ksig)
2518 {
2519 struct sighand_struct *sighand = current->sighand;
2520 struct signal_struct *signal = current->signal;
2521 int signr;
2522
2523 if (unlikely(current->task_works))
2524 task_work_run();
2525
2526 /*
2527 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2528 * that the arch handlers don't all have to do it. If we get here
2529 * without TIF_SIGPENDING, just exit after running signal work.
2530 */
2531 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2532 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2533 tracehook_notify_signal();
2534 if (!task_sigpending(current))
2535 return false;
2536 }
2537
2538 if (unlikely(uprobe_deny_signal()))
2539 return false;
2540
2541 /*
2542 * Do this once, we can't return to user-mode if freezing() == T.
2543 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2544 * thus do not need another check after return.
2545 */
2546 try_to_freeze();
2547
2548 relock:
2549 spin_lock_irq(&sighand->siglock);
2550
2551 /*
2552 * Every stopped thread goes here after wakeup. Check to see if
2553 * we should notify the parent, prepare_signal(SIGCONT) encodes
2554 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2555 */
2556 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2557 int why;
2558
2559 if (signal->flags & SIGNAL_CLD_CONTINUED)
2560 why = CLD_CONTINUED;
2561 else
2562 why = CLD_STOPPED;
2563
2564 signal->flags &= ~SIGNAL_CLD_MASK;
2565
2566 spin_unlock_irq(&sighand->siglock);
2567
2568 /*
2569 * Notify the parent that we're continuing. This event is
2570 * always per-process and doesn't make a whole lot of sense
2571 * for ptracers, who shouldn't consume the state via
2572 * wait(2) either, but, for backward compatibility, notify
2573 * the ptracer of the group leader too unless it's gonna be
2574 * a duplicate.
2575 */
2576 read_lock(&tasklist_lock);
2577 do_notify_parent_cldstop(current, false, why);
2578
2579 if (ptrace_reparented(current->group_leader))
2580 do_notify_parent_cldstop(current->group_leader,
2581 true, why);
2582 read_unlock(&tasklist_lock);
2583
2584 goto relock;
2585 }
2586
2587 /* Has this task already been marked for death? */
2588 if (signal_group_exit(signal)) {
2589 ksig->info.si_signo = signr = SIGKILL;
2590 sigdelset(&current->pending.signal, SIGKILL);
2591 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2592 &sighand->action[SIGKILL - 1]);
2593 recalc_sigpending();
2594 goto fatal;
2595 }
2596
2597 for (;;) {
2598 struct k_sigaction *ka;
2599
2600 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2601 do_signal_stop(0))
2602 goto relock;
2603
2604 if (unlikely(current->jobctl &
2605 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2606 if (current->jobctl & JOBCTL_TRAP_MASK) {
2607 do_jobctl_trap();
2608 spin_unlock_irq(&sighand->siglock);
2609 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2610 do_freezer_trap();
2611
2612 goto relock;
2613 }
2614
2615 /*
2616 * If the task is leaving the frozen state, let's update
2617 * cgroup counters and reset the frozen bit.
2618 */
2619 if (unlikely(cgroup_task_frozen(current))) {
2620 spin_unlock_irq(&sighand->siglock);
2621 cgroup_leave_frozen(false);
2622 goto relock;
2623 }
2624
2625 /*
2626 * Signals generated by the execution of an instruction
2627 * need to be delivered before any other pending signals
2628 * so that the instruction pointer in the signal stack
2629 * frame points to the faulting instruction.
2630 */
2631 signr = dequeue_synchronous_signal(&ksig->info);
2632 if (!signr)
2633 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2634
2635 if (!signr)
2636 break; /* will return 0 */
2637
2638 if (unlikely(current->ptrace) && signr != SIGKILL) {
2639 signr = ptrace_signal(signr, &ksig->info);
2640 if (!signr)
2641 continue;
2642 }
2643
2644 ka = &sighand->action[signr-1];
2645
2646 /* Trace actually delivered signals. */
2647 trace_signal_deliver(signr, &ksig->info, ka);
2648
2649 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2650 continue;
2651 if (ka->sa.sa_handler != SIG_DFL) {
2652 /* Run the handler. */
2653 ksig->ka = *ka;
2654
2655 if (ka->sa.sa_flags & SA_ONESHOT)
2656 ka->sa.sa_handler = SIG_DFL;
2657
2658 break; /* will return non-zero "signr" value */
2659 }
2660
2661 /*
2662 * Now we are doing the default action for this signal.
2663 */
2664 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2665 continue;
2666
2667 /*
2668 * Global init gets no signals it doesn't want.
2669 * Container-init gets no signals it doesn't want from same
2670 * container.
2671 *
2672 * Note that if global/container-init sees a sig_kernel_only()
2673 * signal here, the signal must have been generated internally
2674 * or must have come from an ancestor namespace. In either
2675 * case, the signal cannot be dropped.
2676 */
2677 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2678 !sig_kernel_only(signr))
2679 continue;
2680
2681 if (sig_kernel_stop(signr)) {
2682 /*
2683 * The default action is to stop all threads in
2684 * the thread group. The job control signals
2685 * do nothing in an orphaned pgrp, but SIGSTOP
2686 * always works. Note that siglock needs to be
2687 * dropped during the call to is_orphaned_pgrp()
2688 * because of lock ordering with tasklist_lock.
2689 * This allows an intervening SIGCONT to be posted.
2690 * We need to check for that and bail out if necessary.
2691 */
2692 if (signr != SIGSTOP) {
2693 spin_unlock_irq(&sighand->siglock);
2694
2695 /* signals can be posted during this window */
2696
2697 if (is_current_pgrp_orphaned())
2698 goto relock;
2699
2700 spin_lock_irq(&sighand->siglock);
2701 }
2702
2703 if (likely(do_signal_stop(ksig->info.si_signo))) {
2704 /* It released the siglock. */
2705 goto relock;
2706 }
2707
2708 /*
2709 * We didn't actually stop, due to a race
2710 * with SIGCONT or something like that.
2711 */
2712 continue;
2713 }
2714
2715 fatal:
2716 spin_unlock_irq(&sighand->siglock);
2717 if (unlikely(cgroup_task_frozen(current)))
2718 cgroup_leave_frozen(true);
2719
2720 /*
2721 * Anything else is fatal, maybe with a core dump.
2722 */
2723 current->flags |= PF_SIGNALED;
2724
2725 if (sig_kernel_coredump(signr)) {
2726 if (print_fatal_signals)
2727 print_fatal_signal(ksig->info.si_signo);
2728 proc_coredump_connector(current);
2729 /*
2730 * If it was able to dump core, this kills all
2731 * other threads in the group and synchronizes with
2732 * their demise. If we lost the race with another
2733 * thread getting here, it set group_exit_code
2734 * first and our do_group_exit call below will use
2735 * that value and ignore the one we pass it.
2736 */
2737 do_coredump(&ksig->info);
2738 }
2739
2740 /*
2741 * PF_IO_WORKER threads will catch and exit on fatal signals
2742 * themselves. They have cleanup that must be performed, so
2743 * we cannot call do_exit() on their behalf.
2744 */
2745 if (current->flags & PF_IO_WORKER)
2746 goto out;
2747
2748 /*
2749 * Death signals, no core dump.
2750 */
2751 do_group_exit(ksig->info.si_signo);
2752 /* NOTREACHED */
2753 }
2754 spin_unlock_irq(&sighand->siglock);
2755 out:
2756 ksig->sig = signr;
2757 return ksig->sig > 0;
2758 }
2759
2760 /**
2761 * signal_delivered - update signal state after successful delivery
2762 * @ksig: kernel signal struct
2763 * @stepping: nonzero if debugger single-step or block-step in use
2764 *
2765 * This function should be called when a signal has successfully been
2766 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2767 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2768 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2769 */
2770 static void signal_delivered(struct ksignal *ksig, int stepping)
2771 {
2772 sigset_t blocked;
2773
2774 /* A signal was successfully delivered, and the
2775 saved sigmask was stored on the signal frame,
2776 and will be restored by sigreturn. So we can
2777 simply clear the restore sigmask flag. */
2778 clear_restore_sigmask();
2779
2780 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2781 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2782 sigaddset(&blocked, ksig->sig);
2783 set_current_blocked(&blocked);
2784 tracehook_signal_handler(stepping);
2785 }
2786
2787 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2788 {
2789 if (failed)
2790 force_sigsegv(ksig->sig);
2791 else
2792 signal_delivered(ksig, stepping);
2793 }
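/*
 * Hedged sketch of how an architecture's delivery path ties
 * get_signal() and signal_setup_done() together; the real loops live
 * in arch/<arch>/kernel/signal.c and setup_rt_frame() is illustrative:
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig)) {
 *		// A handler was found: build the user-mode signal frame,
 *		// then update the blocked mask and notify tracers.
 *		int failed = setup_rt_frame(&ksig, regs);
 *		signal_setup_done(failed, &ksig, stepping);
 *	}
 */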
2794
2795 /*
2796 * It could be that complete_signal() picked us to notify about the
2797 * group-wide signal. Other threads should be notified now to take
2798 * the shared signals in @which since we will not.
2799 */
2800 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2801 {
2802 sigset_t retarget;
2803 struct task_struct *t;
2804
2805 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2806 if (sigisemptyset(&retarget))
2807 return;
2808
2809 t = tsk;
2810 while_each_thread(tsk, t) {
2811 if (t->flags & PF_EXITING)
2812 continue;
2813
2814 if (!has_pending_signals(&retarget, &t->blocked))
2815 continue;
2816 /* Remove the signals this thread can handle. */
2817 sigandsets(&retarget, &retarget, &t->blocked);
2818
2819 if (!task_sigpending(t))
2820 signal_wake_up(t, 0);
2821
2822 if (sigisemptyset(&retarget))
2823 break;
2824 }
2825 }
2826
2827 void exit_signals(struct task_struct *tsk)
2828 {
2829 int group_stop = 0;
2830 sigset_t unblocked;
2831
2832 /*
2833 * @tsk is about to have PF_EXITING set - lock out users which
2834 * expect stable threadgroup.
2835 */
2836 cgroup_threadgroup_change_begin(tsk);
2837
2838 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2839 tsk->flags |= PF_EXITING;
2840 cgroup_threadgroup_change_end(tsk);
2841 return;
2842 }
2843
2844 spin_lock_irq(&tsk->sighand->siglock);
2845 /*
2846 * From now this task is not visible for group-wide signals,
2847 * see wants_signal(), do_signal_stop().
2848 */
2849 tsk->flags |= PF_EXITING;
2850
2851 cgroup_threadgroup_change_end(tsk);
2852
2853 if (!task_sigpending(tsk))
2854 goto out;
2855
2856 unblocked = tsk->blocked;
2857 signotset(&unblocked);
2858 retarget_shared_pending(tsk, &unblocked);
2859
2860 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2861 task_participate_group_stop(tsk))
2862 group_stop = CLD_STOPPED;
2863 out:
2864 spin_unlock_irq(&tsk->sighand->siglock);
2865
2866 /*
2867 * If group stop has completed, deliver the notification. This
2868 * should always go to the real parent of the group leader.
2869 */
2870 if (unlikely(group_stop)) {
2871 read_lock(&tasklist_lock);
2872 do_notify_parent_cldstop(tsk, false, group_stop);
2873 read_unlock(&tasklist_lock);
2874 }
2875 }
2876
2877 /*
2878 * System call entry points.
2879 */
2880
2881 /**
2882 * sys_restart_syscall - restart a system call
2883 */
2884 SYSCALL_DEFINE0(restart_syscall)
2885 {
2886 struct restart_block *restart = &current->restart_block;
2887 return restart->fn(restart);
2888 }
2889
2890 long do_no_restart_syscall(struct restart_block *param)
2891 {
2892 return -EINTR;
2893 }
2894
2895 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2896 {
2897 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2898 sigset_t newblocked;
2899 /* A set of now blocked but previously unblocked signals. */
2900 sigandnsets(&newblocked, newset, &current->blocked);
2901 retarget_shared_pending(tsk, &newblocked);
2902 }
2903 tsk->blocked = *newset;
2904 recalc_sigpending();
2905 }
2906
2907 /**
2908 * set_current_blocked - change current->blocked mask
2909 * @newset: new mask
2910 *
2911 * It is wrong to change ->blocked directly, this helper should be used
2912 * to ensure the process can't miss a shared signal we are going to block.
2913 */
2914 void set_current_blocked(sigset_t *newset)
2915 {
2916 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2917 __set_current_blocked(newset);
2918 }
2919
2920 void __set_current_blocked(const sigset_t *newset)
2921 {
2922 struct task_struct *tsk = current;
2923
2924 /*
2925 * In case the signal mask hasn't changed, there is nothing we need
2926 * to do. The current->blocked shouldn't be modified by another task.
2927 */
2928 if (sigequalsets(&tsk->blocked, newset))
2929 return;
2930
2931 spin_lock_irq(&tsk->sighand->siglock);
2932 __set_task_blocked(tsk, newset);
2933 spin_unlock_irq(&tsk->sighand->siglock);
2934 }
2935
2936 /*
2937 * This is also useful for kernel threads that want to temporarily
2938 * (or permanently) block certain signals.
2939 *
2940 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2941 * interface happily blocks "unblockable" signals like SIGKILL
2942 * and friends.
2943 */
2944 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2945 {
2946 struct task_struct *tsk = current;
2947 sigset_t newset;
2948
2949 /* Lockless, only current can change ->blocked, never from irq */
2950 if (oldset)
2951 *oldset = tsk->blocked;
2952
2953 switch (how) {
2954 case SIG_BLOCK:
2955 sigorsets(&newset, &tsk->blocked, set);
2956 break;
2957 case SIG_UNBLOCK:
2958 sigandnsets(&newset, &tsk->blocked, set);
2959 break;
2960 case SIG_SETMASK:
2961 newset = *set;
2962 break;
2963 default:
2964 return -EINVAL;
2965 }
2966
2967 __set_current_blocked(&newset);
2968 return 0;
2969 }
2970 EXPORT_SYMBOL(sigprocmask);
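/*
 * Hedged in-kernel sketch of the note above: a kernel thread that
 * wants to service only one signal can block everything else,
 * including signals that are unblockable from userspace:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigdelset(&all, SIGHUP);		// keep SIGHUP deliverable
 *	sigprocmask(SIG_SETMASK, &all, NULL);
 */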
2971
2972 /*
2973 * The API helps set app-provided sigmasks.
2974 *
2975 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2976 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2977 *
2978 * Note that it does set_restore_sigmask() in advance, so it must always be
2979 * paired with restore_saved_sigmask_unless() before return from syscall.
2980 */
2981 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
2982 {
2983 sigset_t kmask;
2984
2985 if (!umask)
2986 return 0;
2987 if (sigsetsize != sizeof(sigset_t))
2988 return -EINVAL;
2989 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
2990 return -EFAULT;
2991
2992 set_restore_sigmask();
2993 current->saved_sigmask = current->blocked;
2994 set_current_blocked(&kmask);
2995
2996 return 0;
2997 }
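/*
 * Hedged sketch of the pairing rule above, modeled on callers such as
 * ppoll; do_the_wait() is illustrative:
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_the_wait(...);
 *	// Keep the temporary mask only while a signal still has to be
 *	// delivered with it in place; otherwise restore the saved one.
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */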
2998
2999 #ifdef CONFIG_COMPAT
3000 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3001 size_t sigsetsize)
3002 {
3003 sigset_t kmask;
3004
3005 if (!umask)
3006 return 0;
3007 if (sigsetsize != sizeof(compat_sigset_t))
3008 return -EINVAL;
3009 if (get_compat_sigset(&kmask, umask))
3010 return -EFAULT;
3011
3012 set_restore_sigmask();
3013 current->saved_sigmask = current->blocked;
3014 set_current_blocked(&kmask);
3015
3016 return 0;
3017 }
3018 #endif
3019
3020 /**
3021 * sys_rt_sigprocmask - change the list of currently blocked signals
3022 * @how: whether to add, remove, or set signals
3023 * @nset: signals to add, remove, or set, depending on @how (may be NULL)
3024 * @oset: previous value of signal mask if non-null
3025 * @sigsetsize: size of sigset_t type
3026 */
3027 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3028 sigset_t __user *, oset, size_t, sigsetsize)
3029 {
3030 sigset_t old_set, new_set;
3031 int error;
3032
3033 /* XXX: Don't preclude handling different sized sigset_t's. */
3034 if (sigsetsize != sizeof(sigset_t))
3035 return -EINVAL;
3036
3037 old_set = current->blocked;
3038
3039 if (nset) {
3040 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3041 return -EFAULT;
3042 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3043
3044 error = sigprocmask(how, &new_set, NULL);
3045 if (error)
3046 return error;
3047 }
3048
3049 if (oset) {
3050 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3051 return -EFAULT;
3052 }
3053
3054 return 0;
3055 }
3056
3057 #ifdef CONFIG_COMPAT
3058 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3059 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3060 {
3061 sigset_t old_set = current->blocked;
3062
3063 /* XXX: Don't preclude handling different sized sigset_t's. */
3064 if (sigsetsize != sizeof(sigset_t))
3065 return -EINVAL;
3066
3067 if (nset) {
3068 sigset_t new_set;
3069 int error;
3070 if (get_compat_sigset(&new_set, nset))
3071 return -EFAULT;
3072 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3073
3074 error = sigprocmask(how, &new_set, NULL);
3075 if (error)
3076 return error;
3077 }
3078 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3079 }
3080 #endif
3081
3082 static void do_sigpending(sigset_t *set)
3083 {
3084 spin_lock_irq(&current->sighand->siglock);
3085 sigorsets(set, &current->pending.signal,
3086 &current->signal->shared_pending.signal);
3087 spin_unlock_irq(&current->sighand->siglock);
3088 
3089 /* Outside the lock because only this thread touches it. */
3090 sigandsets(set, &current->blocked, set);
3091 }
3092
3093 /**
3094 * sys_rt_sigpending - examine a pending signal that has been raised
3095 * while blocked
3096 * @uset: stores pending signals
3097 * @sigsetsize: size of sigset_t type or larger
3098 */
3099 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3100 {
3101 sigset_t set;
3102
3103 if (sigsetsize > sizeof(*uset))
3104 return -EINVAL;
3105
3106 do_sigpending(&set);
3107
3108 if (copy_to_user(uset, &set, sigsetsize))
3109 return -EFAULT;
3110
3111 return 0;
3112 }
3113
3114 #ifdef CONFIG_COMPAT
3115 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3116 compat_size_t, sigsetsize)
3117 {
3118 sigset_t set;
3119
3120 if (sigsetsize > sizeof(*uset))
3121 return -EINVAL;
3122
3123 do_sigpending(&set);
3124
3125 return put_compat_sigset(uset, &set, sigsetsize);
3126 }
3127 #endif
3128
3129 static const struct {
3130 unsigned char limit, layout;
3131 } sig_sicodes[] = {
3132 [SIGILL] = { NSIGILL, SIL_FAULT },
3133 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3134 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3135 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3136 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3137 #if defined(SIGEMT)
3138 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3139 #endif
3140 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3141 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3142 [SIGSYS] = { NSIGSYS, SIL_SYS },
3143 };
3144
3145 static bool known_siginfo_layout(unsigned sig, int si_code)
3146 {
3147 if (si_code == SI_KERNEL)
3148 return true;
3149 else if ((si_code > SI_USER)) {
3150 if (sig_specific_sicodes(sig)) {
3151 if (si_code <= sig_sicodes[sig].limit)
3152 return true;
3153 }
3154 else if (si_code <= NSIGPOLL)
3155 return true;
3156 }
3157 else if (si_code >= SI_DETHREAD)
3158 return true;
3159 else if (si_code == SI_ASYNCNL)
3160 return true;
3161 return false;
3162 }
3163
3164 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3165 {
3166 enum siginfo_layout layout = SIL_KILL;
3167 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3168 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3169 (si_code <= sig_sicodes[sig].limit)) {
3170 layout = sig_sicodes[sig].layout;
3171 /* Handle the exceptions */
3172 if ((sig == SIGBUS) &&
3173 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3174 layout = SIL_FAULT_MCEERR;
3175 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3176 layout = SIL_FAULT_BNDERR;
3177 #ifdef SEGV_PKUERR
3178 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3179 layout = SIL_FAULT_PKUERR;
3180 #endif
3181 }
3182 else if (si_code <= NSIGPOLL)
3183 layout = SIL_POLL;
3184 } else {
3185 if (si_code == SI_TIMER)
3186 layout = SIL_TIMER;
3187 else if (si_code == SI_SIGIO)
3188 layout = SIL_POLL;
3189 else if (si_code < 0)
3190 layout = SIL_RT;
3191 }
3192 return layout;
3193 }
3194
3195 static inline char __user *si_expansion(const siginfo_t __user *info)
3196 {
3197 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3198 }
3199
3200 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3201 {
3202 char __user *expansion = si_expansion(to);
3203 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3204 return -EFAULT;
3205 if (clear_user(expansion, SI_EXPANSION_SIZE))
3206 return -EFAULT;
3207 return 0;
3208 }
3209
3210 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3211 const siginfo_t __user *from)
3212 {
3213 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3214 char __user *expansion = si_expansion(from);
3215 char buf[SI_EXPANSION_SIZE];
3216 int i;
3217 /*
3218 * An unknown si_code might need more than
3219 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3220 * extra bytes are 0. This guarantees copy_siginfo_to_user
3221 * will return this data to userspace exactly.
3222 */
3223 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3224 return -EFAULT;
3225 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3226 if (buf[i] != 0)
3227 return -E2BIG;
3228 }
3229 }
3230 return 0;
3231 }
3232
3233 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3234 const siginfo_t __user *from)
3235 {
3236 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3237 return -EFAULT;
3238 to->si_signo = signo;
3239 return post_copy_siginfo_from_user(to, from);
3240 }
3241
3242 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3243 {
3244 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3245 return -EFAULT;
3246 return post_copy_siginfo_from_user(to, from);
3247 }
3248
3249 #ifdef CONFIG_COMPAT
3250 /**
3251 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3252 * @to: compat siginfo destination
3253 * @from: kernel siginfo source
3254 *
3255 * Note: This function does not work properly for SIGCHLD on x32, but
3256 * fortunately it doesn't have to. The only valid callers for this function are
3257 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3258 * The latter does not care because SIGCHLD will never cause a coredump.
3259 */
3260 void copy_siginfo_to_external32(struct compat_siginfo *to,
3261 const struct kernel_siginfo *from)
3262 {
3263 memset(to, 0, sizeof(*to));
3264
3265 to->si_signo = from->si_signo;
3266 to->si_errno = from->si_errno;
3267 to->si_code = from->si_code;
3268 switch(siginfo_layout(from->si_signo, from->si_code)) {
3269 case SIL_KILL:
3270 to->si_pid = from->si_pid;
3271 to->si_uid = from->si_uid;
3272 break;
3273 case SIL_TIMER:
3274 to->si_tid = from->si_tid;
3275 to->si_overrun = from->si_overrun;
3276 to->si_int = from->si_int;
3277 break;
3278 case SIL_POLL:
3279 to->si_band = from->si_band;
3280 to->si_fd = from->si_fd;
3281 break;
3282 case SIL_FAULT:
3283 to->si_addr = ptr_to_compat(from->si_addr);
3284 #ifdef __ARCH_SI_TRAPNO
3285 to->si_trapno = from->si_trapno;
3286 #endif
3287 break;
3288 case SIL_FAULT_MCEERR:
3289 to->si_addr = ptr_to_compat(from->si_addr);
3290 #ifdef __ARCH_SI_TRAPNO
3291 to->si_trapno = from->si_trapno;
3292 #endif
3293 to->si_addr_lsb = from->si_addr_lsb;
3294 break;
3295 case SIL_FAULT_BNDERR:
3296 to->si_addr = ptr_to_compat(from->si_addr);
3297 #ifdef __ARCH_SI_TRAPNO
3298 to->si_trapno = from->si_trapno;
3299 #endif
3300 to->si_lower = ptr_to_compat(from->si_lower);
3301 to->si_upper = ptr_to_compat(from->si_upper);
3302 break;
3303 case SIL_FAULT_PKUERR:
3304 to->si_addr = ptr_to_compat(from->si_addr);
3305 #ifdef __ARCH_SI_TRAPNO
3306 to->si_trapno = from->si_trapno;
3307 #endif
3308 to->si_pkey = from->si_pkey;
3309 break;
3310 case SIL_CHLD:
3311 to->si_pid = from->si_pid;
3312 to->si_uid = from->si_uid;
3313 to->si_status = from->si_status;
3314 to->si_utime = from->si_utime;
3315 to->si_stime = from->si_stime;
3316 break;
3317 case SIL_RT:
3318 to->si_pid = from->si_pid;
3319 to->si_uid = from->si_uid;
3320 to->si_int = from->si_int;
3321 break;
3322 case SIL_SYS:
3323 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3324 to->si_syscall = from->si_syscall;
3325 to->si_arch = from->si_arch;
3326 break;
3327 }
3328 }
3329
3330 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3331 const struct kernel_siginfo *from)
3332 {
3333 struct compat_siginfo new;
3334
3335 copy_siginfo_to_external32(&new, from);
3336 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3337 return -EFAULT;
3338 return 0;
3339 }
3340
3341 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3342 const struct compat_siginfo *from)
3343 {
3344 clear_siginfo(to);
3345 to->si_signo = from->si_signo;
3346 to->si_errno = from->si_errno;
3347 to->si_code = from->si_code;
3348 switch(siginfo_layout(from->si_signo, from->si_code)) {
3349 case SIL_KILL:
3350 to->si_pid = from->si_pid;
3351 to->si_uid = from->si_uid;
3352 break;
3353 case SIL_TIMER:
3354 to->si_tid = from->si_tid;
3355 to->si_overrun = from->si_overrun;
3356 to->si_int = from->si_int;
3357 break;
3358 case SIL_POLL:
3359 to->si_band = from->si_band;
3360 to->si_fd = from->si_fd;
3361 break;
3362 case SIL_FAULT:
3363 to->si_addr = compat_ptr(from->si_addr);
3364 #ifdef __ARCH_SI_TRAPNO
3365 to->si_trapno = from->si_trapno;
3366 #endif
3367 break;
3368 case SIL_FAULT_MCEERR:
3369 to->si_addr = compat_ptr(from->si_addr);
3370 #ifdef __ARCH_SI_TRAPNO
3371 to->si_trapno = from->si_trapno;
3372 #endif
3373 to->si_addr_lsb = from->si_addr_lsb;
3374 break;
3375 case SIL_FAULT_BNDERR:
3376 to->si_addr = compat_ptr(from->si_addr);
3377 #ifdef __ARCH_SI_TRAPNO
3378 to->si_trapno = from->si_trapno;
3379 #endif
3380 to->si_lower = compat_ptr(from->si_lower);
3381 to->si_upper = compat_ptr(from->si_upper);
3382 break;
3383 case SIL_FAULT_PKUERR:
3384 to->si_addr = compat_ptr(from->si_addr);
3385 #ifdef __ARCH_SI_TRAPNO
3386 to->si_trapno = from->si_trapno;
3387 #endif
3388 to->si_pkey = from->si_pkey;
3389 break;
3390 case SIL_CHLD:
3391 to->si_pid = from->si_pid;
3392 to->si_uid = from->si_uid;
3393 to->si_status = from->si_status;
3394 #ifdef CONFIG_X86_X32_ABI
3395 if (in_x32_syscall()) {
3396 to->si_utime = from->_sifields._sigchld_x32._utime;
3397 to->si_stime = from->_sifields._sigchld_x32._stime;
3398 } else
3399 #endif
3400 {
3401 to->si_utime = from->si_utime;
3402 to->si_stime = from->si_stime;
3403 }
3404 break;
3405 case SIL_RT:
3406 to->si_pid = from->si_pid;
3407 to->si_uid = from->si_uid;
3408 to->si_int = from->si_int;
3409 break;
3410 case SIL_SYS:
3411 to->si_call_addr = compat_ptr(from->si_call_addr);
3412 to->si_syscall = from->si_syscall;
3413 to->si_arch = from->si_arch;
3414 break;
3415 }
3416 return 0;
3417 }
3418
3419 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3420 const struct compat_siginfo __user *ufrom)
3421 {
3422 struct compat_siginfo from;
3423
3424 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3425 return -EFAULT;
3426
3427 from.si_signo = signo;
3428 return post_copy_siginfo_from_user32(to, &from);
3429 }
3430
3431 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3432 const struct compat_siginfo __user *ufrom)
3433 {
3434 struct compat_siginfo from;
3435
3436 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3437 return -EFAULT;
3438
3439 return post_copy_siginfo_from_user32(to, &from);
3440 }
3441 #endif /* CONFIG_COMPAT */
3442
3443 /**
3444 * do_sigtimedwait - wait for queued signals specified in @which
3445 * @which: queued signals to wait for
3446 * @info: if non-null, the signal's siginfo is returned here
3447 * @ts: upper bound on process time suspension
3448 */
3449 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3450 const struct timespec64 *ts)
3451 {
3452 ktime_t *to = NULL, timeout = KTIME_MAX;
3453 struct task_struct *tsk = current;
3454 sigset_t mask = *which;
3455 int sig, ret = 0;
3456
3457 if (ts) {
3458 if (!timespec64_valid(ts))
3459 return -EINVAL;
3460 timeout = timespec64_to_ktime(*ts);
3461 to = &timeout;
3462 }
3463
3464 /*
3465 * Invert the set of allowed signals to get those we want to block.
3466 */
3467 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3468 signotset(&mask);
3469
3470 spin_lock_irq(&tsk->sighand->siglock);
3471 sig = dequeue_signal(tsk, &mask, info);
3472 if (!sig && timeout) {
3473 /*
3474 * None ready. Temporarily unblock those we're interested in
3475 * while we are sleeping, so that we'll be awakened when they
3476 * arrive. Unblocking is always fine, we can avoid
3477 * set_current_blocked().
3478 */
3479 tsk->real_blocked = tsk->blocked;
3480 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3481 recalc_sigpending();
3482 spin_unlock_irq(&tsk->sighand->siglock);
3483
3484 __set_current_state(TASK_INTERRUPTIBLE);
3485 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3486 HRTIMER_MODE_REL);
3487 spin_lock_irq(&tsk->sighand->siglock);
3488 __set_task_blocked(tsk, &tsk->real_blocked);
3489 sigemptyset(&tsk->real_blocked);
3490 sig = dequeue_signal(tsk, &mask, info);
3491 }
3492 spin_unlock_irq(&tsk->sighand->siglock);
3493
3494 if (sig)
3495 return sig;
3496 return ret ? -EINTR : -EAGAIN;
3497 }
3498
3499 /**
3500 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3501 * in @uthese
3502 * @uthese: queued signals to wait for
3503 * @uinfo: if non-null, the signal's siginfo is returned here
3504 * @uts: upper bound on process time suspension
3505 * @sigsetsize: size of sigset_t type
3506 */
3507 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3508 siginfo_t __user *, uinfo,
3509 const struct __kernel_timespec __user *, uts,
3510 size_t, sigsetsize)
3511 {
3512 sigset_t these;
3513 struct timespec64 ts;
3514 kernel_siginfo_t info;
3515 int ret;
3516
3517 /* XXX: Don't preclude handling different sized sigset_t's. */
3518 if (sigsetsize != sizeof(sigset_t))
3519 return -EINVAL;
3520
3521 if (copy_from_user(&these, uthese, sizeof(these)))
3522 return -EFAULT;
3523
3524 if (uts) {
3525 if (get_timespec64(&ts, uts))
3526 return -EFAULT;
3527 }
3528
3529 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3530
3531 if (ret > 0 && uinfo) {
3532 if (copy_siginfo_to_user(uinfo, &info))
3533 ret = -EFAULT;
3534 }
3535
3536 return ret;
3537 }
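/*
 * Userspace view, as a hedged sketch: block the signal first, then
 * pull it synchronously with a timeout through the libc wrapper for
 * this syscall:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// must be blocked first
 *	if (sigtimedwait(&set, &si, &ts) < 0)
 *		;	// errno is EAGAIN on timeout, EINTR otherwise
 */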
3538
3539 #ifdef CONFIG_COMPAT_32BIT_TIME
3540 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3541 siginfo_t __user *, uinfo,
3542 const struct old_timespec32 __user *, uts,
3543 size_t, sigsetsize)
3544 {
3545 sigset_t these;
3546 struct timespec64 ts;
3547 kernel_siginfo_t info;
3548 int ret;
3549
3550 if (sigsetsize != sizeof(sigset_t))
3551 return -EINVAL;
3552
3553 if (copy_from_user(&these, uthese, sizeof(these)))
3554 return -EFAULT;
3555
3556 if (uts) {
3557 if (get_old_timespec32(&ts, uts))
3558 return -EFAULT;
3559 }
3560
3561 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3562
3563 if (ret > 0 && uinfo) {
3564 if (copy_siginfo_to_user(uinfo, &info))
3565 ret = -EFAULT;
3566 }
3567
3568 return ret;
3569 }
3570 #endif
3571
3572 #ifdef CONFIG_COMPAT
3573 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3574 struct compat_siginfo __user *, uinfo,
3575 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3576 {
3577 sigset_t s;
3578 struct timespec64 t;
3579 kernel_siginfo_t info;
3580 long ret;
3581
3582 if (sigsetsize != sizeof(sigset_t))
3583 return -EINVAL;
3584
3585 if (get_compat_sigset(&s, uthese))
3586 return -EFAULT;
3587
3588 if (uts) {
3589 if (get_timespec64(&t, uts))
3590 return -EFAULT;
3591 }
3592
3593 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3594
3595 if (ret > 0 && uinfo) {
3596 if (copy_siginfo_to_user32(uinfo, &info))
3597 ret = -EFAULT;
3598 }
3599
3600 return ret;
3601 }
3602
3603 #ifdef CONFIG_COMPAT_32BIT_TIME
3604 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3605 struct compat_siginfo __user *, uinfo,
3606 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3607 {
3608 sigset_t s;
3609 struct timespec64 t;
3610 kernel_siginfo_t info;
3611 long ret;
3612
3613 if (sigsetsize != sizeof(sigset_t))
3614 return -EINVAL;
3615
3616 if (get_compat_sigset(&s, uthese))
3617 return -EFAULT;
3618
3619 if (uts) {
3620 if (get_old_timespec32(&t, uts))
3621 return -EFAULT;
3622 }
3623
3624 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3625
3626 if (ret > 0 && uinfo) {
3627 if (copy_siginfo_to_user32(uinfo, &info))
3628 ret = -EFAULT;
3629 }
3630
3631 return ret;
3632 }
3633 #endif
3634 #endif
3635
3636 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3637 {
3638 clear_siginfo(info);
3639 info->si_signo = sig;
3640 info->si_errno = 0;
3641 info->si_code = SI_USER;
3642 info->si_pid = task_tgid_vnr(current);
3643 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3644 }
3645
3646 /**
3647 * sys_kill - send a signal to a process
3648 * @pid: the PID of the process
3649 * @sig: signal to be sent
3650 */
3651 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3652 {
3653 struct kernel_siginfo info;
3654
3655 prepare_kill_siginfo(sig, &info);
3656
3657 return kill_something_info(sig, &info, pid);
3658 }
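/*
 * Illustrative userspace usage (not part of this file): through
 * kill_something_info(), sys_kill() implements the usual kill(2) @pid
 * conventions, sketched here with error handling omitted.
 *
 *	#include <signal.h>
 *
 *	static void examples(pid_t grp)
 *	{
 *		kill(1234, SIGTERM);	// pid > 0: that single process
 *		kill(0, SIGTERM);	// pid == 0: caller's process group
 *		kill(-1, SIGTERM);	// pid == -1: all we may signal
 *		kill(-grp, SIGTERM);	// pid < -1: process group grp
 *	}
 */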
3659
3660 /*
3661 * Verify that the signaler and signalee either are in the same pid namespace
3662 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3663 * namespace.
3664 */
3665 static bool access_pidfd_pidns(struct pid *pid)
3666 {
3667 struct pid_namespace *active = task_active_pid_ns(current);
3668 struct pid_namespace *p = ns_of_pid(pid);
3669
3670 for (;;) {
3671 if (!p)
3672 return false;
3673 if (p == active)
3674 break;
3675 p = p->parent;
3676 }
3677
3678 return true;
3679 }
3680
3681 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3682 {
3683 #ifdef CONFIG_COMPAT
3684 /*
3685 * Avoid hooking up compat syscalls and instead handle necessary
3686 * conversions here. Note, this is a stop-gap measure and should not be
3687 * considered a generic solution.
3688 */
3689 if (in_compat_syscall())
3690 return copy_siginfo_from_user32(
3691 kinfo, (struct compat_siginfo __user *)info);
3692 #endif
3693 return copy_siginfo_from_user(kinfo, info);
3694 }
3695
3696 static struct pid *pidfd_to_pid(const struct file *file)
3697 {
3698 struct pid *pid;
3699
3700 pid = pidfd_pid(file);
3701 if (!IS_ERR(pid))
3702 return pid;
3703
3704 return tgid_pidfd_to_pid(file);
3705 }
3706
3707 /**
3708 * sys_pidfd_send_signal - Signal a process through a pidfd
3709 * @pidfd: file descriptor of the process
3710 * @sig: signal to send
3711 * @info: signal info
3712 * @flags: future flags
3713 *
3714  * The syscall currently only signals via PIDTYPE_PID, which covers
3715  * kill(<positive-pid>, <signal>). It does not signal threads or process
3716  * groups.
3717  * In order to extend the syscall to threads and process groups the @flags
3718  * argument should be used. In essence, the @flags argument will determine
3719  * what is signaled and not the file descriptor itself. In other words,
3720  * grouping is a property of the flags argument, not a property of the file
3721  * descriptor.
3722 *
3723 * Return: 0 on success, negative errno on failure
3724 */
3725 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3726 siginfo_t __user *, info, unsigned int, flags)
3727 {
3728 int ret;
3729 struct fd f;
3730 struct pid *pid;
3731 kernel_siginfo_t kinfo;
3732
3733 	/* Enforce that flags be set to 0 until we add an extension. */
3734 if (flags)
3735 return -EINVAL;
3736
3737 f = fdget(pidfd);
3738 if (!f.file)
3739 return -EBADF;
3740
3741 /* Is this a pidfd? */
3742 pid = pidfd_to_pid(f.file);
3743 if (IS_ERR(pid)) {
3744 ret = PTR_ERR(pid);
3745 goto err;
3746 }
3747
3748 ret = -EINVAL;
3749 if (!access_pidfd_pidns(pid))
3750 goto err;
3751
3752 if (info) {
3753 ret = copy_siginfo_from_user_any(&kinfo, info);
3754 if (unlikely(ret))
3755 goto err;
3756
3757 ret = -EINVAL;
3758 if (unlikely(sig != kinfo.si_signo))
3759 goto err;
3760
3761 /* Only allow sending arbitrary signals to yourself. */
3762 ret = -EPERM;
3763 if ((task_pid(current) != pid) &&
3764 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3765 goto err;
3766 } else {
3767 prepare_kill_siginfo(sig, &kinfo);
3768 }
3769
3770 ret = kill_pid_info(sig, &kinfo, pid);
3771
3772 err:
3773 fdput(f);
3774 return ret;
3775 }
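/*
 * Illustrative userspace usage (not part of this file): a hedged sketch of
 * signaling through a pidfd. The SYS_pidfd_open and SYS_pidfd_send_signal
 * constants assume reasonably recent kernel headers; with info == NULL the
 * kernel fills in SI_USER details via prepare_kill_siginfo() above.
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int signal_by_pidfd(pid_t pid)
 *	{
 *		int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// flags must be 0 for now, see the -EINVAL check above.
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */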
3776
3777 static int
3778 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3779 {
3780 struct task_struct *p;
3781 int error = -ESRCH;
3782
3783 rcu_read_lock();
3784 p = find_task_by_vpid(pid);
3785 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3786 error = check_kill_permission(sig, info, p);
3787 /*
3788 * The null signal is a permissions and process existence
3789 * probe. No signal is actually delivered.
3790 */
3791 if (!error && sig) {
3792 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3793 /*
3794 * If lock_task_sighand() failed we pretend the task
3795 * dies after receiving the signal. The window is tiny,
3796 * and the signal is private anyway.
3797 */
3798 if (unlikely(error == -ESRCH))
3799 error = 0;
3800 }
3801 }
3802 rcu_read_unlock();
3803
3804 return error;
3805 }
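/*
 * Illustrative userspace usage (not part of this file): the null-signal
 * probe described above, shown here with plain kill(2). Signal 0 runs the
 * permission and existence checks without delivering anything.
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	static int process_exists(pid_t pid)
 *	{
 *		if (kill(pid, 0) == 0)
 *			return 1;		// exists, and we may signal it
 *		return errno == EPERM;		// exists, but no permission
 *	}
 */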
3806
3807 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3808 {
3809 struct kernel_siginfo info;
3810
3811 clear_siginfo(&info);
3812 info.si_signo = sig;
3813 info.si_errno = 0;
3814 info.si_code = SI_TKILL;
3815 info.si_pid = task_tgid_vnr(current);
3816 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3817
3818 return do_send_specific(tgid, pid, sig, &info);
3819 }
3820
3821 /**
3822 * sys_tgkill - send signal to one specific thread
3823 * @tgid: the thread group ID of the thread
3824 * @pid: the PID of the thread
3825 * @sig: signal to be sent
3826 *
3827  * This syscall also checks the @tgid and returns -ESRCH even if the PID
3828  * exists but no longer belongs to the target process. This method
3829  * solves the problem of threads exiting and PIDs getting reused.
3830 */
3831 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3832 {
3833 /* This is only valid for single tasks */
3834 if (pid <= 0 || tgid <= 0)
3835 return -EINVAL;
3836
3837 return do_tkill(tgid, pid, sig);
3838 }
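/*
 * Illustrative userspace usage (not part of this file): directing a signal
 * at one thread. The raw syscall is used since older libcs lack tgkill()
 * and gettid() wrappers; the @tgid check above turns a recycled TID in a
 * different process into -ESRCH rather than a stray signal.
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static int signal_thread(pid_t tgid, pid_t tid)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 *	}
 */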
3839
3840 /**
3841 * sys_tkill - send signal to one specific task
3842 * @pid: the PID of the task
3843 * @sig: signal to be sent
3844 *
3845 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3846 */
3847 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3848 {
3849 /* This is only valid for single tasks */
3850 if (pid <= 0)
3851 return -EINVAL;
3852
3853 return do_tkill(0, pid, sig);
3854 }
3855
3856 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3857 {
3858 /* Not even root can pretend to send signals from the kernel.
3859 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3860 */
3861 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3862 (task_pid_vnr(current) != pid))
3863 return -EPERM;
3864
3865 /* POSIX.1b doesn't mention process groups. */
3866 return kill_proc_info(sig, info, pid);
3867 }
3868
3869 /**
3870  * sys_rt_sigqueueinfo - send signal information to a process
3871  * @pid: the PID of the process
3872 * @sig: signal to be sent
3873 * @uinfo: signal info to be sent
3874 */
3875 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3876 siginfo_t __user *, uinfo)
3877 {
3878 kernel_siginfo_t info;
3879 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3880 if (unlikely(ret))
3881 return ret;
3882 return do_rt_sigqueueinfo(pid, sig, &info);
3883 }
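/*
 * Illustrative userspace usage (not part of this file): rt_sigqueueinfo is
 * normally reached through sigqueue(3), which builds a siginfo with
 * si_code == SI_QUEUE and a caller-chosen payload, so the -EPERM check on
 * kernel si_codes above never fires for it.
 *
 *	#include <signal.h>
 *
 *	static int send_value(pid_t pid, int value)
 *	{
 *		union sigval val = { .sival_int = value };
 *
 *		// The receiver reads it back as info->si_value.sival_int
 *		// in an SA_SIGINFO handler.
 *		return sigqueue(pid, SIGRTMIN, val);
 *	}
 */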
3884
3885 #ifdef CONFIG_COMPAT
3886 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3887 compat_pid_t, pid,
3888 int, sig,
3889 struct compat_siginfo __user *, uinfo)
3890 {
3891 kernel_siginfo_t info;
3892 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3893 if (unlikely(ret))
3894 return ret;
3895 return do_rt_sigqueueinfo(pid, sig, &info);
3896 }
3897 #endif
3898
3899 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3900 {
3901 /* This is only valid for single tasks */
3902 if (pid <= 0 || tgid <= 0)
3903 return -EINVAL;
3904
3905 /* Not even root can pretend to send signals from the kernel.
3906 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3907 */
3908 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3909 (task_pid_vnr(current) != pid))
3910 return -EPERM;
3911
3912 return do_send_specific(tgid, pid, sig, info);
3913 }
3914
3915 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3916 siginfo_t __user *, uinfo)
3917 {
3918 kernel_siginfo_t info;
3919 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3920 if (unlikely(ret))
3921 return ret;
3922 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3923 }
3924
3925 #ifdef CONFIG_COMPAT
3926 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3927 compat_pid_t, tgid,
3928 compat_pid_t, pid,
3929 int, sig,
3930 struct compat_siginfo __user *, uinfo)
3931 {
3932 kernel_siginfo_t info;
3933 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3934 if (unlikely(ret))
3935 return ret;
3936 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3937 }
3938 #endif
3939
3940 /*
3941 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3942 */
3943 void kernel_sigaction(int sig, __sighandler_t action)
3944 {
3945 	spin_lock_irq(&current->sighand->siglock);
3946 current->sighand->action[sig - 1].sa.sa_handler = action;
3947 if (action == SIG_IGN) {
3948 sigset_t mask;
3949
3950 sigemptyset(&mask);
3951 sigaddset(&mask, sig);
3952
3953 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3954 		flush_sigqueue_mask(&mask, &current->pending);
3955 recalc_sigpending();
3956 }
3957 	spin_unlock_irq(&current->sighand->siglock);
3958 }
3959 EXPORT_SYMBOL(kernel_sigaction);
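/*
 * Illustrative in-kernel usage (a hedged sketch, not taken from a real
 * caller): kernel_sigaction() is the primitive behind allow_signal() and
 * disallow_signal(), so a kthread that wants to react to SIGTERM might do
 * something like this.
 *
 *	static int my_kthread(void *data)
 *	{
 *		kernel_sigaction(SIGTERM, SIG_DFL);	// accept SIGTERM
 *		while (!kthread_should_stop()) {
 *			if (signal_pending(current))
 *				break;			// told to shut down
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		kernel_sigaction(SIGTERM, SIG_IGN);	// back to ignoring
 *		return 0;
 *	}
 */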
3960
3961 void __weak sigaction_compat_abi(struct k_sigaction *act,
3962 struct k_sigaction *oact)
3963 {
3964 }
3965
3966 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3967 {
3968 struct task_struct *p = current, *t;
3969 struct k_sigaction *k;
3970 sigset_t mask;
3971
3972 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3973 return -EINVAL;
3974
3975 k = &p->sighand->action[sig-1];
3976
3977 spin_lock_irq(&p->sighand->siglock);
3978 if (oact)
3979 *oact = *k;
3980
3981 sigaction_compat_abi(act, oact);
3982
3983 if (act) {
3984 sigdelsetmask(&act->sa.sa_mask,
3985 sigmask(SIGKILL) | sigmask(SIGSTOP));
3986 *k = *act;
3987 /*
3988 * POSIX 3.3.1.3:
3989 * "Setting a signal action to SIG_IGN for a signal that is
3990 * pending shall cause the pending signal to be discarded,
3991 * whether or not it is blocked."
3992 *
3993 * "Setting a signal action to SIG_DFL for a signal that is
3994 * pending and whose default action is to ignore the signal
3995 * (for example, SIGCHLD), shall cause the pending signal to
3996 * be discarded, whether or not it is blocked"
3997 */
3998 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3999 sigemptyset(&mask);
4000 sigaddset(&mask, sig);
4001 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4002 for_each_thread(p, t)
4003 flush_sigqueue_mask(&mask, &t->pending);
4004 }
4005 }
4006
4007 spin_unlock_irq(&p->sighand->siglock);
4008 return 0;
4009 }
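/*
 * Illustrative userspace consequence (not part of this file) of the POSIX
 * discard rule quoted above: switching a pending, blocked signal to SIG_IGN
 * throws the queued instance away, which is exactly what the
 * flush_sigqueue_mask() calls implement.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void demo(void)
 *	{
 *		sigset_t set, pend;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);			// now pending
 *		signal(SIGUSR1, SIG_IGN);	// discards it, per POSIX
 *		sigpending(&pend);
 *		printf("%d\n", sigismember(&pend, SIGUSR1));	// prints 0
 *	}
 */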
4010
4011 static int
4012 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4013 size_t min_ss_size)
4014 {
4015 struct task_struct *t = current;
4016
4017 if (oss) {
4018 memset(oss, 0, sizeof(stack_t));
4019 oss->ss_sp = (void __user *) t->sas_ss_sp;
4020 oss->ss_size = t->sas_ss_size;
4021 oss->ss_flags = sas_ss_flags(sp) |
4022 (current->sas_ss_flags & SS_FLAG_BITS);
4023 }
4024
4025 if (ss) {
4026 void __user *ss_sp = ss->ss_sp;
4027 size_t ss_size = ss->ss_size;
4028 unsigned ss_flags = ss->ss_flags;
4029 int ss_mode;
4030
4031 if (unlikely(on_sig_stack(sp)))
4032 return -EPERM;
4033
4034 ss_mode = ss_flags & ~SS_FLAG_BITS;
4035 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4036 ss_mode != 0))
4037 return -EINVAL;
4038
4039 if (ss_mode == SS_DISABLE) {
4040 ss_size = 0;
4041 ss_sp = NULL;
4042 } else {
4043 if (unlikely(ss_size < min_ss_size))
4044 return -ENOMEM;
4045 }
4046
4047 t->sas_ss_sp = (unsigned long) ss_sp;
4048 t->sas_ss_size = ss_size;
4049 t->sas_ss_flags = ss_flags;
4050 }
4051 return 0;
4052 }
4053
4054 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4055 {
4056 stack_t new, old;
4057 int err;
4058 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4059 return -EFAULT;
4060 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4061 current_user_stack_pointer(),
4062 MINSIGSTKSZ);
4063 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4064 err = -EFAULT;
4065 return err;
4066 }
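/*
 * Illustrative userspace usage (not part of this file): the classic
 * sigaltstack(2) setup so a SIGSEGV handler can still run after the main
 * stack overflows. SA_ONSTACK is what routes the handler onto the stack
 * registered here; the size is a hypothetical choice, at least MINSIGSTKSZ.
 *
 *	#include <signal.h>
 *
 *	static char altstack[64 * 1024];
 *
 *	static void install(void (*handler)(int))
 *	{
 *		stack_t ss = {
 *			.ss_sp = altstack,
 *			.ss_size = sizeof(altstack),
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = handler,
 *			.sa_flags = SA_ONSTACK,
 *		};
 *
 *		sigaltstack(&ss, NULL);
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */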
4067
4068 int restore_altstack(const stack_t __user *uss)
4069 {
4070 stack_t new;
4071 if (copy_from_user(&new, uss, sizeof(stack_t)))
4072 return -EFAULT;
4073 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4074 MINSIGSTKSZ);
4075 /* squash all but EFAULT for now */
4076 return 0;
4077 }
4078
4079 int __save_altstack(stack_t __user *uss, unsigned long sp)
4080 {
4081 struct task_struct *t = current;
4082 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4083 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4084 __put_user(t->sas_ss_size, &uss->ss_size);
4085 if (err)
4086 return err;
4087 if (t->sas_ss_flags & SS_AUTODISARM)
4088 sas_ss_reset(t);
4089 return 0;
4090 }
4091
4092 #ifdef CONFIG_COMPAT
4093 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4094 compat_stack_t __user *uoss_ptr)
4095 {
4096 stack_t uss, uoss;
4097 int ret;
4098
4099 if (uss_ptr) {
4100 compat_stack_t uss32;
4101 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4102 return -EFAULT;
4103 uss.ss_sp = compat_ptr(uss32.ss_sp);
4104 uss.ss_flags = uss32.ss_flags;
4105 uss.ss_size = uss32.ss_size;
4106 }
4107 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4108 compat_user_stack_pointer(),
4109 COMPAT_MINSIGSTKSZ);
4110 if (ret >= 0 && uoss_ptr) {
4111 compat_stack_t old;
4112 memset(&old, 0, sizeof(old));
4113 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4114 old.ss_flags = uoss.ss_flags;
4115 old.ss_size = uoss.ss_size;
4116 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4117 ret = -EFAULT;
4118 }
4119 return ret;
4120 }
4121
4122 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4123 const compat_stack_t __user *, uss_ptr,
4124 compat_stack_t __user *, uoss_ptr)
4125 {
4126 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4127 }
4128
4129 int compat_restore_altstack(const compat_stack_t __user *uss)
4130 {
4131 int err = do_compat_sigaltstack(uss, NULL);
4132 /* squash all but -EFAULT for now */
4133 return err == -EFAULT ? err : 0;
4134 }
4135
4136 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4137 {
4138 int err;
4139 struct task_struct *t = current;
4140 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4141 &uss->ss_sp) |
4142 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4143 __put_user(t->sas_ss_size, &uss->ss_size);
4144 if (err)
4145 return err;
4146 if (t->sas_ss_flags & SS_AUTODISARM)
4147 sas_ss_reset(t);
4148 return 0;
4149 }
4150 #endif
4151
4152 #ifdef __ARCH_WANT_SYS_SIGPENDING
4153
4154 /**
4155 * sys_sigpending - examine pending signals
4156  * @uset: where the mask of pending signals is returned
4157 */
4158 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4159 {
4160 sigset_t set;
4161
4162 if (sizeof(old_sigset_t) > sizeof(*uset))
4163 return -EINVAL;
4164
4165 do_sigpending(&set);
4166
4167 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4168 return -EFAULT;
4169
4170 return 0;
4171 }
4172
4173 #ifdef CONFIG_COMPAT
4174 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4175 {
4176 sigset_t set;
4177
4178 do_sigpending(&set);
4179
4180 return put_user(set.sig[0], set32);
4181 }
4182 #endif
4183
4184 #endif
4185
4186 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4187 /**
4188 * sys_sigprocmask - examine and change blocked signals
4189 * @how: whether to add, remove, or set signals
4190 * @nset: signals to add or remove (if non-null)
4191 * @oset: previous value of signal mask if non-null
4192 *
4193 * Some platforms have their own version with special arguments;
4194 * others support only sys_rt_sigprocmask.
4195 */
4196
4197 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4198 old_sigset_t __user *, oset)
4199 {
4200 old_sigset_t old_set, new_set;
4201 sigset_t new_blocked;
4202
4203 old_set = current->blocked.sig[0];
4204
4205 if (nset) {
4206 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4207 return -EFAULT;
4208
4209 new_blocked = current->blocked;
4210
4211 switch (how) {
4212 case SIG_BLOCK:
4213 sigaddsetmask(&new_blocked, new_set);
4214 break;
4215 case SIG_UNBLOCK:
4216 sigdelsetmask(&new_blocked, new_set);
4217 break;
4218 case SIG_SETMASK:
4219 new_blocked.sig[0] = new_set;
4220 break;
4221 default:
4222 return -EINVAL;
4223 }
4224
4225 set_current_blocked(&new_blocked);
4226 }
4227
4228 if (oset) {
4229 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4230 return -EFAULT;
4231 }
4232
4233 return 0;
4234 }
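/*
 * Illustrative userspace usage (not part of this file): libc's
 * sigprocmask(3) normally lands on rt_sigprocmask rather than this legacy
 * entry, but the @how semantics are identical. A sketch:
 *
 *	#include <signal.h>
 *
 *	static void with_sigint_blocked(void (*fn)(void))
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, &old);	// add SIGINT to mask
 *		fn();			// SIGINT stays pending meanwhile
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore old mask
 *	}
 */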
4235 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4236
4237 #ifndef CONFIG_ODD_RT_SIGACTION
4238 /**
4239 * sys_rt_sigaction - alter an action taken by a process
4240 * @sig: signal to be sent
4241 * @act: new sigaction
4242 * @oact: used to save the previous sigaction
4243 * @sigsetsize: size of sigset_t type
4244 */
4245 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4246 const struct sigaction __user *, act,
4247 struct sigaction __user *, oact,
4248 size_t, sigsetsize)
4249 {
4250 struct k_sigaction new_sa, old_sa;
4251 int ret;
4252
4253 /* XXX: Don't preclude handling different sized sigset_t's. */
4254 if (sigsetsize != sizeof(sigset_t))
4255 return -EINVAL;
4256
4257 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4258 return -EFAULT;
4259
4260 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4261 if (ret)
4262 return ret;
4263
4264 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4265 return -EFAULT;
4266
4267 return 0;
4268 }
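/*
 * Illustrative userspace usage (not part of this file): sigaction(3) is the
 * usual route into rt_sigaction; libc supplies @sigsetsize and, on
 * architectures that need one, the SA_RESTORER trampoline.
 *
 *	#include <signal.h>
 *
 *	static void on_int(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		// async-signal-safe work only
 *	}
 *
 *	static void install(void)
 *	{
 *		struct sigaction sa = {
 *			.sa_sigaction = on_int,
 *			.sa_flags = SA_SIGINFO | SA_RESTART,
 *		};
 *
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGINT, &sa, NULL);
 *	}
 */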
4269 #ifdef CONFIG_COMPAT
4270 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4271 const struct compat_sigaction __user *, act,
4272 struct compat_sigaction __user *, oact,
4273 compat_size_t, sigsetsize)
4274 {
4275 struct k_sigaction new_ka, old_ka;
4276 #ifdef __ARCH_HAS_SA_RESTORER
4277 compat_uptr_t restorer;
4278 #endif
4279 int ret;
4280
4281 /* XXX: Don't preclude handling different sized sigset_t's. */
4282 if (sigsetsize != sizeof(compat_sigset_t))
4283 return -EINVAL;
4284
4285 if (act) {
4286 compat_uptr_t handler;
4287 ret = get_user(handler, &act->sa_handler);
4288 new_ka.sa.sa_handler = compat_ptr(handler);
4289 #ifdef __ARCH_HAS_SA_RESTORER
4290 ret |= get_user(restorer, &act->sa_restorer);
4291 new_ka.sa.sa_restorer = compat_ptr(restorer);
4292 #endif
4293 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4294 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4295 if (ret)
4296 return -EFAULT;
4297 }
4298
4299 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4300 if (!ret && oact) {
4301 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4302 &oact->sa_handler);
4303 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4304 sizeof(oact->sa_mask));
4305 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4306 #ifdef __ARCH_HAS_SA_RESTORER
4307 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4308 &oact->sa_restorer);
4309 #endif
4310 }
4311 return ret;
4312 }
4313 #endif
4314 #endif /* !CONFIG_ODD_RT_SIGACTION */
4315
4316 #ifdef CONFIG_OLD_SIGACTION
4317 SYSCALL_DEFINE3(sigaction, int, sig,
4318 const struct old_sigaction __user *, act,
4319 struct old_sigaction __user *, oact)
4320 {
4321 struct k_sigaction new_ka, old_ka;
4322 int ret;
4323
4324 if (act) {
4325 old_sigset_t mask;
4326 if (!access_ok(act, sizeof(*act)) ||
4327 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4328 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4329 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4330 __get_user(mask, &act->sa_mask))
4331 return -EFAULT;
4332 #ifdef __ARCH_HAS_KA_RESTORER
4333 new_ka.ka_restorer = NULL;
4334 #endif
4335 siginitset(&new_ka.sa.sa_mask, mask);
4336 }
4337
4338 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4339
4340 if (!ret && oact) {
4341 if (!access_ok(oact, sizeof(*oact)) ||
4342 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4343 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4344 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4345 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4346 return -EFAULT;
4347 }
4348
4349 return ret;
4350 }
4351 #endif
4352 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4353 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4354 const struct compat_old_sigaction __user *, act,
4355 struct compat_old_sigaction __user *, oact)
4356 {
4357 struct k_sigaction new_ka, old_ka;
4358 int ret;
4359 compat_old_sigset_t mask;
4360 compat_uptr_t handler, restorer;
4361
4362 if (act) {
4363 if (!access_ok(act, sizeof(*act)) ||
4364 __get_user(handler, &act->sa_handler) ||
4365 __get_user(restorer, &act->sa_restorer) ||
4366 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4367 __get_user(mask, &act->sa_mask))
4368 return -EFAULT;
4369
4370 #ifdef __ARCH_HAS_KA_RESTORER
4371 new_ka.ka_restorer = NULL;
4372 #endif
4373 new_ka.sa.sa_handler = compat_ptr(handler);
4374 new_ka.sa.sa_restorer = compat_ptr(restorer);
4375 siginitset(&new_ka.sa.sa_mask, mask);
4376 }
4377
4378 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4379
4380 if (!ret && oact) {
4381 if (!access_ok(oact, sizeof(*oact)) ||
4382 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4383 &oact->sa_handler) ||
4384 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4385 &oact->sa_restorer) ||
4386 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4387 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4388 return -EFAULT;
4389 }
4390 return ret;
4391 }
4392 #endif
4393
4394 #ifdef CONFIG_SGETMASK_SYSCALL
4395
4396 /*
4397 * For backwards compatibility. Functionality superseded by sigprocmask.
4398 */
4399 SYSCALL_DEFINE0(sgetmask)
4400 {
4401 /* SMP safe */
4402 return current->blocked.sig[0];
4403 }
4404
4405 SYSCALL_DEFINE1(ssetmask, int, newmask)
4406 {
4407 int old = current->blocked.sig[0];
4408 sigset_t newset;
4409
4410 siginitset(&newset, newmask);
4411 set_current_blocked(&newset);
4412
4413 return old;
4414 }
4415 #endif /* CONFIG_SGETMASK_SYSCALL */
4416
4417 #ifdef __ARCH_WANT_SYS_SIGNAL
4418 /*
4419 * For backwards compatibility. Functionality superseded by sigaction.
4420 */
4421 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4422 {
4423 struct k_sigaction new_sa, old_sa;
4424 int ret;
4425
4426 new_sa.sa.sa_handler = handler;
4427 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4428 sigemptyset(&new_sa.sa.sa_mask);
4429
4430 ret = do_sigaction(sig, &new_sa, &old_sa);
4431
4432 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4433 }
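/*
 * Illustrative userspace note (not part of this file): SA_ONESHOT|SA_NOMASK
 * gives this legacy entry System V semantics, so a handler installed via
 * the raw syscall is reset to SIG_DFL on delivery and must re-arm itself;
 * libc's signal(3) usually maps to sigaction() with BSD semantics instead.
 *
 *	static void on_usr1(int sig)
 *	{
 *		// Under SysV semantics the disposition is already back to
 *		// SIG_DFL here, so re-install before doing anything else.
 *		signal(SIGUSR1, on_usr1);
 *	}
 */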
4434 #endif /* __ARCH_WANT_SYS_SIGNAL */
4435
4436 #ifdef __ARCH_WANT_SYS_PAUSE
4437
4438 SYSCALL_DEFINE0(pause)
4439 {
4440 while (!signal_pending(current)) {
4441 __set_current_state(TASK_INTERRUPTIBLE);
4442 schedule();
4443 }
4444 return -ERESTARTNOHAND;
4445 }
4446
4447 #endif
4448
4449 static int sigsuspend(sigset_t *set)
4450 {
4451 current->saved_sigmask = current->blocked;
4452 set_current_blocked(set);
4453
4454 while (!signal_pending(current)) {
4455 __set_current_state(TASK_INTERRUPTIBLE);
4456 schedule();
4457 }
4458 set_restore_sigmask();
4459 return -ERESTARTNOHAND;
4460 }
4461
4462 /**
4463  * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4464  * until a signal is received
4465 * @unewset: new signal mask value
4466 * @sigsetsize: size of sigset_t type
4467 */
4468 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4469 {
4470 sigset_t newset;
4471
4472 /* XXX: Don't preclude handling different sized sigset_t's. */
4473 if (sigsetsize != sizeof(sigset_t))
4474 return -EINVAL;
4475
4476 if (copy_from_user(&newset, unewset, sizeof(newset)))
4477 return -EFAULT;
4478 return sigsuspend(&newset);
4479 }
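/*
 * Illustrative userspace usage (not part of this file): the textbook
 * race-free wait that sigsuspend() exists for. The signal is blocked while
 * the flag is tested and atomically unblocked only for the duration of the
 * sleep, closing the lost-wakeup window that plain pause() leaves open.
 * This assumes a SIGUSR1 handler somewhere that sets got_usr1.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void wait_for_flag(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// returns -1 with EINTR
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */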
4480
4481 #ifdef CONFIG_COMPAT
4482 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4483 {
4484 sigset_t newset;
4485
4486 /* XXX: Don't preclude handling different sized sigset_t's. */
4487 if (sigsetsize != sizeof(sigset_t))
4488 return -EINVAL;
4489
4490 if (get_compat_sigset(&newset, unewset))
4491 return -EFAULT;
4492 return sigsuspend(&newset);
4493 }
4494 #endif
4495
4496 #ifdef CONFIG_OLD_SIGSUSPEND
4497 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4498 {
4499 sigset_t blocked;
4500 siginitset(&blocked, mask);
4501 return sigsuspend(&blocked);
4502 }
4503 #endif
4504 #ifdef CONFIG_OLD_SIGSUSPEND3
4505 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4506 {
4507 sigset_t blocked;
4508 siginitset(&blocked, mask);
4509 return sigsuspend(&blocked);
4510 }
4511 #endif
4512
4513 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4514 {
4515 return NULL;
4516 }
4517
4518 static inline void siginfo_buildtime_checks(void)
4519 {
4520 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4521
4522 /* Verify the offsets in the two siginfos match */
4523 #define CHECK_OFFSET(field) \
4524 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4525
4526 /* kill */
4527 CHECK_OFFSET(si_pid);
4528 CHECK_OFFSET(si_uid);
4529
4530 /* timer */
4531 CHECK_OFFSET(si_tid);
4532 CHECK_OFFSET(si_overrun);
4533 CHECK_OFFSET(si_value);
4534
4535 /* rt */
4536 CHECK_OFFSET(si_pid);
4537 CHECK_OFFSET(si_uid);
4538 CHECK_OFFSET(si_value);
4539
4540 /* sigchld */
4541 CHECK_OFFSET(si_pid);
4542 CHECK_OFFSET(si_uid);
4543 CHECK_OFFSET(si_status);
4544 CHECK_OFFSET(si_utime);
4545 CHECK_OFFSET(si_stime);
4546
4547 /* sigfault */
4548 CHECK_OFFSET(si_addr);
4549 CHECK_OFFSET(si_addr_lsb);
4550 CHECK_OFFSET(si_lower);
4551 CHECK_OFFSET(si_upper);
4552 CHECK_OFFSET(si_pkey);
4553
4554 /* sigpoll */
4555 CHECK_OFFSET(si_band);
4556 CHECK_OFFSET(si_fd);
4557
4558 /* sigsys */
4559 CHECK_OFFSET(si_call_addr);
4560 CHECK_OFFSET(si_syscall);
4561 CHECK_OFFSET(si_arch);
4562 #undef CHECK_OFFSET
4563
4564 /* usb asyncio */
4565 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4566 offsetof(struct siginfo, si_addr));
4567 if (sizeof(int) == sizeof(void __user *)) {
4568 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4569 sizeof(void __user *));
4570 } else {
4571 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4572 sizeof_field(struct siginfo, si_uid)) !=
4573 sizeof(void __user *));
4574 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4575 offsetof(struct siginfo, si_uid));
4576 }
4577 #ifdef CONFIG_COMPAT
4578 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4579 offsetof(struct compat_siginfo, si_addr));
4580 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4581 sizeof(compat_uptr_t));
4582 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4583 sizeof_field(struct siginfo, si_pid));
4584 #endif
4585 }
4586
4587 void __init signals_init(void)
4588 {
4589 siginfo_buildtime_checks();
4590
4591 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4592 }
4593
4594 #ifdef CONFIG_KGDB_KDB
4595 #include <linux/kdb.h>
4596 /*
4597 * kdb_send_sig - Allows kdb to send signals without exposing
4598 * signal internals. This function checks if the required locks are
4599 * available before calling the main signal code, to avoid kdb
4600 * deadlocks.
4601 */
4602 void kdb_send_sig(struct task_struct *t, int sig)
4603 {
4604 static struct task_struct *kdb_prev_t;
4605 int new_t, ret;
4606 if (!spin_trylock(&t->sighand->siglock)) {
4607 kdb_printf("Can't do kill command now.\n"
4608 "The sigmask lock is held somewhere else in "
4609 "kernel, try again later\n");
4610 return;
4611 }
4612 new_t = kdb_prev_t != t;
4613 kdb_prev_t = t;
4614 if (t->state != TASK_RUNNING && new_t) {
4615 spin_unlock(&t->sighand->siglock);
4616 kdb_printf("Process is not RUNNING, sending a signal from "
4617 "kdb risks deadlock\n"
4618 "on the run queue locks. "
4619 "The signal has _not_ been sent.\n"
4620 "Reissue the kill command if you want to risk "
4621 "the deadlock.\n");
4622 return;
4623 }
4624 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4625 spin_unlock(&t->sighand->siglock);
4626 if (ret)
4627 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4628 sig, t->pid);
4629 else
4630 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4631 }
4632 #endif /* CONFIG_KGDB_KDB */
4633