Lines matching refs:tsk (all in kernel/signal.c)
517 struct task_struct *tsk = current; in flush_itimer_signals() local
520 spin_lock_irqsave(&tsk->sighand->siglock, flags); in flush_itimer_signals()
521 __flush_itimer_signals(&tsk->pending); in flush_itimer_signals()
522 __flush_itimer_signals(&tsk->signal->shared_pending); in flush_itimer_signals()
523 spin_unlock_irqrestore(&tsk->sighand->siglock, flags); in flush_itimer_signals()
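The block above is flush_itimer_signals(): under sighand->siglock the kernel discards queued interval-timer signals from both the per-thread queue (tsk->pending) and the process-wide queue (tsk->signal->shared_pending). The signals it drops are the SIGALRMs a process arms with setitimer(2); a minimal userspace sketch of how those signals get generated in the first place (plain POSIX, nothing kernel-internal assumed):

/* Hedged sketch: arm ITIMER_REAL so each expiry queues SIGALRM against the
 * process -- the kind of pending itimer signal flush_itimer_signals() discards.
 * Build: cc -o itimer itimer.c */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alrm(int sig) { (void)sig; ticks++; }

int main(void)
{
        struct sigaction sa;
        struct itimerval it = {
                .it_interval = { 0, 100000 },   /* re-arm every 100 ms */
                .it_value    = { 0, 100000 },
        };

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_alrm;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGALRM, &sa, NULL);

        setitimer(ITIMER_REAL, &it, NULL);      /* each expiry queues SIGALRM */

        while (ticks < 5)
                pause();                        /* woken once per delivered SIGALRM */

        printf("received %d SIGALRM ticks\n", (int)ticks);
        return 0;
}
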
558 bool unhandled_signal(struct task_struct *tsk, int sig) in unhandled_signal() argument
560 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; in unhandled_signal()
561 if (is_global_init(tsk)) in unhandled_signal()
568 return !tsk->ptrace; in unhandled_signal()
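unhandled_signal() looks up action[sig-1] and reports whether the task left the signal at SIG_DFL or SIG_IGN (init always counts as unhandled, and ptraced tasks never do). The userspace view of the same disposition slot is sigaction() with a NULL new action; a hedged sketch:

/* Hedged sketch: query a signal's current disposition with
 * sigaction(sig, NULL, &old) -- the same action[sig-1] slot the kernel's
 * unhandled_signal() inspects. "Unhandled" here just means no user handler. */
#include <signal.h>
#include <stdio.h>

static int is_unhandled(int sig)
{
        struct sigaction old;

        if (sigaction(sig, NULL, &old) != 0)
                return -1;
        return old.sa_handler == SIG_DFL || old.sa_handler == SIG_IGN;
}

int main(void)
{
        signal(SIGUSR1, SIG_IGN);
        printf("SIGUSR1 unhandled: %d\n", is_unhandled(SIGUSR1));
        printf("SIGTERM unhandled: %d\n", is_unhandled(SIGTERM));
        return 0;
}
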
632 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info) in dequeue_signal() argument
640 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); in dequeue_signal()
642 signr = __dequeue_signal(&tsk->signal->shared_pending, in dequeue_signal()
659 struct hrtimer *tmr = &tsk->signal->real_timer; in dequeue_signal()
662 tsk->signal->it_real_incr != 0) { in dequeue_signal()
664 tsk->signal->it_real_incr); in dequeue_signal()
698 spin_unlock(&tsk->sighand->siglock); in dequeue_signal()
700 spin_lock(&tsk->sighand->siglock); in dequeue_signal()
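dequeue_signal() tries the per-thread queue first and falls back to the shared queue; if what it pulled was the ITIMER_REAL expiry and it_real_incr is nonzero, it re-arms the hrtimer. The closest userspace mirror of the dequeue itself is sigwaitinfo(2): block a signal so it stays pending, raise it, then pull it synchronously. A small sketch (a process-directed kill() lands on the shared queue; a pthread_kill() would land on the per-thread one):

/* Hedged sketch of a synchronous dequeue from userspace via sigwaitinfo(2). */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* keep SIGUSR1 pending, not delivered */

        kill(getpid(), SIGUSR1);                /* process-directed: shared pending queue */

        if (sigwaitinfo(&set, &info) == SIGUSR1)
                printf("dequeued SIGUSR1 sent by pid %d\n", (int)info.si_pid);
        return 0;
}
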
712 struct task_struct *tsk = current; in dequeue_synchronous_signal() local
713 struct sigpending *pending = &tsk->pending; in dequeue_synchronous_signal()
719 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) in dequeue_synchronous_signal()
1372 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, in __lock_task_sighand() argument
1379 sighand = rcu_dereference(tsk->sighand); in __lock_task_sighand()
1395 if (likely(sighand == rcu_access_pointer(tsk->sighand))) in __lock_task_sighand()
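__lock_task_sighand() reads tsk->sighand under RCU, takes its siglock, then re-checks that tsk->sighand still points at the same structure and retries otherwise. Below is a hedged analogue of that lock-and-revalidate loop in plain C11/pthreads; the types are illustrative, not the kernel API, and unlike the kernel, which relies on RCU to keep a stale sighand_struct's memory valid across the race, this sketch assumes replaced objects are never freed.

/* Hedged analogue: load a pointer that may be swapped concurrently, lock the
 * object it names, re-check the pointer under the lock, retry on mismatch.
 * Build: cc -o lockcheck lockcheck.c -pthread */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct shared {
        pthread_mutex_t lock;
        /* ... protected state ... */
};

struct owner {
        _Atomic(struct shared *) shared;        /* may be replaced or cleared concurrently */
};

static struct shared *lock_owner_shared(struct owner *o)
{
        struct shared *s;

        for (;;) {
                s = atomic_load(&o->shared);
                if (!s)
                        return NULL;                    /* owner is going away */
                pthread_mutex_lock(&s->lock);
                if (s == atomic_load(&o->shared))       /* still the current object? */
                        return s;                       /* yes: return with lock held */
                pthread_mutex_unlock(&s->lock);         /* no: changed under us, retry */
        }
}

int main(void)
{
        static struct shared s = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct owner o;
        struct shared *held;

        atomic_init(&o.shared, &s);
        held = lock_owner_shared(&o);
        if (held)
                pthread_mutex_unlock(&held->lock);
        return 0;
}
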
1919 bool do_notify_parent(struct task_struct *tsk, int sig) in do_notify_parent() argument
1930 WARN_ON_ONCE(task_is_stopped_or_traced(tsk)); in do_notify_parent()
1932 WARN_ON_ONCE(!tsk->ptrace && in do_notify_parent()
1933 (tsk->group_leader != tsk || !thread_group_empty(tsk))); in do_notify_parent()
1936 do_notify_pidfd(tsk); in do_notify_parent()
1943 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id)) in do_notify_parent()
1962 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); in do_notify_parent()
1963 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), in do_notify_parent()
1964 task_uid(tsk)); in do_notify_parent()
1967 task_cputime(tsk, &utime, &stime); in do_notify_parent()
1968 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime); in do_notify_parent()
1969 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime); in do_notify_parent()
1971 info.si_status = tsk->exit_code & 0x7f; in do_notify_parent()
1972 if (tsk->exit_code & 0x80) in do_notify_parent()
1974 else if (tsk->exit_code & 0x7f) in do_notify_parent()
1978 info.si_status = tsk->exit_code >> 8; in do_notify_parent()
1981 psig = tsk->parent->sighand; in do_notify_parent()
1983 if (!tsk->ptrace && sig == SIGCHLD && in do_notify_parent()
2010 __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false); in do_notify_parent()
2011 __wake_up_parent(tsk, tsk->parent); in do_notify_parent()
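do_notify_parent() fills in the siginfo the parent receives with SIGCHLD: the child's si_pid/si_uid, accumulated CPU times, and si_status derived from exit_code (the killing signal in the low 7 bits, 0x80 marking a core dump, or exit_code >> 8 for a normal exit). A hedged parent-side sketch that fetches exactly those fields with sigwaitinfo(2):

/* Hedged sketch: block SIGCHLD, fork a child, then pick up the notification
 * and inspect si_code / si_status before reaping. */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        sigset_t set;
        siginfo_t si;
        pid_t pid;

        sigemptyset(&set);
        sigaddset(&set, SIGCHLD);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* keep SIGCHLD pending until we ask */

        pid = fork();
        if (pid == 0)
                _exit(42);                      /* child: normal exit, status 42 */

        sigwaitinfo(&set, &si);                 /* the notification built by the kernel */
        printf("child %d: si_code=%s si_status=%d\n", (int)si.si_pid,
               si.si_code == CLD_EXITED ? "CLD_EXITED" :
               si.si_code == CLD_KILLED ? "CLD_KILLED" :
               si.si_code == CLD_DUMPED ? "CLD_DUMPED" : "other",
               si.si_status);

        waitpid(pid, NULL, 0);                  /* reap the zombie */
        return 0;
}
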
2030 static void do_notify_parent_cldstop(struct task_struct *tsk, in do_notify_parent_cldstop() argument
2040 parent = tsk->parent; in do_notify_parent_cldstop()
2042 tsk = tsk->group_leader; in do_notify_parent_cldstop()
2043 parent = tsk->real_parent; in do_notify_parent_cldstop()
2053 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent)); in do_notify_parent_cldstop()
2054 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); in do_notify_parent_cldstop()
2057 task_cputime(tsk, &utime, &stime); in do_notify_parent_cldstop()
2067 info.si_status = tsk->signal->group_exit_code & 0x7f; in do_notify_parent_cldstop()
2070 info.si_status = tsk->exit_code & 0x7f; in do_notify_parent_cldstop()
2084 __wake_up_parent(tsk, parent); in do_notify_parent_cldstop()
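do_notify_parent_cldstop() is the stop/continue/trap variant of the same notification, with si_status taken from group_exit_code or exit_code. From userspace the corresponding events are observed with waitpid() and the WUNTRACED/WCONTINUED flags; a hedged sketch (the sleep() is only a crude way to let the child reach pause() in a demo):

/* Hedged sketch: stop and continue a child, observing both transitions. */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int status;
        pid_t pid = fork();

        if (pid == 0) {
                pause();                        /* child just waits around */
                _exit(0);
        }

        sleep(1);                               /* demo-only: let the child reach pause() */
        kill(pid, SIGSTOP);
        waitpid(pid, &status, WUNTRACED);       /* stop notification (CLD_STOPPED) */
        if (WIFSTOPPED(status))
                printf("child stopped by signal %d\n", WSTOPSIG(status));

        kill(pid, SIGCONT);
        waitpid(pid, &status, WCONTINUED);      /* continue notification (CLD_CONTINUED) */
        if (WIFCONTINUED(status))
                printf("child continued\n");

        kill(pid, SIGKILL);
        waitpid(pid, &status, 0);               /* reap */
        return 0;
}
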
2836 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) in retarget_shared_pending() argument
2841 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); in retarget_shared_pending()
2845 t = tsk; in retarget_shared_pending()
2846 while_each_thread(tsk, t) { in retarget_shared_pending()
2863 void exit_signals(struct task_struct *tsk) in exit_signals() argument
2872 cgroup_threadgroup_change_begin(tsk); in exit_signals()
2874 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { in exit_signals()
2875 tsk->flags |= PF_EXITING; in exit_signals()
2876 cgroup_threadgroup_change_end(tsk); in exit_signals()
2880 spin_lock_irq(&tsk->sighand->siglock); in exit_signals()
2885 tsk->flags |= PF_EXITING; in exit_signals()
2887 cgroup_threadgroup_change_end(tsk); in exit_signals()
2889 if (!task_sigpending(tsk)) in exit_signals()
2892 unblocked = tsk->blocked; in exit_signals()
2894 retarget_shared_pending(tsk, &unblocked); in exit_signals()
2896 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && in exit_signals()
2897 task_participate_group_stop(tsk)) in exit_signals()
2900 spin_unlock_irq(&tsk->sighand->siglock); in exit_signals()
2908 do_notify_parent_cldstop(tsk, false, group_stop); in exit_signals()
2931 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) in __set_task_blocked() argument
2933 if (task_sigpending(tsk) && !thread_group_empty(tsk)) { in __set_task_blocked()
2937 retarget_shared_pending(tsk, &newblocked); in __set_task_blocked()
2939 tsk->blocked = *newset; in __set_task_blocked()
2958 struct task_struct *tsk = current; in __set_current_blocked() local
2964 if (sigequalsets(&tsk->blocked, newset)) in __set_current_blocked()
2967 spin_lock_irq(&tsk->sighand->siglock); in __set_current_blocked()
2968 __set_task_blocked(tsk, newset); in __set_current_blocked()
2969 spin_unlock_irq(&tsk->sighand->siglock); in __set_current_blocked()
2982 struct task_struct *tsk = current; in sigprocmask() local
2987 *oldset = tsk->blocked; in sigprocmask()
2991 sigorsets(&newset, &tsk->blocked, set); in sigprocmask()
2994 sigandnsets(&newset, &tsk->blocked, set); in sigprocmask()
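The kernel's sigprocmask() applies SIG_BLOCK as a union with the current mask (sigorsets), SIG_UNBLOCK as an and-not (sigandnsets), and SIG_SETMASK as a straight copy before handing the result to __set_current_blocked(). The sigprocmask(2) system call exposes exactly those three operations; a short sketch:

/* Hedged sketch of the three sigprocmask(2) operations:
 * BLOCK = union, UNBLOCK = and-not, SETMASK = replace. */
#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t set, cur;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigaddset(&set, SIGUSR2);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* blocked |= set   (sigorsets)   */

        sigemptyset(&set);
        sigaddset(&set, SIGUSR2);
        sigprocmask(SIG_UNBLOCK, &set, NULL);   /* blocked &= ~set  (sigandnsets) */

        sigprocmask(SIG_SETMASK, NULL, &cur);   /* NULL set: just read the mask back */
        printf("SIGUSR1 blocked: %d, SIGUSR2 blocked: %d\n",
               sigismember(&cur, SIGUSR1), sigismember(&cur, SIGUSR2));
        return 0;
}
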
3489 struct task_struct *tsk = current; in do_sigtimedwait() local
3506 spin_lock_irq(&tsk->sighand->siglock); in do_sigtimedwait()
3507 sig = dequeue_signal(tsk, &mask, info); in do_sigtimedwait()
3515 tsk->real_blocked = tsk->blocked; in do_sigtimedwait()
3516 sigandsets(&tsk->blocked, &tsk->blocked, &mask); in do_sigtimedwait()
3518 spin_unlock_irq(&tsk->sighand->siglock); in do_sigtimedwait()
3521 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, in do_sigtimedwait()
3523 spin_lock_irq(&tsk->sighand->siglock); in do_sigtimedwait()
3524 __set_task_blocked(tsk, &tsk->real_blocked); in do_sigtimedwait()
3525 sigemptyset(&tsk->real_blocked); in do_sigtimedwait()
3526 sig = dequeue_signal(tsk, &mask, info); in do_sigtimedwait()
3528 spin_unlock_irq(&tsk->sighand->siglock); in do_sigtimedwait()
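do_sigtimedwait() saves the caller's mask in real_blocked and temporarily unblocks the signals being waited for so their arrival wakes the sleeper, sleeps for the requested timeout plus timer slack, then restores the mask and retries the dequeue. Its userspace entry point is sigtimedwait(2); a hedged sketch that waits up to two seconds for a blocked SIGUSR1:

/* Hedged userspace counterpart of do_sigtimedwait(). */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec timeout = { .tv_sec = 2, .tv_nsec = 0 };
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* must be blocked, or normal delivery wins */

        sig = sigtimedwait(&set, &info, &timeout);
        if (sig == SIGUSR1)
                printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
        else if (sig == -1 && errno == EAGAIN)
                printf("timed out\n");
        return 0;
}
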