/kernel/livepatch/ |
D | transition.c |
     72  struct task_struct *g, *task;  in klp_complete_transition() local
    109  for_each_process_thread(g, task) {  in klp_complete_transition()
    110  WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));  in klp_complete_transition()
    111  task->patch_state = KLP_UNDEFINED;  in klp_complete_transition()
    116  task = idle_task(cpu);  in klp_complete_transition()
    117  WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));  in klp_complete_transition()
    118  task->patch_state = KLP_UNDEFINED;  in klp_complete_transition()
    162  void klp_update_patch_state(struct task_struct *task)  in klp_update_patch_state() argument
    182  if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))  in klp_update_patch_state()
    183  task->patch_state = READ_ONCE(klp_target_state);  in klp_update_patch_state()
    [all …]
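
The klp_complete_transition() hunks above walk every thread in the system. A minimal sketch of that iteration pattern, assuming kernel-module context (walk_all_threads and inspect_task are hypothetical names, not kernel symbols):

    #include <linux/sched.h>
    #include <linux/sched/signal.h>
    #include <linux/rcupdate.h>

    /* Walk every thread of every process, as klp_complete_transition() does. */
    static void walk_all_threads(void (*inspect_task)(struct task_struct *))
    {
            struct task_struct *g, *t;

            rcu_read_lock();        /* for_each_process_thread() needs RCU (or tasklist_lock) */
            for_each_process_thread(g, t)
                    inspect_task(t);
            rcu_read_unlock();
    }
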
|
/kernel/bpf/ |
D | task_iter.c |
     38  struct task_struct *task, *next_task;  in task_group_seq_get_next() local
     48  task = get_pid_task(pid, PIDTYPE_TGID);  in task_group_seq_get_next()
     49  if (!task)  in task_group_seq_get_next()
     55  return task;  in task_group_seq_get_next()
     64  task = get_pid_task(pid, PIDTYPE_PID);  in task_group_seq_get_next()
     66  return task;  in task_group_seq_get_next()
     73  task = get_pid_task(pid, PIDTYPE_PID);  in task_group_seq_get_next()
     74  if (!task)  in task_group_seq_get_next()
     78  if (!pid_alive(task)) {  in task_group_seq_get_next()
     79  put_task_struct(task);  in task_group_seq_get_next()
    [all …]
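
The iterator above leans on pid-to-task reference counting. A minimal sketch of that pattern, assuming kernel-module context (with_task_of_pid is a hypothetical name):

    #include <linux/errno.h>
    #include <linux/pid.h>
    #include <linux/sched/task.h>

    static int with_task_of_pid(pid_t nr)
    {
            struct pid *pid;
            struct task_struct *task;

            pid = find_get_pid(nr);                 /* takes a ref on the struct pid */
            if (!pid)
                    return -ESRCH;

            task = get_pid_task(pid, PIDTYPE_PID);  /* takes a ref on the task */
            put_pid(pid);
            if (!task)
                    return -ESRCH;

            /* ... use task; it cannot be freed while we hold the ref ... */

            put_task_struct(task);
            return 0;
    }
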
|
D | bpf_task_storage.c |
     51  struct task_struct *task = owner;  in task_storage_ptr() local
     53  return &task->bpf_storage;  in task_storage_ptr()
     57  task_storage_lookup(struct task_struct *task, struct bpf_map *map,  in task_storage_lookup() argument
     64  rcu_dereference_check(task->bpf_storage, bpf_rcu_lock_held());  in task_storage_lookup()
     72  void bpf_task_storage_free(struct task_struct *task)  in bpf_task_storage_free() argument
     82  local_storage = rcu_dereference(task->bpf_storage);  in bpf_task_storage_free()
    121  struct task_struct *task;  in bpf_pid_task_storage_lookup_elem() local
    135  task = pid_task(pid, PIDTYPE_PID);  in bpf_pid_task_storage_lookup_elem()
    136  if (!task) {  in bpf_pid_task_storage_lookup_elem()
    142  sdata = task_storage_lookup(task, map, true);  in bpf_pid_task_storage_lookup_elem()
    [all …]
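
These helpers back BPF_MAP_TYPE_TASK_STORAGE. A minimal sketch of the BPF-program side, assuming a recent kernel that allows task local storage in tracing programs and a libbpf/CO-RE build (map and program names are hypothetical):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char LICENSE[] SEC("license") = "GPL";

    struct {
            __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
            __uint(map_flags, BPF_F_NO_PREALLOC);
            __type(key, int);
            __type(value, __u64);
    } fork_stamp SEC(".maps");

    SEC("tp_btf/sched_process_fork")
    int BPF_PROG(on_fork, struct task_struct *parent, struct task_struct *child)
    {
            __u64 *ts;

            /* Create (or look up) the child's storage slot and stamp it. */
            ts = bpf_task_storage_get(&fork_stamp, child, 0,
                                      BPF_LOCAL_STORAGE_GET_F_CREATE);
            if (ts)
                    *ts = bpf_ktime_get_ns();
            return 0;
    }
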
|
/kernel/ |
D | task_work.c |
     42  int task_work_add(struct task_struct *task, struct callback_head *work,  in task_work_add() argument
     50  head = READ_ONCE(task->task_works);  in task_work_add()
     55  } while (!try_cmpxchg(&task->task_works, &head, work));  in task_work_add()
     61  set_notify_resume(task);  in task_work_add()
     64  set_notify_signal(task);  in task_work_add()
     67  __set_notify_signal(task);  in task_work_add()
     86  task_work_cancel_match(struct task_struct *task,  in task_work_cancel_match() argument
     90  struct callback_head **pprev = &task->task_works;  in task_work_cancel_match()
     94  if (likely(!task_work_pending(task)))  in task_work_cancel_match()
    102  raw_spin_lock_irqsave(&task->pi_lock, flags);  in task_work_cancel_match()
    [all …]
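
A minimal sketch of the caller side of task_work_add(), assuming kernel-module context (my_twork, my_twork_fn, and queue_on are hypothetical names). TWA_RESUME requests the set_notify_resume() path shown above:

    #include <linux/sched.h>
    #include <linux/task_work.h>

    static void my_twork_fn(struct callback_head *head)
    {
            /* Runs in the context of the task it was queued on. */
            pr_info("task_work ran in %s (pid %d)\n", current->comm, current->pid);
    }

    /* Must not be queued a second time while still pending. */
    static struct callback_head my_twork;

    static int queue_on(struct task_struct *task)
    {
            init_task_work(&my_twork, my_twork_fn);
            /* Fails with -ESRCH if the task is already exiting. */
            return task_work_add(task, &my_twork, TWA_RESUME);
    }
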
|
D | pid.c |
    321  static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)  in task_pid_ptr() argument
    324  &task->thread_pid :  in task_pid_ptr()
    325  &task->signal->pids[type];  in task_pid_ptr()
    331  void attach_pid(struct task_struct *task, enum pid_type type)  in attach_pid() argument
    333  struct pid *pid = *task_pid_ptr(task, type);  in attach_pid()
    334  hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);  in attach_pid()
    337  static void __change_pid(struct task_struct *task, enum pid_type type,  in __change_pid() argument
    340  struct pid **pid_ptr = task_pid_ptr(task, type);  in __change_pid()
    346  hlist_del_rcu(&task->pid_links[type]);  in __change_pid()
    356  void detach_pid(struct task_struct *task, enum pid_type type)  in detach_pid() argument
    [all …]
|
D | ptrace.c |
    173  static bool looks_like_a_spurious_pid(struct task_struct *task)  in looks_like_a_spurious_pid() argument
    175  if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))  in looks_like_a_spurious_pid()
    178  if (task_pid_vnr(task) == task->ptrace_message)  in looks_like_a_spurious_pid()
    194  static bool ptrace_freeze_traced(struct task_struct *task)  in ptrace_freeze_traced() argument
    199  if (task->jobctl & JOBCTL_LISTENING)  in ptrace_freeze_traced()
    202  spin_lock_irq(&task->sighand->siglock);  in ptrace_freeze_traced()
    203  if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&  in ptrace_freeze_traced()
    204  !__fatal_signal_pending(task)) {  in ptrace_freeze_traced()
    205  task->jobctl |= JOBCTL_PTRACE_FROZEN;  in ptrace_freeze_traced()
    208  spin_unlock_irq(&task->sighand->siglock);  in ptrace_freeze_traced()
    [all …]
|
D | kthread.c |
    225  void *kthread_func(struct task_struct *task)  in kthread_func() argument
    227  struct kthread *kthread = __to_kthread(task);  in kthread_func()
    242  void *kthread_data(struct task_struct *task)  in kthread_data() argument
    244  return to_kthread(task)->data;  in kthread_data()
    257  void *kthread_probe_data(struct task_struct *task)  in kthread_probe_data() argument
    259  struct kthread *kthread = __to_kthread(task);  in kthread_probe_data()
    430  struct task_struct *task;  in __kthread_create_on_node() local
    465  task = create->result;  in __kthread_create_on_node()
    466  if (!IS_ERR(task)) {  in __kthread_create_on_node()
    479  struct kthread *kthread = to_kthread(task);  in __kthread_create_on_node()
    [all …]
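
A minimal sketch of the public kthread API these internals implement, assuming kernel-module context (worker_fn, start_worker, and stop_worker are hypothetical names):

    #include <linux/delay.h>
    #include <linux/err.h>
    #include <linux/kthread.h>

    static int worker_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    /* ... do periodic work ... */
                    msleep(100);
            }
            return 0;
    }

    static struct task_struct *worker;

    static int start_worker(void)
    {
            /* kthread_run() returns ERR_PTR() on failure, never NULL. */
            worker = kthread_run(worker_fn, NULL, "my-worker");
            return IS_ERR(worker) ? PTR_ERR(worker) : 0;
    }

    static void stop_worker(void)
    {
            if (!IS_ERR_OR_NULL(worker))
                    kthread_stop(worker);   /* blocks until worker_fn() returns */
    }
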
|
D | pid_namespace.c |
    170  struct task_struct *task, *me = current;  in zap_pid_ns_processes() local
    203  task = pid_task(pid, PIDTYPE_PID);  in zap_pid_ns_processes()
    204  if (task && !__fatal_signal_pending(task))  in zap_pid_ns_processes()
    205  group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);  in zap_pid_ns_processes()
    351  static struct ns_common *pidns_get(struct task_struct *task)  in pidns_get() argument
    356  ns = task_active_pid_ns(task);  in pidns_get()
    364  static struct ns_common *pidns_for_children_get(struct task_struct *task)  in pidns_for_children_get() argument
    368  task_lock(task);  in pidns_for_children_get()
    369  if (task->nsproxy) {  in pidns_for_children_get()
    370  ns = task->nsproxy->pid_ns_for_children;  in pidns_for_children_get()
    [all …]
|
D | workqueue_internal.h |
     39  struct task_struct *task;       /* I: worker task */  member
     77  void wq_worker_running(struct task_struct *task);
     78  void wq_worker_sleeping(struct task_struct *task);
     79  work_func_t wq_worker_last_func(struct task_struct *task);
|
D | cred.c |
    199  const struct cred *get_task_cred(struct task_struct *task)  in get_task_cred() argument
    206  cred = __task_cred((task));  in get_task_cred()
    257  struct task_struct *task = current;  in prepare_creds() local
    269  old = task->cred;  in prepare_creds()
    452  struct task_struct *task = current;  in commit_creds() local
    453  const struct cred *old = task->real_cred;  in commit_creds()
    459  BUG_ON(task->cred != old);  in commit_creds()
    475  if (task->mm)  in commit_creds()
    476  set_dumpable(task->mm, suid_dumpable);  in commit_creds()
    477  task->pdeath_signal = 0;  in commit_creds()
    [all …]
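
prepare_creds()/commit_creds() above implement the standard copy-modify-commit pattern for credentials. A minimal sketch, assuming process context and that any permission checks happen elsewhere (drop_fsuid is a hypothetical name):

    #include <linux/cred.h>
    #include <linux/errno.h>

    static int drop_fsuid(kuid_t uid)
    {
            struct cred *new = prepare_creds();     /* private copy of current->cred */

            if (!new)
                    return -ENOMEM;

            new->fsuid = uid;
            /* commit_creds() consumes the reference; abort_creds(new) is the
             * error path if we had bailed out before committing. */
            return commit_creds(new);
    }
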
|
D | seccomp.c |
     65  struct task_struct *task;  member
    247  struct task_struct *task = current;  in populate_seccomp_data() local
    248  struct pt_regs *regs = task_pt_regs(task);  in populate_seccomp_data()
    251  sd->nr = syscall_get_nr(task, regs);  in populate_seccomp_data()
    252  sd->arch = syscall_get_arch(task);  in populate_seccomp_data()
    253  syscall_get_arguments(task, regs, args);  in populate_seccomp_data()
    260  sd->instruction_pointer = KSTK_EIP(task);  in populate_seccomp_data()
    442  void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }  in arch_seccomp_spec_mitigate() argument
    444  static inline void seccomp_assign_mode(struct task_struct *task,  in seccomp_assign_mode() argument
    448  assert_spin_locked(&task->sighand->siglock);  in seccomp_assign_mode()
    [all …]
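
populate_seccomp_data() above fills the struct seccomp_data that a filter inspects. From user space, the simplest way to enter seccomp is strict mode; a minimal sketch (after this call only read, write, _exit, and sigreturn are permitted):

    #include <linux/seccomp.h>
    #include <sys/prctl.h>
    #include <unistd.h>

    int main(void)
    {
            if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0))
                    return 1;
            write(1, "sandboxed\n", 10);    /* still allowed in strict mode */
            _exit(0);
    }
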
|
/kernel/locking/ |
D | rtmutex.c |
    327  static __always_inline int __waiter_prio(struct task_struct *task)  in __waiter_prio() argument
    329  int prio = task->prio;  in __waiter_prio()
    332  trace_android_vh_rtmutex_waiter_prio(task, &waiter_prio);  in __waiter_prio()
    343  waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)  in waiter_update_prio() argument
    345  waiter->prio = __waiter_prio(task);  in waiter_update_prio()
    346  waiter->deadline = task->dl.deadline;  in waiter_update_prio()
    472  rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)  in rt_mutex_enqueue_pi() argument
    474  rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);  in rt_mutex_enqueue_pi()
    478  rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)  in rt_mutex_dequeue_pi() argument
    483  rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);  in rt_mutex_dequeue_pi()
    [all …]
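
The waiter bookkeeping above drives priority inheritance. A minimal sketch of the rt_mutex API from the consumer side, assuming kernel code (my_rtm and critical_section are hypothetical names):

    #include <linux/rtmutex.h>

    static DEFINE_RT_MUTEX(my_rtm);

    static void critical_section(void)
    {
            rt_mutex_lock(&my_rtm);         /* boosts the owner's priority if we block */
            /* ... protected work ... */
            rt_mutex_unlock(&my_rtm);
    }
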
|
D | rtmutex_api.c |
    299  struct task_struct *task)  in __rt_mutex_start_proxy_lock() argument
    305  if (try_to_take_rt_mutex(lock, task, NULL))  in __rt_mutex_start_proxy_lock()
    309  ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,  in __rt_mutex_start_proxy_lock()
    346  struct task_struct *task)  in rt_mutex_start_proxy_lock() argument
    351  ret = __rt_mutex_start_proxy_lock(lock, waiter, task);  in rt_mutex_start_proxy_lock()
    458  void __sched rt_mutex_adjust_pi(struct task_struct *task)  in rt_mutex_adjust_pi() argument
    464  raw_spin_lock_irqsave(&task->pi_lock, flags);  in rt_mutex_adjust_pi()
    466  waiter = task->pi_blocked_on;  in rt_mutex_adjust_pi()
    467  if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {  in rt_mutex_adjust_pi()
    468  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  in rt_mutex_adjust_pi()
    [all …]
|
D | mutex-debug.c |
     51  struct task_struct *task)  in debug_mutex_add_waiter() argument
     56  task->blocked_on = waiter;  in debug_mutex_add_waiter()
     60  struct task_struct *task)  in debug_mutex_remove_waiter() argument
     63  DEBUG_LOCKS_WARN_ON(waiter->task != task);  in debug_mutex_remove_waiter()
     64  DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);  in debug_mutex_remove_waiter()
     65  task->blocked_on = NULL;  in debug_mutex_remove_waiter()
     68  waiter->task = NULL;  in debug_mutex_remove_waiter()
|
/kernel/cgroup/ |
D | legacy_freezer.c |
     56  static inline struct freezer *task_freezer(struct task_struct *task)  in task_freezer() argument
     58  return css_freezer(task_css(task, freezer_cgrp_id));  in task_freezer()
     66  bool cgroup_freezing(struct task_struct *task)  in cgroup_freezing() argument
     76  state = task_freezer(task)->state;  in cgroup_freezing()
    171  struct task_struct *task;  in freezer_attach() local
    186  cgroup_taskset_for_each(task, new_css, tset) {  in freezer_attach()
    190  __thaw_task(task);  in freezer_attach()
    192  freeze_task(task);  in freezer_attach()
    215  static void freezer_fork(struct task_struct *task)  in freezer_fork() argument
    226  if (task_css_is_root(task, freezer_cgrp_id))  in freezer_fork()
    [all …]
|
D | freezer.c |
    156  static void cgroup_freeze_task(struct task_struct *task, bool freeze)  in cgroup_freeze_task() argument
    162  if (!lock_task_sighand(task, &flags))  in cgroup_freeze_task()
    165  trace_android_vh_freeze_whether_wake(task, &wake);  in cgroup_freeze_task()
    167  task->jobctl |= JOBCTL_TRAP_FREEZE;  in cgroup_freeze_task()
    169  signal_wake_up(task, false);  in cgroup_freeze_task()
    171  task->jobctl &= ~JOBCTL_TRAP_FREEZE;  in cgroup_freeze_task()
    173  wake_up_process(task);  in cgroup_freeze_task()
    176  unlock_task_sighand(task, &flags);  in cgroup_freeze_task()
    185  struct task_struct *task;  in cgroup_do_freeze() local
    202  while ((task = css_task_iter_next(&it))) {  in cgroup_do_freeze()
    [all …]
|
D | pids.c |
    193  struct task_struct *task;  in pids_can_attach() local
    196  cgroup_taskset_for_each(task, dst_css, tset) {  in pids_can_attach()
    206  old_css = task_css(task, pids_cgrp_id);  in pids_can_attach()
    218  struct task_struct *task;  in pids_cancel_attach() local
    221  cgroup_taskset_for_each(task, dst_css, tset) {  in pids_cancel_attach()
    226  old_css = task_css(task, pids_cgrp_id);  in pids_cancel_attach()
    238  static int pids_can_fork(struct task_struct *task, struct css_set *cset)  in pids_can_fork() argument
    262  static void pids_cancel_fork(struct task_struct *task, struct css_set *cset)  in pids_cancel_fork() argument
    275  static void pids_release(struct task_struct *task)  in pids_release() argument
    277  struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));  in pids_release()
|
D | cpuset.c |
    250  static inline struct cpuset *task_cs(struct task_struct *task)  in task_cs() argument
    252  return css_cs(task_css(task, cpuset_cgrp_id));  in task_cs()
   1108  struct task_struct *task;  in dl_update_tasks_root_domain() local
   1115  while ((task = css_task_iter_next(&it)))  in dl_update_tasks_root_domain()
   1116  dl_add_task_root_domain(task);  in dl_update_tasks_root_domain()
   1269  struct task_struct *task;  in update_tasks_cpumask() local
   1273  while ((task = css_task_iter_next(&it))) {  in update_tasks_cpumask()
   1277  if (top_cs && (task->flags & PF_KTHREAD) &&  in update_tasks_cpumask()
   1278  kthread_is_per_cpu(task))  in update_tasks_cpumask()
   1282  task_cpu_possible_mask(task));  in update_tasks_cpumask()
    [all …]
|
/kernel/sched/ |
D | psi.c |
    573  struct task_struct *task;  in psi_schedule_poll_work() local
    584  task = rcu_dereference(group->poll_task);  in psi_schedule_poll_work()
    589  if (likely(task))  in psi_schedule_poll_work()
    837  static inline struct psi_group *task_psi_group(struct task_struct *task)  in task_psi_group() argument
    841  return cgroup_psi(task_dfl_cgroup(task));  in task_psi_group()
    846  static void psi_flags_change(struct task_struct *task, int clear, int set)  in psi_flags_change() argument
    848  if (((task->psi_flags & set) ||  in psi_flags_change()
    849  (task->psi_flags & clear) != clear) &&  in psi_flags_change()
    852  task->pid, task->comm, task_cpu(task),  in psi_flags_change()
    853  task->psi_flags, clear, set);  in psi_flags_change()
    [all …]
|
D | core_sched.c |
    133  struct task_struct *task, *p;  in sched_core_share_pid() local
    150  task = current;  in sched_core_share_pid()
    152  task = find_task_by_vpid(pid);  in sched_core_share_pid()
    153  if (!task) {  in sched_core_share_pid()
    158  get_task_struct(task);  in sched_core_share_pid()
    165  if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {  in sched_core_share_pid()
    176  cookie = sched_core_clone_cookie(task);  in sched_core_share_pid()
    201  cookie = sched_core_clone_cookie(task);  in sched_core_share_pid()
    211  __sched_core_set(task, cookie);  in sched_core_share_pid()
    216  grp = task_pid_type(task, type);  in sched_core_share_pid()
    [all …]
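
sched_core_share_pid() resolves its pid argument with the usual RCU lookup-then-pin pattern. A minimal standalone sketch (lookup_task_ref is a hypothetical name):

    #include <linux/rcupdate.h>
    #include <linux/sched.h>
    #include <linux/sched/task.h>

    static struct task_struct *lookup_task_ref(pid_t pid)
    {
            struct task_struct *task;

            rcu_read_lock();
            task = find_task_by_vpid(pid);  /* only valid inside this RCU section */
            if (task)
                    get_task_struct(task);  /* caller must put_task_struct() */
            rcu_read_unlock();

            return task;
    }
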
|
/kernel/trace/ |
D | fgraph.c |
    281  ftrace_graph_get_ret_stack(struct task_struct *task, int idx)  in ftrace_graph_get_ret_stack() argument
    283  idx = task->curr_ret_stack - idx;  in ftrace_graph_get_ret_stack()
    285  if (idx >= 0 && idx <= task->curr_ret_stack)  in ftrace_graph_get_ret_stack()
    286  return &task->ret_stack[idx];  in ftrace_graph_get_ret_stack()
    307  unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,  in ftrace_graph_ret_addr() argument
    310  int index = task->curr_ret_stack;  in ftrace_graph_ret_addr()
    320  if (task->ret_stack[i].retp == retp)  in ftrace_graph_ret_addr()
    321  return task->ret_stack[i].ret;  in ftrace_graph_ret_addr()
    326  unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,  in ftrace_graph_ret_addr() argument
    334  task_idx = task->curr_ret_stack;  in ftrace_graph_ret_addr()
    [all …]
|
D | preemptirq_delay_test.c |
    149  struct task_struct *task;  in preemptirq_run_test() local
    155  task = kthread_run(preemptirq_delay_run, NULL, task_name);  in preemptirq_run_test()
    156  if (IS_ERR(task))  in preemptirq_run_test()
    157  return PTR_ERR(task);  in preemptirq_run_test()
    158  if (task) {  in preemptirq_run_test()
    160  kthread_stop(task);  in preemptirq_run_test()
|
/kernel/events/ |
D | core.c |
    231  if (ctx->task) {  in event_function()
    232  if (ctx->task != current) {  in event_function()
    264  struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */  in event_function_call() local
    280  if (!task) {  in event_function_call()
    285  if (task == TASK_TOMBSTONE)  in event_function_call()
    289  if (!task_function_call(task, event_function, &efs))  in event_function_call()
    297  task = ctx->task;  in event_function_call()
    298  if (task == TASK_TOMBSTONE) {  in event_function_call()
    318  struct task_struct *task = READ_ONCE(ctx->task);  in event_function_local() local
    323  if (task) {  in event_function_local()
    [all …]
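
event_function_call() above is internal plumbing; the supported in-kernel entry point for task-bound events is perf_event_create_kernel_counter(). A minimal sketch, assuming kernel-module context (open_task_cycles is a hypothetical name):

    #include <linux/perf_event.h>

    /* Count CPU cycles for one task, wherever it runs (cpu == -1). */
    static struct perf_event *open_task_cycles(struct task_struct *task)
    {
            struct perf_event_attr attr = {
                    .type   = PERF_TYPE_HARDWARE,
                    .config = PERF_COUNT_HW_CPU_CYCLES,
                    .size   = sizeof(attr),
            };

            /* Returns ERR_PTR() on failure; release with perf_event_release_kernel(). */
            return perf_event_create_kernel_counter(&attr, -1, task, NULL, NULL);
    }
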
|
/kernel/time/ |
D | namespace.c |
    201  static void timens_set_vvar_page(struct task_struct *task,  in timens_set_vvar_page() argument
    243  static struct ns_common *timens_get(struct task_struct *task)  in timens_get() argument
    248  task_lock(task);  in timens_get()
    249  nsproxy = task->nsproxy;  in timens_get()
    254  task_unlock(task);  in timens_get()
    259  static struct ns_common *timens_for_children_get(struct task_struct *task)  in timens_for_children_get() argument
    264  task_lock(task);  in timens_for_children_get()
    265  nsproxy = task->nsproxy;  in timens_for_children_get()
    270  task_unlock(task);  in timens_for_children_get()
|
/kernel/kcsan/ |
D | report.c |
     64  struct task_struct *task;  member
    375  static void print_verbose_info(struct task_struct *task)  in print_verbose_info() argument
    377  if (!task)  in print_verbose_info()
    381  kcsan_restore_irqtrace(task);  in print_verbose_info()
    384  debug_show_held_locks(task);  in print_verbose_info()
    385  print_irqtrace_events(task);  in print_verbose_info()
    455  print_verbose_info(other_info->task);  in print_report()
    533  other_info->task = current;  in set_other_info_task_blocking()
    558  other_info->task = NULL;  in set_other_info_task_blocking()
    566  other_info->task == current);  in set_other_info_task_blocking()
|