/kernel/locking/
D | rtmutex.c |
    284  rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
    286          struct rb_node **link = &task->pi_waiters.rb_node;
    303                  task->pi_waiters_leftmost = &waiter->pi_tree_entry;
    306          rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
    310  rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
    315          if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
    316                  task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
    318          rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
    328  int rt_mutex_getprio(struct task_struct *task)
    330          if (likely(!task_has_pi_waiters(task)))
    [all …]
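The enqueue/dequeue pair above keeps each task's PI waiters in a priority-sorted rbtree with a cached leftmost (highest-priority) node. Below is a minimal sketch of that cached-leftmost ordered-insert pattern; the reduced pi_waiter/pi_head types and the integer prio comparison are illustrative stand-ins, not the kernel's real definitions.

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct pi_waiter {                 /* hypothetical reduced waiter */
            struct rb_node node;
            int prio;                  /* lower value == higher priority */
    };

    struct pi_head {                   /* hypothetical reduced task side */
            struct rb_root waiters;
            struct rb_node *leftmost;  /* cached top-priority waiter */
    };

    static void pi_enqueue(struct pi_head *h, struct pi_waiter *w)
    {
            struct rb_node **link = &h->waiters.rb_node, *parent = NULL;
            bool leftmost = true;

            while (*link) {            /* ordinary ordered rbtree descent */
                    struct pi_waiter *cur = rb_entry(*link, struct pi_waiter, node);

                    parent = *link;
                    if (w->prio < cur->prio) {
                            link = &parent->rb_left;
                    } else {
                            link = &parent->rb_right;
                            leftmost = false;  /* went right at least once */
                    }
            }

            if (leftmost)              /* new highest-priority waiter */
                    h->leftmost = &w->node;

            rb_link_node(&w->node, parent, link);
            rb_insert_color(&w->node, &h->waiters);
    }

Dequeue mirrors this: if the node being erased is the cached leftmost, advance the cache with rb_next() first, exactly as rt_mutex_dequeue_pi() does at lines 315-318.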
|
D | rtmutex-debug.c |
     58  void rt_mutex_debug_task_free(struct task_struct *task)
     60          DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters));
     61          DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
     73          struct task_struct *task;            /* in debug_rt_mutex_deadlock() */
     78          task = rt_mutex_owner(act_waiter->lock);
     79          if (task && task != current) {
     80                  act_waiter->deadlock_task_pid = get_pid(task_pid(task));
     87          struct task_struct *task;            /* in debug_rt_mutex_print_deadlock() */
     93          task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
     94          if (!task) {
    [all …]
|
D | mutex-debug.c |
     52                             struct task_struct *task)   /* debug_mutex_add_waiter() */
     57          task->blocked_on = waiter;
     61                             struct task_struct *task)   /* mutex_remove_waiter() */
     64          DEBUG_LOCKS_WARN_ON(waiter->task != task);
     65          DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
     66          task->blocked_on = NULL;
     69          waiter->task = NULL;
|
D | semaphore.c |
    195          struct task_struct *task;            /* member */
    207          struct task_struct *task = current;  /* in __down_common() */
    211          waiter.task = task;
    215                  if (signal_pending_state(state, task))
    219                  __set_task_state(task, state);
    262          wake_up_process(waiter->task);       /* in __up() */
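These lines show the classic sleeping-waiter protocol in __down_common(): publish a waiter holding current, then loop setting the task state and scheduling until the waker (__up()) marks the waiter and calls wake_up_process(). A minimal sketch of that protocol under illustrative names (my_waiter, my_wait, wait_list), using this series' __set_task_state(); timeout handling is trimmed.

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    struct my_waiter {
            struct list_head list;
            struct task_struct *task;
            bool up;                           /* set by the waker */
    };

    static int my_wait(spinlock_t *lock, struct list_head *wait_list, long state)
    {
            struct my_waiter waiter = { .task = current, .up = false };

            list_add_tail(&waiter.list, wait_list);
            for (;;) {
                    if (signal_pending_state(state, current)) {
                            list_del(&waiter.list);
                            return -EINTR;     /* interrupted before being woken */
                    }
                    __set_task_state(current, state);
                    spin_unlock_irq(lock);     /* sleep with the lock dropped */
                    schedule();
                    spin_lock_irq(lock);
                    if (waiter.up)             /* waker granted us the resource */
                            return 0;
            }
    }

The waker side (cf. __up() at line 262) sets waiter->up = true before wake_up_process(), so a woken task can distinguish a real grant from a spurious wakeup.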
|
D | rwsem-spinlock.c |
     19          struct task_struct *task;            /* member */
     77          wake_up_process(waiter->task);       /* in __rwsem_do_wake() */
     87          tsk = waiter->task;
     96          waiter->task = NULL;
    120          wake_up_process(waiter->task);       /* in __rwsem_wake_one_writer() */
    147          waiter.task = tsk;                   /* in __down_read() */
    158          if (!waiter.task)
    205          waiter.task = tsk;                   /* in __down_write_common() */
|
D | mutex.c |
    189          wake_up_process(cur->task);          /* in ww_mutex_set_context_fastpath() */
    215          wake_up_process(cur->task);          /* in ww_mutex_set_context_slowpath() */
    309          struct task_struct *task = current;  /* in mutex_optimistic_spin() */
    371          if (!owner && (need_resched() || rt_task(task)))
    509          struct task_struct *task = current;  /* in __mutex_lock_common() */
    540          debug_mutex_add_waiter(lock, &waiter, task);
    544          waiter.task = task;
    567          if (unlikely(signal_pending_state(state, task))) {
    578          __set_task_state(task, state);
    585          __set_task_state(task, TASK_RUNNING);
    [all …]
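mutex_optimistic_spin() (line 309 onward) busy-waits instead of sleeping while the lock holder is still running on another CPU, on the bet that the lock will be released soon. A minimal sketch of that spin-on-owner heuristic, assuming CONFIG_SMP (for task_struct::on_cpu); the my_lock type and its bare ->owner pointer are illustrative stand-ins for the real mutex owner encoding.

    #include <linux/sched.h>
    #include <linux/rcupdate.h>

    struct my_lock {
            struct task_struct *owner;           /* NULL when unlocked */
    };

    static bool my_spin_on_owner(struct my_lock *lock)
    {
            struct task_struct *owner;
            bool keep_spinning = true;

            rcu_read_lock();                     /* owner may exit; pin its task_struct */
            while ((owner = READ_ONCE(lock->owner))) {
                    if (!owner->on_cpu || need_resched()) {
                            keep_spinning = false;  /* owner off-CPU, or we must yield */
                            break;
                    }
                    cpu_relax();                 /* polite busy-wait */
            }
            rcu_read_unlock();

            /* NULL owner: released while we watched; caller should try to grab it */
            return keep_spinning;
    }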
|
D | rwsem-xadd.c |
    101          struct task_struct *task;            /* member */
    146          wake_q_add(wake_q, waiter->task);    /* in __rwsem_mark_wake() */
    196          tsk = waiter->task;
    206          smp_store_release(&waiter->task, NULL);
    230          waiter.task = tsk;                   /* in rwsem_down_read_failed() */
    258          if (!waiter.task)
    477          waiter.task = current;               /* in __rwsem_down_write_failed_common() */
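__rwsem_mark_wake() batches wakeups through a wake_q so the actual wake_up_process() calls happen after the rwsem's internal lock is dropped. A minimal sketch of that pattern, assuming this series' on-stack WAKE_Q() initializer (later kernels rename it DEFINE_WAKE_Q()); the my_waiter list is illustrative.

    #include <linux/sched.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_waiter {
            struct list_head list;
            struct task_struct *task;
    };

    static void my_wake_all(spinlock_t *lock, struct list_head *waiters)
    {
            WAKE_Q(wake_q);                        /* on-stack wakeup batch */
            struct my_waiter *w, *tmp;

            spin_lock(lock);
            list_for_each_entry_safe(w, tmp, waiters, list) {
                    list_del(&w->list);
                    wake_q_add(&wake_q, w->task);  /* defer the actual wakeup */
            }
            spin_unlock(lock);

            wake_up_q(&wake_q);                    /* wake everyone, lock dropped */
    }

Deferring wakeups this way keeps the lock hold time short and avoids waking a task that would immediately contend on the lock we still hold.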
|
/kernel/
D | task_work.c |
     27  task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
     32                  head = READ_ONCE(task->task_works);
     36          } while (cmpxchg(&task->task_works, head, work) != head);
     39                  set_notify_resume(task);
     55  task_work_cancel(struct task_struct *task, task_work_func_t func)
     57          struct callback_head **pprev = &task->task_works;
     61          if (likely(!task->task_works))
     69          raw_spin_lock_irqsave(&task->pi_lock, flags);
     76          raw_spin_unlock_irqrestore(&task->pi_lock, flags);
     91          struct task_struct *task = current;  /* in task_work_run() */
    [all …]
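task_work_add() pushes a callback onto a lock-free singly linked list with a cmpxchg() retry loop. A minimal sketch of that push, with my_list standing in for &task->task_works (the real function additionally refuses to add once the list carries its exited sentinel).

    #include <linux/types.h>
    #include <linux/atomic.h>

    static struct callback_head *my_list;        /* illustrative list head */

    static void my_push(struct callback_head *work)
    {
            struct callback_head *head;

            do {
                    head = READ_ONCE(my_list);
                    work->next = head;           /* link before publishing */
                    /* retry if another CPU changed the head under us */
            } while (cmpxchg(&my_list, head, work) != head);
    }

Because the push is lockless, the cancel path above (lines 55-76) has to take pi_lock to unlink safely against a concurrent task_work_run().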
|
D | cgroup_freezer.c |
     55  static inline struct freezer *task_freezer(struct task_struct *task)
     57          return css_freezer(task_css(task, freezer_cgrp_id));
     65  bool cgroup_freezing(struct task_struct *task)
     70          ret = task_freezer(task)->state & CGROUP_FREEZING;
    160          struct task_struct *task;            /* in freezer_attach() */
    175          cgroup_taskset_for_each(task, new_css, tset) {
    179                  __thaw_task(task);
    181                  freeze_task(task);
    203  static void freezer_fork(struct task_struct *task)
    214          if (task_css_is_root(task, freezer_cgrp_id))
    [all …]
|
D | ptrace.c |
    165  static bool ptrace_freeze_traced(struct task_struct *task)
    170          if (task->jobctl & JOBCTL_LISTENING)
    173          spin_lock_irq(&task->sighand->siglock);
    174          if (task_is_traced(task) && !__fatal_signal_pending(task)) {
    175                  task->state = __TASK_TRACED;
    178          spin_unlock_irq(&task->sighand->siglock);
    183  static void ptrace_unfreeze_traced(struct task_struct *task)
    185          if (task->state != __TASK_TRACED)
    188          WARN_ON(!task->ptrace || task->parent != current);
    194          spin_lock_irq(&task->sighand->siglock);
    [all …]
|
D | kthread.c |
    136  void *kthread_data(struct task_struct *task)
    138          return to_kthread(task)->data;
    150  void *kthread_probe_data(struct task_struct *task)
    152          struct kthread *kthread = to_kthread(task);
    255          struct task_struct *task;            /* in __kthread_create_on_node() */
    290          task = create->result;
    291          if (!IS_ERR(task)) {
    294                  vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
    299                  sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
    300                  set_cpus_allowed_ptr(task, cpu_all_mask);
    [all …]
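__kthread_create_on_node() is the factory behind the public kthread API. A minimal usage sketch of that API (kthread_run()/kthread_should_stop()/kthread_stop()); my_thread_fn, my_start/my_stop and the thread name are illustrative.

    #include <linux/kthread.h>
    #include <linux/delay.h>
    #include <linux/err.h>

    static struct task_struct *worker;

    static int my_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {   /* exit when asked to stop */
                    /* ... do periodic work with `data` ... */
                    msleep(100);
            }
            return 0;
    }

    static int my_start(void *my_data)
    {
            worker = kthread_run(my_thread_fn, my_data, "my-worker-%d", 0);
            if (IS_ERR(worker))
                    return PTR_ERR(worker);
            return 0;
    }

    static void my_stop(void)
    {
            kthread_stop(worker);              /* blocks until my_thread_fn returns */
    }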
|
D | pid.c |
    391  void attach_pid(struct task_struct *task, enum pid_type type)
    393          struct pid_link *link = &task->pids[type];
    397  static void __change_pid(struct task_struct *task, enum pid_type type,
    404          link = &task->pids[type];
    417  void detach_pid(struct task_struct *task, enum pid_type type)
    419          __change_pid(task, type, NULL);
    422  void change_pid(struct task_struct *task, enum pid_type type,
    425          __change_pid(task, type, pid);
    426          attach_pid(task, type);
    466  struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
    [all …]
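get_task_pid() (line 466) returns a counted reference on the struct pid behind a task, which the caller must drop with put_pid(). A minimal usage sketch; my_pid_of() is illustrative.

    #include <linux/pid.h>
    #include <linux/sched.h>

    static pid_t my_pid_of(struct task_struct *task)
    {
            struct pid *pid = get_task_pid(task, PIDTYPE_PID);  /* takes a ref */
            pid_t nr = pid_vnr(pid);     /* numeric pid in current's pid namespace */

            put_pid(pid);                /* drop the ref */
            return nr;
    }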
|
D | cgroup_pids.c |
    174          struct task_struct *task;            /* in pids_can_attach() */
    177          cgroup_taskset_for_each(task, dst_css, tset) {
    187                  old_css = task_css(task, pids_cgrp_id);
    199          struct task_struct *task;            /* in pids_cancel_attach() */
    202          cgroup_taskset_for_each(task, dst_css, tset) {
    207                  old_css = task_css(task, pids_cgrp_id);
    219  static int pids_can_fork(struct task_struct *task)
    240  static void pids_cancel_fork(struct task_struct *task)
    250  static void pids_free(struct task_struct *task)
    252          struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
|
D | seccomp.c |
     72          struct task_struct *task = current;  /* in populate_seccomp_data() */
     73          struct pt_regs *regs = task_pt_regs(task);
     76          sd->nr = syscall_get_nr(task, regs);
     78          syscall_get_arguments(task, regs, 0, 6, args);
     85          sd->instruction_pointer = KSTK_EIP(task);
    217  static inline void seccomp_assign_mode(struct task_struct *task,
    220          assert_spin_locked(&task->sighand->siglock);
    222          task->seccomp.mode = seccomp_mode;
    228          set_tsk_thread_flag(task, TIF_SECCOMP);
    847  long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
    [all …]
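populate_seccomp_data() fills the struct seccomp_data that an installed filter inspects. For context, a minimal userspace sketch installing such a filter via prctl(); the blocked syscall is illustrative and error handling is trimmed.

    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <stddef.h>

    static int install_filter(void)
    {
            struct sock_filter insns[] = {
                    /* load the syscall number from struct seccomp_data */
                    BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                             offsetof(struct seccomp_data, nr)),
                    /* kill on, e.g., __NR_kexec_load; otherwise fall through */
                    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_kexec_load, 0, 1),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
            };
            struct sock_fprog prog = {
                    .len = sizeof(insns) / sizeof(insns[0]),
                    .filter = insns,
            };

            /* required before an unprivileged process may install a filter */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    return -1;
            return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
    }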
|
D | cred.c |
    187  const struct cred *get_task_cred(struct task_struct *task)
    194          cred = __task_cred((task));
    245          struct task_struct *task = current;  /* in prepare_creds() */
    257          old = task->cred;
    424          struct task_struct *task = current;  /* in commit_creds() */
    425          const struct cred *old = task->real_cred;
    431          BUG_ON(task->cred != old);
    447          if (task->mm)
    448                  set_dumpable(task->mm, suid_dumpable);
    449          task->pdeath_signal = 0;
    [all …]
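prepare_creds() and commit_creds() implement a copy-modify-commit protocol: a task never edits its live credentials in place. A minimal usage sketch of that protocol; my_drop_to_uid() and the fsuid change are illustrative.

    #include <linux/cred.h>

    static int my_drop_to_uid(kuid_t uid)
    {
            struct cred *new = prepare_creds();   /* private copy of current's creds */

            if (!new)
                    return -ENOMEM;

            new->fsuid = uid;                     /* modify the private copy */
            return commit_creds(new);             /* atomically install it */
    }

If the modification fails partway, the copy is discarded with abort_creds() instead of committed, which is why commit_creds() at line 431 can BUG on any mismatch between cred and real_cred.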
|
D | cpuset.c |
    143  static inline struct cpuset *task_cs(struct task_struct *task)
    145          return css_cs(task_css(task, cpuset_cgrp_id));
    154  static inline bool task_has_mempolicy(struct task_struct *task)
    156          return task->mempolicy;
    159  static inline bool task_has_mempolicy(struct task_struct *task)
    862          struct task_struct *task;            /* in update_tasks_cpumask() */
    865          while ((task = css_task_iter_next(&it)))
    866                  set_cpus_allowed_ptr(task, cs->effective_cpus);
   1095          struct task_struct *task;            /* in update_tasks_nodemask() */
   1112          while ((task = css_task_iter_next(&it))) {
    [all …]
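update_tasks_cpumask() walks every task in a cgroup with a css_task_iter. A minimal sketch of that walk, assuming the two-argument css_task_iter_start() of this kernel series (a flags argument was added later); my_for_each_task() and my_visit() are illustrative.

    #include <linux/cgroup.h>

    static void my_for_each_task(struct cgroup_subsys_state *css,
                                 void (*my_visit)(struct task_struct *))
    {
            struct css_task_iter it;
            struct task_struct *task;

            css_task_iter_start(css, &it);        /* pins the css's task lists */
            while ((task = css_task_iter_next(&it)))
                    my_visit(task);               /* task is safe to use here */
            css_task_iter_end(&it);
    }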
|
D | kcmp.c |
     57  get_file_raw_ptr(struct task_struct *task, unsigned int idx)
     61          task_lock(task);
     64          if (task->files)
     65                  file = fcheck_files(task->files, idx);
     68          task_unlock(task);
|
D | fork.c |
    975  struct file *get_task_exe_file(struct task_struct *task)
    980          task_lock(task);
    981          mm = task->mm;
    983                  if (!(task->flags & PF_KTHREAD))
    986          task_unlock(task);
   1000  struct mm_struct *get_task_mm(struct task_struct *task)
   1004          task_lock(task);
   1005          mm = task->mm;
   1007                  if (task->flags & PF_KTHREAD)
   1012          task_unlock(task);
    [all …]
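get_task_mm() takes a reference on a task's mm_struct (returning NULL for kernel threads), which the caller must release with mmput(). A minimal usage sketch; my_total_vm_of() is illustrative.

    #include <linux/sched.h>
    #include <linux/mm.h>

    static unsigned long my_total_vm_of(struct task_struct *task)
    {
            struct mm_struct *mm = get_task_mm(task);  /* NULL for kthreads */
            unsigned long pages = 0;

            if (mm) {
                    pages = mm->total_vm;   /* mm cannot go away while we hold it */
                    mmput(mm);
            }
            return pages;
    }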
|
D | workqueue_internal.h |
     38          struct task_struct *task;            /* I: worker task */
     72  void wq_worker_waking_up(struct task_struct *task, int cpu);
     73  struct task_struct *wq_worker_sleeping(struct task_struct *task);
|
D | cgroup.c |
    742  static void css_set_move_task(struct task_struct *task,
    754          WARN_ON_ONCE(list_empty(&task->cg_list));
    765                  if (it->task_pos == &task->cg_list)
    768          list_del_init(&task->cg_list);
    772          WARN_ON_ONCE(!list_empty(&task->cg_list));
    782          WARN_ON_ONCE(task->flags & PF_EXITING);
    784          rcu_assign_pointer(task->cgroups, to_cset);
    785          list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
   1286  static struct cgroup *task_cgroup_from_root(struct task_struct *task,
   1294          return cset_cgroup_from_root(task_css_set(task), root);
    [all …]
|
/kernel/sched/
D | tune.h |
     32  #define schedtune_exit_task(task) do { } while (0)
     34  #define schedtune_enqueue_task(task, cpu) do { } while (0)
     35  #define schedtune_dequeue_task(task, cpu) do { } while (0)
     41                             struct task_struct *task);  /* schedtune_accept_deltas() decl */
     48  #define schedtune_exit_task(task) do { } while (0)
     50  #define schedtune_enqueue_task(task, cpu) do { } while (0)
     51  #define schedtune_dequeue_task(task, cpu) do { } while (0)
     53  #define schedtune_accept_deltas(nrg_delta, cap_delta, task) nrg_delta
|
D | tune.c |
    169                             struct task_struct *task)   /* schedtune_accept_deltas() */
    189          ct = task_schedtune(task);
    374          struct task_struct *task;            /* in schedtune_can_attach() */
    388          cgroup_taskset_for_each(task, css, tset) {
    395                  rq = lock_rq_of(task, &irq_flags);
    397                  if (!task->on_rq) {
    398                          unlock_rq_of(rq, task, &irq_flags);
    411                  src_bg = task_schedtune(task)->idx;
    419                  unlock_rq_of(rq, task, &irq_flags);
    434                  unlock_rq_of(rq, task, &irq_flags);
    [all …]
|
D | deadline.c |
    243  static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
   1140  static int find_later_rq(struct task_struct *task);
   1402  static int find_later_rq(struct task_struct *task)
   1407          int best_cpu, cpu = task_cpu(task);
   1413          if (tsk_nr_cpus_allowed(task) == 1)
   1420          best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
   1421                                task, later_mask);
   1489  static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
   1496          cpu = find_later_rq(task);
   1504              !dl_time_before(task->dl.deadline,
    [all …]
|
/kernel/bpf/
D | helpers.c |
    109          struct task_struct *task = current;  /* in BPF_CALL_0() */
    111          if (unlikely(!task))
    114          return (u64) task->tgid << 32 | task->pid;
    125          struct task_struct *task = current;  /* in BPF_CALL_0() */
    129          if (unlikely(!task))
    145          struct task_struct *task = current;  /* in BPF_CALL_2() */
    147          if (unlikely(!task))
    150          strncpy(buf, task->comm, size);
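These BPF_CALL_* bodies implement helpers such as bpf_get_current_pid_tgid() (line 114 packs tgid into the upper 32 bits and pid into the lower) and bpf_get_current_comm() (line 150). A minimal BPF-program-side sketch calling them; it assumes the modern libbpf bpf_helpers.h header and an illustrative tracepoint, so it is a usage sketch rather than this tree's own sample code. Compile with clang -target bpf.

    #include <linux/types.h>
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("tracepoint/syscalls/sys_enter_execve")
    int trace_execve(void *ctx)
    {
            char comm[16];
            __u64 id = bpf_get_current_pid_tgid();
            __u32 tgid = id >> 32;            /* upper half: tgid */
            __u32 pid = (__u32)id;            /* lower half: pid (tid) */

            bpf_get_current_comm(comm, sizeof(comm));
            bpf_printk("exec by %s tgid=%d pid=%d\n", comm, tgid, pid);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";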
|
/kernel/events/
D | core.c |
    215          if (ctx->task) {                     /* in event_function() */
    216                  if (ctx->task != current) {
    248          struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function; in event_function_call() */
    264          if (!task) {
    269          if (task == TASK_TOMBSTONE)
    273          if (!task_function_call(task, event_function, &efs))
    281                  task = ctx->task;
    282                  if (task == TASK_TOMBSTONE) {
    302          struct task_struct *task = READ_ONCE(ctx->task);  /* in event_function_local() */
    307          if (task) {
    [all …]
|