/kernel/sched/ |
D | core.c |
      79  struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)    in __task_rq_lock() argument
      84      lockdep_assert_held(&p->pi_lock);    in __task_rq_lock()
      87      rq = task_rq(p);    in __task_rq_lock()
      89      if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {    in __task_rq_lock()
      95      while (unlikely(task_on_rq_migrating(p)))    in __task_rq_lock()
     103  struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)    in task_rq_lock() argument
     104      __acquires(p->pi_lock)    in task_rq_lock()
     110      raw_spin_lock_irqsave(&p->pi_lock, rf->flags);    in task_rq_lock()
     111      rq = task_rq(p);    in task_rq_lock()
     130      if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {    in task_rq_lock()
     [all …]
|
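The core.c hits above show the scheduler's lock-and-revalidate idiom: task_rq_lock() takes p->pi_lock, then the runqueue lock, and retries whenever the task migrated between the two steps. Below is a minimal userspace sketch of the same pattern, assuming pthreads and C11 atomics; the struct and function names are hypothetical, not the kernel's.

/* Lock-and-revalidate sketch: take the per-task lock, then the queue
 * lock, and retry if the task moved to another queue in between.
 * Hypothetical userspace analogue of __task_rq_lock(), not kernel code. */
#include <pthread.h>
#include <stdatomic.h>

struct queue {
    pthread_mutex_t lock;
};

struct task {
    pthread_mutex_t pi_lock;    /* held across the whole acquisition */
    struct queue *_Atomic q;    /* may be switched by a migrator */
};

/* Returns with t->pi_lock and the owning queue's lock both held. */
static struct queue *task_queue_lock(struct task *t)
{
    struct queue *q;

    pthread_mutex_lock(&t->pi_lock);
    for (;;) {
        q = atomic_load(&t->q);
        pthread_mutex_lock(&q->lock);
        /* Revalidate: the task may have migrated before we
         * acquired q->lock; if so, drop the stale lock and retry. */
        if (q == atomic_load(&t->q))
            return q;
        pthread_mutex_unlock(&q->lock);
    }
}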
D | deadline.c |
      35      struct task_struct *p = dl_task_of(dl_se);    in dl_rq_of_se() local
      36      struct rq *rq = task_rq(p);    in dl_rq_of_se()
     156  void dl_change_utilization(struct task_struct *p, u64 new_bw)    in dl_change_utilization() argument
     160      BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);    in dl_change_utilization()
     162      if (task_on_rq_queued(p))    in dl_change_utilization()
     165      rq = task_rq(p);    in dl_change_utilization()
     166      if (p->dl.dl_non_contending) {    in dl_change_utilization()
     167          sub_running_bw(&p->dl, &rq->dl);    in dl_change_utilization()
     168          p->dl.dl_non_contending = 0;    in dl_change_utilization()
     176      if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)    in dl_change_utilization()
     [all …]
|
D | fair.c |
     261  static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)    in task_cfs_rq() argument
     263      return p->se.cfs_rq;    in task_cfs_rq()
     446  static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)    in task_cfs_rq() argument
     448      return &task_rq(p)->cfs;    in task_cfs_rq()
     453      struct task_struct *p = task_of(se);    in cfs_rq_of() local
     454      struct rq *rq = task_rq(p);    in cfs_rq_of()
     727  static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
     728  static unsigned long task_h_load(struct task_struct *p);
     780  void post_init_entity_util_avg(struct task_struct *p)    in post_init_entity_util_avg() argument
     782      struct sched_entity *se = &p->se;    in post_init_entity_util_avg()
     [all …]
|
D | cputime.c |
      99  static inline void task_group_account_field(struct task_struct *p, int index,    in task_group_account_field() argument
     110      cgroup_account_cputime_field(p, index, tmp);    in task_group_account_field()
     118  void account_user_time(struct task_struct *p, u64 cputime)    in account_user_time() argument
     123      p->utime += cputime;    in account_user_time()
     124      account_group_user_time(p, cputime);    in account_user_time()
     126      index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;    in account_user_time()
     129      task_group_account_field(p, index, cputime);    in account_user_time()
     132      acct_account_cputime(p);    in account_user_time()
     135      cpufreq_acct_update_power(p, cputime);    in account_user_time()
     143  void account_guest_time(struct task_struct *p, u64 cputime)    in account_guest_time() argument
     [all …]
|
D | rt.c |
     239      struct task_struct *p = rt_task_of(rt_se);    in rq_of_rt_se() local
     241      return task_rq(p);    in rq_of_rt_se()
     318      struct task_struct *p;    in inc_rt_migration() local
     323      p = rt_task_of(rt_se);    in inc_rt_migration()
     327      if (p->nr_cpus_allowed > 1)    in inc_rt_migration()
     335      struct task_struct *p;    in dec_rt_migration() local
     340      p = rt_task_of(rt_se);    in dec_rt_migration()
     344      if (p->nr_cpus_allowed > 1)    in dec_rt_migration()
     374  static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)    in enqueue_pushable_task() argument
     376      plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);    in enqueue_pushable_task()
     [all …]
|
D | autogroup.c |
      49  static inline struct autogroup *autogroup_task_get(struct task_struct *p)    in autogroup_task_get() argument
      54      if (!lock_task_sighand(p, &flags))    in autogroup_task_get()
      57      ag = autogroup_kref_get(p->signal->autogroup);    in autogroup_task_get()
      58      unlock_task_sighand(p, &flags);    in autogroup_task_get()
     107  bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)    in task_wants_autogroup() argument
     119      if (p->flags & PF_EXITING)    in task_wants_autogroup()
     125  void sched_autogroup_exit_task(struct task_struct *p)    in sched_autogroup_exit_task() argument
     132      sched_move_task(p);    in sched_autogroup_exit_task()
     136  autogroup_move_group(struct task_struct *p, struct autogroup *ag)    in autogroup_move_group() argument
     142      BUG_ON(!lock_task_sighand(p, &flags));    in autogroup_move_group()
     [all …]
|
D | stats.h |
      65  static inline void psi_enqueue(struct task_struct *p, bool wakeup)    in psi_enqueue() argument
      72      if (!wakeup || p->sched_psi_wake_requeue) {    in psi_enqueue()
      73          if (p->flags & PF_MEMSTALL)    in psi_enqueue()
      75          if (p->sched_psi_wake_requeue)    in psi_enqueue()
      76              p->sched_psi_wake_requeue = 0;    in psi_enqueue()
      78      if (p->in_iowait)    in psi_enqueue()
      82      psi_task_change(p, clear, set);    in psi_enqueue()
      85  static inline void psi_dequeue(struct task_struct *p, bool sleep)    in psi_dequeue() argument
      93      if (p->flags & PF_MEMSTALL)    in psi_dequeue()
      96      if (p->in_iowait)    in psi_dequeue()
     [all …]
|
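psi_enqueue() and psi_dequeue() above both follow one idiom: accumulate every state transition into a clear mask and a set mask, then apply them through a single psi_task_change() call. A small self-contained sketch of that mask idiom follows; the flag names are illustrative, not the kernel's PSI states.

/* Clear/set bitmask sketch: gather all transitions into two masks and
 * update the state word in one place. Flag values are made up. */
#include <stdio.h>

enum {
    STATE_MEMSTALL = 1 << 0,
    STATE_IOWAIT   = 1 << 1,
    STATE_RUNNING  = 1 << 2,
};

static void task_state_change(unsigned int *state,
                              unsigned int clear, unsigned int set)
{
    *state = (*state & ~clear) | set;
}

int main(void)
{
    unsigned int state = STATE_IOWAIT;
    unsigned int clear = 0, set = STATE_RUNNING;

    if (state & STATE_IOWAIT)    /* waking from iowait clears that bit */
        clear |= STATE_IOWAIT;

    task_state_change(&state, clear, set);
    printf("state=%#x\n", state);    /* prints state=0x4 */
    return 0;
}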
D | sched.h |
     175  static inline int task_has_idle_policy(struct task_struct *p)    in task_has_idle_policy() argument
     177      return idle_policy(p->policy);    in task_has_idle_policy()
     180  static inline int task_has_rt_policy(struct task_struct *p)    in task_has_rt_policy() argument
     182      return rt_policy(p->policy);    in task_has_rt_policy()
     185  static inline int task_has_dl_policy(struct task_struct *p)    in task_has_dl_policy() argument
     187      return dl_policy(p->policy);    in task_has_dl_policy()
     242  void __dl_clear_params(struct task_struct *p);
     308  extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
     312  extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
     313  extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
     [all …]
|
D | debug.c |
     434  print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)    in print_task() argument
     436      if (rq->curr == p)    in print_task()
     439      SEQ_printf(m, " %c", task_state_to_char(p));    in print_task()
     442          p->comm, task_pid_nr(p),    in print_task()
     443          SPLIT_NS(p->se.vruntime),    in print_task()
     444          (long long)(p->nvcsw + p->nivcsw),    in print_task()
     445          p->prio);    in print_task()
     448          SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),    in print_task()
     449          SPLIT_NS(p->se.sum_exec_runtime),    in print_task()
     450          SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));    in print_task()
     [all …]
|
D | membarrier.c |
      86      struct task_struct *p;    in membarrier_global_expedited() local
     108      p = rcu_dereference(cpu_rq(cpu)->curr);    in membarrier_global_expedited()
     109      if (p->flags & PF_KTHREAD)    in membarrier_global_expedited()
     165      struct task_struct *p;    in membarrier_private_expedited() local
     177      p = rcu_dereference(cpu_rq(cpu)->curr);    in membarrier_private_expedited()
     178      if (p && p->mm == mm)    in membarrier_private_expedited()
     241      struct task_struct *p;    in sync_runqueues_membarrier_state() local
     243      p = rcu_dereference(rq->curr);    in sync_runqueues_membarrier_state()
     244      if (p && p->mm == mm)    in sync_runqueues_membarrier_state()
     261      struct task_struct *p = current;    in membarrier_register_global_expedited() local
     [all …]
|
/kernel/ |
D | kprobes.c |
     326      struct kprobe *p;    in get_kprobe() local
     329      hlist_for_each_entry_rcu(p, head, hlist) {    in get_kprobe()
     330          if (p->addr == addr)    in get_kprobe()
     331              return p;    in get_kprobe()
     338  static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
     341  static inline int kprobe_aggrprobe(struct kprobe *p)    in kprobe_aggrprobe() argument
     343      return p->pre_handler == aggr_pre_handler;    in kprobe_aggrprobe()
     347  static inline int kprobe_unused(struct kprobe *p)    in kprobe_unused() argument
     349      return kprobe_aggrprobe(p) && kprobe_disabled(p) &&    in kprobe_unused()
     350          list_empty(&p->list);    in kprobe_unused()
     [all …]
|
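get_kprobe() above is a plain hash lookup: hash the probed address into a bucket, then walk the bucket's chain under RCU comparing p->addr. A simplified sketch, with an ordinary singly linked list standing in for the kernel's RCU hlist; the table size and hash function are made up.

/* Address-keyed hash lookup sketch in the shape of get_kprobe().
 * Not the kernel's data structure: no RCU, no hlist. */
#include <stddef.h>
#include <stdint.h>

#define TABLE_BITS 6
#define TABLE_SIZE (1u << TABLE_BITS)

struct probe {
    void *addr;
    struct probe *next;
};

static struct probe *table[TABLE_SIZE];

static unsigned int hash_addr(void *addr)
{
    /* Drop low bits that are usually zero for code addresses. */
    return ((uintptr_t)addr >> 4) & (TABLE_SIZE - 1);
}

static struct probe *get_probe(void *addr)
{
    struct probe *p;

    for (p = table[hash_addr(addr)]; p; p = p->next)
        if (p->addr == addr)
            return p;
    return NULL;
}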
D | exit.c |
      72  static void __unhash_process(struct task_struct *p, bool group_dead)    in __unhash_process() argument
      75      detach_pid(p, PIDTYPE_PID);    in __unhash_process()
      77      detach_pid(p, PIDTYPE_TGID);    in __unhash_process()
      78      detach_pid(p, PIDTYPE_PGID);    in __unhash_process()
      79      detach_pid(p, PIDTYPE_SID);    in __unhash_process()
      81      list_del_rcu(&p->tasks);    in __unhash_process()
      82      list_del_init(&p->sibling);    in __unhash_process()
      85      list_del_rcu(&p->thread_group);    in __unhash_process()
      86      list_del_rcu(&p->thread_node);    in __unhash_process()
     191  void release_task(struct task_struct *p)    in release_task() argument
     [all …]
|
D | fork.c |
     992                              struct task_struct *p)    in mm_clear_owner() argument
     995      if (mm->owner == p)    in mm_clear_owner()
    1000  static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)    in mm_init_owner() argument
    1003      mm->owner = p;    in mm_init_owner()
    1014  static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,    in mm_init() argument
    1034      mm_init_owner(mm, p);    in mm_init()
    1054      if (init_new_context(p, mm))    in mm_init()
    1604  static void copy_seccomp(struct task_struct *p)    in copy_seccomp() argument
    1617      p->seccomp = current->seccomp;    in copy_seccomp()
    1625      task_set_no_new_privs(p);    in copy_seccomp()
     [all …]
|
D | resource.c |
      64  static struct resource *next_resource(struct resource *p, bool sibling_only)    in next_resource() argument
      68          return p->sibling;    in next_resource()
      70      if (p->child)    in next_resource()
      71          return p->child;    in next_resource()
      72      while (!p->sibling && p->parent)    in next_resource()
      73          p = p->parent;    in next_resource()
      74      return p->sibling;    in next_resource()
      79      struct resource *p = v;    in r_next() local
      81      return (void *)next_resource(p, false);    in r_next()
      91      struct resource *p = PDE_DATA(file_inode(m->file));    in r_start() local
     [all …]
|
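next_resource() above walks the resource tree iteratively: descend to a child if one exists, otherwise take a sibling, otherwise climb parents until an ancestor has an unvisited sibling. That is a recursion-free preorder traversal, sketched here with a generic node type rather than struct resource.

/* Iterative preorder step in the shape of next_resource():
 * child first, then sibling, then the nearest ancestor's sibling.
 * Returns NULL once the whole tree has been visited. */
#include <stddef.h>

struct node {
    struct node *parent, *sibling, *child;
};

static struct node *next_node(struct node *n)
{
    if (n->child)
        return n->child;
    while (!n->sibling && n->parent)
        n = n->parent;
    return n->sibling;
}

Calling next_node() in a loop starting from the root visits every node exactly once without any recursion or auxiliary stack, which is why the kernel uses this shape for seq_file iterators over /proc/iomem and /proc/ioports.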
D | freezer.c |
      37  bool freezing_slow_path(struct task_struct *p)    in freezing_slow_path() argument
      39      if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))    in freezing_slow_path()
      42      if (test_tsk_thread_flag(p, TIF_MEMDIE))    in freezing_slow_path()
      45      if (pm_nosig_freezing || cgroup_freezing(p))    in freezing_slow_path()
      48      if (pm_freezing && !(p->flags & PF_KTHREAD))    in freezing_slow_path()
      94  static void fake_signal_wake_up(struct task_struct *p)    in fake_signal_wake_up() argument
      98      if (lock_task_sighand(p, &flags)) {    in fake_signal_wake_up()
      99          signal_wake_up(p, 0);    in fake_signal_wake_up()
     100          unlock_task_sighand(p, &flags);    in fake_signal_wake_up()
     115  bool freeze_task(struct task_struct *p)    in freeze_task() argument
     [all …]
|
D | sys.c |
     158  static bool set_one_prio_perm(struct task_struct *p)    in set_one_prio_perm() argument
     160      const struct cred *cred = current_cred(), *pcred = __task_cred(p);    in set_one_prio_perm()
     174  static int set_one_prio(struct task_struct *p, int niceval, int error)    in set_one_prio() argument
     178      if (!set_one_prio_perm(p)) {    in set_one_prio()
     182      if (niceval < task_nice(p) && !can_nice(p, niceval)) {    in set_one_prio()
     186      no_nice = security_task_setnice(p, niceval);    in set_one_prio()
     193      set_user_nice(p, niceval);    in set_one_prio()
     200      struct task_struct *g, *p;    in SYSCALL_DEFINE3() local
     222      p = find_task_by_vpid(who);    in SYSCALL_DEFINE3()
     224      p = current;    in SYSCALL_DEFINE3()
     [all …]
|
/kernel/trace/ |
D | trace_branch.c |
      41      const char *p;    in probe_likely_condition() local
      72      p = f->data.file + strlen(f->data.file);    in probe_likely_condition()
      73      while (p >= f->data.file && *p != '/')    in probe_likely_condition()
      74          p--;    in probe_likely_condition()
      75      p++;    in probe_likely_condition()
      78      strncpy(entry->file, p, TRACE_FILE_SIZE);    in probe_likely_condition()
     247  static inline long get_incorrect_percent(struct ftrace_branch_data *p)    in get_incorrect_percent() argument
     251      if (p->correct) {    in get_incorrect_percent()
     252          percent = p->incorrect * 100;    in get_incorrect_percent()
     253          percent /= p->correct + p->incorrect;    in get_incorrect_percent()
     [all …]
|
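The probe_likely_condition() hits show a backwards scan for the last '/' so that only the basename is copied into the fixed-size entry->file buffer. An equivalent standalone helper follows, written so the pointer never decrements past the start of the string (the kernel loop briefly does, which is harmless there but technically undefined in portable C).

/* Basename by backwards scan, like the loop in probe_likely_condition().
 * Unlike basename(3), this never modifies its input. */
#include <stdio.h>
#include <string.h>

static const char *path_basename(const char *file)
{
    const char *p = file + strlen(file);

    while (p > file && p[-1] != '/')
        p--;
    return p;
}

int main(void)
{
    /* prints "trace_branch.c" */
    printf("%s\n", path_basename("kernel/trace/trace_branch.c"));
    return 0;
}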
D | trace_output.c |
      65  trace_print_flags_seq(struct trace_seq *p, const char *delim,    in trace_print_flags_seq() argument
      71      const char *ret = trace_seq_buffer_ptr(p);    in trace_print_flags_seq()
      83          trace_seq_puts(p, delim);    in trace_print_flags_seq()
      86          trace_seq_puts(p, str);    in trace_print_flags_seq()
      92          trace_seq_puts(p, delim);    in trace_print_flags_seq()
      93          trace_seq_printf(p, "0x%lx", flags);    in trace_print_flags_seq()
      96      trace_seq_putc(p, 0);    in trace_print_flags_seq()
     103  trace_print_symbols_seq(struct trace_seq *p, unsigned long val,    in trace_print_symbols_seq() argument
     107      const char *ret = trace_seq_buffer_ptr(p);    in trace_print_symbols_seq()
     114          trace_seq_puts(p, symbol_array[i].name);    in trace_print_symbols_seq()
     [all …]
|
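trace_print_flags_seq() above decodes known flag bits into delimiter-joined names, strips each matched mask from the value, and prints whatever is left as raw hex, so unknown bits are never silently dropped. A userspace sketch of that decode loop; the flag table contents are illustrative.

/* Flags-to-string sketch in the shape of trace_print_flags_seq():
 * named bits first, leftover bits as hex. */
#include <stdio.h>

struct flag_name {
    unsigned long mask;
    const char *name;
};

static void print_flags(unsigned long flags, const char *delim,
                        const struct flag_name *table, size_t n)
{
    int first = 1;
    size_t i;

    for (i = 0; i < n; i++) {
        if ((flags & table[i].mask) != table[i].mask)
            continue;
        flags &= ~table[i].mask;    /* consume the named bits */
        printf("%s%s", first ? "" : delim, table[i].name);
        first = 0;
    }
    if (flags)    /* bits with no name: print them raw */
        printf("%s0x%lx", first ? "" : delim, flags);
    putchar('\n');
}

int main(void)
{
    static const struct flag_name names[] = {
        { 0x1, "READ" }, { 0x2, "WRITE" }, { 0x4, "SYNC" },
    };

    print_flags(0x1 | 0x4 | 0x10, "|", names, 3);    /* READ|SYNC|0x10 */
    return 0;
}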
/kernel/debug/kdb/ |
D | kdb_bt.c |
      22  static void kdb_show_stack(struct task_struct *p, void *addr)    in kdb_show_stack() argument
      27      kdb_set_current_task(p);    in kdb_show_stack()
      29      show_stack((struct task_struct *)p, addr);    in kdb_show_stack()
      32      show_stack(p, &kdb_current_regs->sp);    in kdb_show_stack()
      34      show_stack(p, NULL);    in kdb_show_stack()
      37      show_stack(p, NULL);    in kdb_show_stack()
      81  kdb_bt1(struct task_struct *p, unsigned long mask,    in kdb_bt1() argument
      85      if (kdb_getarea(buffer[0], (unsigned long)p) ||    in kdb_bt1()
      86          kdb_getarea(buffer[0], (unsigned long)(p+1)-1))    in kdb_bt1()
      88      if (!kdb_task_state(p, mask))    in kdb_bt1()
     [all …]
|
D | kdb_support.c |
     621  char kdb_task_state_char (const struct task_struct *p)    in kdb_task_state_char() argument
     627      if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))    in kdb_task_state_char()
     630      cpu = kdb_process_cpu(p);    in kdb_task_state_char()
     631      state = (p->state == 0) ? 'R' :    in kdb_task_state_char()
     632          (p->state < 0) ? 'U' :    in kdb_task_state_char()
     633          (p->state & TASK_UNINTERRUPTIBLE) ? 'D' :    in kdb_task_state_char()
     634          (p->state & TASK_STOPPED) ? 'T' :    in kdb_task_state_char()
     635          (p->state & TASK_TRACED) ? 'C' :    in kdb_task_state_char()
     636          (p->exit_state & EXIT_ZOMBIE) ? 'Z' :    in kdb_task_state_char()
     637          (p->exit_state & EXIT_DEAD) ? 'E' :    in kdb_task_state_char()
     [all …]
|
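kdb_task_state_char() above compresses the scheduler state and exit state into one display character through a ternary chain, checking the most specific conditions first. A standalone sketch with illustrative bit values; the real TASK_* and EXIT_* masks differ, and the kernel's fallback handling is slightly richer.

/* State-to-character mapping sketch in the shape of
 * kdb_task_state_char(). Bit values are made up. */
enum {
    ST_UNINTERRUPTIBLE = 1 << 0,
    ST_STOPPED         = 1 << 1,
    ST_TRACED          = 1 << 2,
    EX_ZOMBIE          = 1 << 0,
    EX_DEAD            = 1 << 1,
};

static char task_state_char(long state, long exit_state)
{
    return (state == 0)                 ? 'R' :   /* running */
           (state < 0)                  ? 'U' :   /* unknown/corrupt */
           (state & ST_UNINTERRUPTIBLE) ? 'D' :
           (state & ST_STOPPED)         ? 'T' :
           (state & ST_TRACED)          ? 'C' :
           (exit_state & EX_ZOMBIE)     ? 'Z' :
           (exit_state & EX_DEAD)       ? 'E' :
           'S';                                   /* sleeping */
}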
/kernel/power/ |
D | process.c |
      34      struct task_struct *g, *p;    in try_to_freeze_tasks() local
      56      for_each_process_thread(g, p) {    in try_to_freeze_tasks()
      57          if (p == current || !freeze_task(p))    in try_to_freeze_tasks()
      60          if (!freezer_should_skip(p))    in try_to_freeze_tasks()
     113      for_each_process_thread(g, p) {    in try_to_freeze_tasks()
     114          if (p != current && !freezer_should_skip(p)    in try_to_freeze_tasks()
     115              && freezing(p) && !frozen(p))    in try_to_freeze_tasks()
     116              sched_show_task(p);    in try_to_freeze_tasks()
     203      struct task_struct *g, *p;    in thaw_processes() local
     222      for_each_process_thread(g, p) {    in thaw_processes()
     [all …]
|
/kernel/time/ |
D | posix-cpu-timers.c |
      53      struct task_struct *p;    in lookup_task() local
      62      p = find_task_by_vpid(pid);    in lookup_task()
      63      if (!p)    in lookup_task()
      64          return p;    in lookup_task()
      67      return same_thread_group(p, current) ? p : NULL;    in lookup_task()
      79      return (p == current || thread_group_leader(p)) ? p : NULL;    in lookup_task()
      85      return has_group_leader_pid(p) ? p : NULL;    in lookup_task()
      93      struct task_struct *p;    in __get_task_for_clock() local
      99      p = lookup_task(pid, thread, gettime);    in __get_task_for_clock()
     100      if (p && getref)    in __get_task_for_clock()
     [all …]
|
/kernel/cgroup/ |
D | pids.c |
     111      struct pids_cgroup *p;    in pids_uncharge() local
     113      for (p = pids; parent_pids(p); p = parent_pids(p))    in pids_uncharge()
     114          pids_cancel(p, num);    in pids_uncharge()
     128      struct pids_cgroup *p;    in pids_charge() local
     130      for (p = pids; parent_pids(p); p = parent_pids(p))    in pids_charge()
     131          atomic64_add(num, &p->counter);    in pids_charge()
     145      struct pids_cgroup *p, *q;    in pids_try_charge() local
     147      for (p = pids; parent_pids(p); p = parent_pids(p)) {    in pids_try_charge()
     148          int64_t new = atomic64_add_return(num, &p->counter);    in pids_try_charge()
     149          int64_t limit = atomic64_read(&p->limit);    in pids_try_charge()
     [all …]
|
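The pids.c hits show hierarchical accounting: a charge is added at every ancestor cgroup, and pids_try_charge() rolls the whole chain back if any level would exceed its limit. A simplified sketch using C11 atomics; note the kernel loop stops short of the root, whereas this illustrative version charges every level including the root.

/* Hierarchical charge sketch in the shape of the pids controller.
 * struct pids_group is a stand-in for struct pids_cgroup. */
#include <stdatomic.h>
#include <stdint.h>

struct pids_group {
    atomic_int_fast64_t counter;
    int64_t limit;
    struct pids_group *parent;
};

static void pids_charge(struct pids_group *g, int64_t num)
{
    for (; g; g = g->parent)
        atomic_fetch_add(&g->counter, num);
}

/* Try-charge: if any ancestor would exceed its limit, undo the
 * charge at every level already touched and fail. */
static int pids_try_charge(struct pids_group *g, int64_t num)
{
    struct pids_group *p, *q;

    for (p = g; p; p = p->parent) {
        int64_t new = atomic_fetch_add(&p->counter, num) + num;

        if (new > p->limit)
            goto revert;
    }
    return 0;

revert:
    for (q = g; q != p; q = q->parent)
        atomic_fetch_sub(&q->counter, num);
    atomic_fetch_sub(&p->counter, num);    /* undo the failing level too */
    return -1;
}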
/kernel/irq/ |
D | proc.c |
     429  int __weak arch_show_interrupts(struct seq_file *p, int prec)    in arch_show_interrupts() argument
     438  int show_interrupts(struct seq_file *p, void *v)    in show_interrupts() argument
     451      return arch_show_interrupts(p, prec);    in show_interrupts()
     458      seq_printf(p, "%*s", prec + 8, "");    in show_interrupts()
     460      seq_printf(p, "CPU%-8d", j);    in show_interrupts()
     461      seq_putc(p, '\n');    in show_interrupts()
     476      seq_printf(p, "%*d: ", prec, i);    in show_interrupts()
     478      seq_printf(p, "%10u ", desc->kstat_irqs ?    in show_interrupts()
     484      desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);    in show_interrupts()
     486      seq_printf(p, " %8s", desc->irq_data.chip->name);    in show_interrupts()
     [all …]
|
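show_interrupts() above builds /proc/interrupts out of seq_printf() field widths: a padded header row of CPU columns, then one row per IRQ with fixed-width per-CPU counts and the chip name. The same layout reproduced with plain printf() and made-up data:

/* /proc/interrupts-style table sketch: the format strings mirror the
 * seq_printf() calls above; the counts and names are invented. */
#include <stdio.h>

int main(void)
{
    unsigned int counts[2][3] = { { 42, 7, 1900 }, { 3, 0, 12 } };
    const char *names[2] = { "timer", "uart" };
    int prec = 2, irq, cpu;

    printf("%*s", prec + 8, "");          /* blank corner cell */
    for (cpu = 0; cpu < 3; cpu++)
        printf("CPU%-8d", cpu);           /* header of CPU columns */
    putchar('\n');

    for (irq = 0; irq < 2; irq++) {
        printf("%*d: ", prec, irq);       /* right-aligned IRQ number */
        for (cpu = 0; cpu < 3; cpu++)
            printf("%10u ", counts[irq][cpu]);
        printf(" %8s\n", names[irq]);     /* chip/handler name column */
    }
    return 0;
}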
/kernel/rcu/ |
D | rcu.h |
     381  #define raw_spin_lock_rcu_node(p) \    argument
     383      raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
     387  #define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))    argument
     389  #define raw_spin_lock_irq_rcu_node(p) \    argument
     391      raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
     395  #define raw_spin_unlock_irq_rcu_node(p) \    argument
     396      raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
     398  #define raw_spin_lock_irqsave_rcu_node(p, flags) \    argument
     400      raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
     404  #define raw_spin_unlock_irqrestore_rcu_node(p, flags) \    argument
     [all …]
|
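The rcu.h macros above wrap each raw spinlock acquisition on an rcu_node so that an ordering step (smp_mb__after_unlock_lock() in the kernel) can never be forgotten at a call site; the unlock side needs no extra barrier. A userspace sketch of the wrapper shape, with a GCC-style compiler barrier standing in for the real memory barrier:

/* Lock-plus-ordering wrapper sketch in the shape of
 * raw_spin_lock_rcu_node(). The barrier here is only a compiler
 * barrier; the kernel's is a full memory barrier on some
 * architectures and a no-op on others. */
#include <pthread.h>

#define barrier_after_lock()    __asm__ __volatile__("" ::: "memory")

#define node_lock(p) \
do { \
    pthread_mutex_lock(&(p)->lock); \
    barrier_after_lock(); \
} while (0)

#define node_unlock(p)    pthread_mutex_unlock(&(p)->lock)

struct node_example {
    pthread_mutex_t lock;
    int data;    /* protected by lock */
};

Wrapping the pair in one macro is the design point: every acquisition of a node lock gets the ordering guarantee for free, instead of relying on each caller to remember the barrier.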