task_cpu() cross-reference hits in kernel/linux/linux-5.10, grouped by directory:

/kernel/linux/linux-5.10/arch/x86/um/
  ptrace_32.c
    198  int err, n, cpu = task_cpu(child);  in get_fpregs()
    215  int n, cpu = task_cpu(child);  in set_fpregs()
    228  int err, n, cpu = task_cpu(child);  in get_fpxregs()
    244  int n, cpu = task_cpu(child);  in set_fpxregs()
/kernel/linux/linux-5.10/arch/ia64/include/asm/
  switch_to.h
    62  (task_cpu(current) != \
    64  task_thread_info(current)->last_cpu = task_cpu(current); \
/kernel/linux/linux-5.10/kernel/sched/
  walt.c
    436  (struct task_struct *p, int new_cpu, int task_cpu, bool new_task)  in inter_cluster_migration_fixup() argument
    439  struct rq *src_rq = cpu_rq(task_cpu);  in inter_cluster_migration_fixup()
    441  if (same_freq_domain(new_cpu, task_cpu))  in inter_cluster_migration_fixup()
    450  src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu];  in inter_cluster_migration_fixup()
    451  src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu];  in inter_cluster_migration_fixup()
    458  p->ravg.curr_window_cpu[task_cpu];  in inter_cluster_migration_fixup()
    460  p->ravg.prev_window_cpu[task_cpu];  in inter_cluster_migration_fixup()
    463  p->ravg.curr_window_cpu[task_cpu] = 0;  in inter_cluster_migration_fixup()
    464  p->ravg.prev_window_cpu[task_cpu] = 0;  in inter_cluster_migration_fixup()
    466  update_cluster_load_subtractions(p, task_cpu,  in inter_cluster_migration_fixup()
    [all …]
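All of the walt.c hits sit inside inter_cluster_migration_fixup(), which moves a migrating task's windowed load accounting from the source runqueue to the destination when the two CPUs belong to different frequency domains. Note that here task_cpu is a plain int parameter shadowing the task_cpu() helper, which is why the indexer tags hit 436 as "argument". The sketch below is reassembled from the fragments listed above; the dest_rq lookup, the update_cluster_load_subtractions() trailing arguments and the omitted new-task sums are assumptions, not a verbatim copy of this tree:

    /*
     * Sketch only: rebuilt from the search hits above.  Pieces not shown in
     * the listing (dest_rq, window_start, new-task sums) are assumptions.
     */
    static void inter_cluster_migration_fixup(struct task_struct *p, int new_cpu,
                                              int task_cpu, bool new_task)
    {
        struct rq *dest_rq = cpu_rq(new_cpu);   /* assumed; not in the hits */
        struct rq *src_rq  = cpu_rq(task_cpu);

        /* Same frequency domain: per-cluster sums are unaffected. */
        if (same_freq_domain(new_cpu, task_cpu))
            return;

        /* Drop the task's current/previous window contribution from the source... */
        src_rq->curr_runnable_sum  -= p->ravg.curr_window_cpu[task_cpu];
        src_rq->prev_runnable_sum  -= p->ravg.prev_window_cpu[task_cpu];

        /* ...and credit it to the destination. */
        dest_rq->curr_runnable_sum += p->ravg.curr_window_cpu[task_cpu];
        dest_rq->prev_runnable_sum += p->ravg.prev_window_cpu[task_cpu];

        /* The source CPU no longer carries any of this task's window load. */
        p->ravg.curr_window_cpu[task_cpu] = 0;
        p->ravg.prev_window_cpu[task_cpu] = 0;

        /* Trailing arguments are assumed; the listing truncates this call. */
        update_cluster_load_subtractions(p, task_cpu, src_rq->window_start, new_task);
    }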
  stop_task.c
    17  return task_cpu(p); /* stop tasks as never migrate */  in select_task_rq_stop()
  core.c
    1721  return cpu_curr(task_cpu(p)) == p;  in task_curr()
    1816  BUG_ON(task_cpu(p) != new_cpu);  in move_queued_task()
    2016  if (cpumask_test_cpu(task_cpu(p), &allowed_mask))  in __set_cpus_allowed_ptr()
    2019  if (cpumask_test_cpu(task_cpu(p), new_mask))  in __set_cpus_allowed_ptr()
    2089  if (task_cpu(p) != new_cpu) {  in set_task_cpu()
    2153  if (task_cpu(arg->dst_task) != arg->dst_cpu)  in migrate_swap_stop()
    2156  if (task_cpu(arg->src_task) != arg->src_cpu)  in migrate_swap_stop()
    2344  cpu = task_cpu(p);  in kick_process()
    2501  cpu = select_fallback_rq(task_cpu(p), p, allow_isolated);  in select_task_rq()
    2504  cpu = select_fallback_rq(task_cpu(p), p);  in select_task_rq()
    [all …]
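Among the core.c hits, kick_process() (hit 2344) is the most compact illustration of what task_cpu() is for: read the CPU a remote task last ran on and, if the task is running there right now, poke that CPU with a reschedule IPI. A sketch of the mainline 5.10 shape; a vendor tree like this one may add hooks around it:

    /* Sketch of kick_process() as in mainline 5.10; comments added here. */
    void kick_process(struct task_struct *p)
    {
        int cpu;

        preempt_disable();              /* keep our own CPU stable while deciding */
        cpu = task_cpu(p);              /* CPU the task last ran on / runs on now */
        if ((cpu != smp_processor_id()) && task_curr(p))
            smp_send_reschedule(cpu);   /* make it pass through the scheduler */
        preempt_enable();
    }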
  deadline.c
    337  struct dl_bw *dl_b = dl_bw_of(task_cpu(p));  in task_non_contending()
    342  __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in task_non_contending()
    1358  struct dl_bw *dl_b = dl_bw_of(task_cpu(p));  in inactive_task_timer()
    1367  __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in inactive_task_timer()
    1966  int cpu = task_cpu(task);  in find_later_rq()
    2058  BUG_ON(rq->cpu != task_cpu(p));  in pick_next_pushable_dl_task()
    2347  __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in set_cpus_allowed_dl()
    2654  int cpus, err = -1, cpu = task_cpu(p);  in sched_dl_overflow()
  rt.c
    1741  int prev_cpu = task_cpu(task);  in find_cas_cpu()
    1896  int cpu = task_cpu(task);  in find_lowest_rq()
    2001  BUG_ON(rq->cpu != task_cpu(p));  in pick_next_pushable_task()
    2648  task_cpu(next_task) != cpu_of(busiest_rq)  in rt_active_load_balance_cpu_stop()
    2656  if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task)))  in rt_active_load_balance_cpu_stop()
    2677  int cpu = task_cpu(p);  in check_for_migration_rt()
    2716  stop_one_cpu_nowait(task_cpu(p),  in check_for_migration_rt()
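find_lowest_rq() (hit 1896) starts its push/pull search from task_cpu(task): if the task's current CPU already belongs to the lowest-priority mask it wins on cache locality and no migration is attempted. A simplified sketch of that fast path; the cpupri lookup and this tree's capacity-aware/CAS extensions are deliberately left out:

    /*
     * Simplified fast-path sketch of find_lowest_rq(); not the full function.
     * lowest_mask is assumed to already hold the CPUs running lower-priority
     * work than 'task'.
     */
    static int find_lowest_rq_sketch(struct task_struct *task,
                                     struct cpumask *lowest_mask)
    {
        int cpu = task_cpu(task);

        if (task->nr_cpus_allowed == 1)
            return -1;                      /* pinned: nowhere else to push it */

        /* Keep the task where it is if that CPU is already a valid target. */
        if (cpumask_test_cpu(cpu, lowest_mask))
            return cpu;

        return -1;                          /* topology-aware search elided */
    }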
  idle.c
    407  return task_cpu(p); /* IDLE tasks as never migrated */  in select_task_rq_idle()
  psi.c
    779  task->pid, task->comm, task_cpu(task),  in psi_flags_change()
    790  int cpu = task_cpu(task);  in psi_task_change()
    819  int cpu = task_cpu(prev);  in psi_task_switch()
  cpudeadline.c
    138  (cpu == task_cpu(p) && cap == max_cap)) {  in cpudl_find()
  fair.c
    2009  .src_cpu = task_cpu(p),  in task_numa_migrate()
    2974  int src_nid = cpu_to_node(task_cpu(p));  in update_scan_period()
    5976  if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in cpu_load_without()
    5999  if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in cpu_runnable_without()
    6700  if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in cpu_util_without()
    6798  if (task_cpu(p) == cpu && dst_cpu != cpu)  in cpu_util_next()
    6800  else if (task_cpu(p) != cpu && dst_cpu == cpu)  in cpu_util_next()
    9060  if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in task_running_on_cpu()
    10376  task_cpu(push_task) == busiest_cpu &&  in active_load_balance_cpu_stop()
    11269  int prev_cpu = task_cpu(p);  in check_for_migration_fair()
    [all …]
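Hits 6798/6800 are the core of cpu_util_next(), which estimates a CPU's utilization under the hypothesis that p runs on dst_cpu: remove p's utilization if it would leave this CPU, add it if it would land here. A stripped-down sketch; the real 5.10 function also folds in util_est and clamps to capacity_orig_of(cpu):

    /* Stripped-down sketch of the cpu_util_next() idea (fair.c, 5.10). */
    static unsigned long cpu_util_next_sketch(int cpu, struct task_struct *p,
                                              int dst_cpu)
    {
        unsigned long util = READ_ONCE(cpu_rq(cpu)->cfs.avg.util_avg);

        if (task_cpu(p) == cpu && dst_cpu != cpu)
            sub_positive(&util, task_util(p));  /* p would leave this CPU */
        else if (task_cpu(p) != cpu && dst_cpu == cpu)
            util += task_util(p);               /* p would land on this CPU */

        return util;
    }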
/kernel/linux/linux-5.10/kernel/rcu/
  tasks.h
    451  cpu = task_cpu(t);  in check_holdout_task()
    858  int cpu = task_cpu(t);  in trc_inspect_reader()
    930  cpu = task_cpu(t);  in trc_wait_for_one_reader()
    1017  cpu = task_cpu(t);  in show_stalled_task_trace()
/kernel/linux/linux-5.10/include/linux/
  kdb.h
    180  unsigned int cpu = task_cpu(p);  in kdb_process_cpu()
  sched.h
    2067  static inline unsigned int task_cpu(const struct task_struct *p)  in task_cpu() function
    2080  static inline unsigned int task_cpu(const struct task_struct *p)  in task_cpu() function
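The two sched.h hits are the definitions themselves: a CONFIG_SMP variant that reads the CPU recorded in the task (or its thread_info), and a uniprocessor stub that always returns 0. A sketch of how they look in mainline 5.10:

    /* Sketch of the two task_cpu() definitions in 5.10's <linux/sched.h>. */
    #ifdef CONFIG_SMP
    static inline unsigned int task_cpu(const struct task_struct *p)
    {
    #ifdef CONFIG_THREAD_INFO_IN_TASK
        return READ_ONCE(p->cpu);                   /* thread_info embedded in task_struct */
    #else
        return READ_ONCE(task_thread_info(p)->cpu);
    #endif
    }
    #else
    static inline unsigned int task_cpu(const struct task_struct *p)
    {
        return 0;                                   /* only one CPU without SMP */
    }
    #endif

The return value is only a snapshot: unless the caller holds the runqueue lock or p->pi_lock, or disables preemption around the check as kick_process() does above, the task may have migrated by the time the value is used.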
/kernel/linux/linux-5.10/kernel/trace/
  trace_sched_wakeup.c
    396  entry->next_cpu = task_cpu(next);  in tracing_sched_switch_trace()
    424  entry->next_cpu = task_cpu(wakee);  in tracing_sched_wakeup_trace()
    569  wakeup_cpu = task_cpu(p);  in probe_wakeup()
/kernel/linux/linux-5.10/include/linux/sched/
  topology.h
    263  return cpu_to_node(task_cpu(p));  in task_node()
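task_node() just maps the task's current CPU to its NUMA node. A typical, purely illustrative use (not taken from this tree) is steering an allocation toward the node the task is running on:

    #include <linux/sched.h>
    #include <linux/sched/topology.h>
    #include <linux/slab.h>

    /* Illustrative only: allocate a buffer on the given task's current node. */
    static void *alloc_near_task(struct task_struct *p, size_t size)
    {
        return kmalloc_node(size, GFP_KERNEL, task_node(p));
    }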
/kernel/linux/linux-5.10/kernel/sched/rtg/
  rtg.c
    725  if (idle_cpu(i) || (i == task_cpu(p) && p->state == TASK_RUNNING)) {  in find_rtg_cpu()
    741  if (idle_cpu(i) || (i == task_cpu(p) && p->state == TASK_RUNNING)) {  in find_rtg_cpu()
    817  cpumask_set_cpu(task_cpu(p), &rtg_cpus);  in valid_normalized_util()
    1164  tsk->prio, task_cpu(tsk), cpumask_pr_args(tsk->cpus_ptr));  in print_rtg_task()
/kernel/linux/linux-5.10/include/trace/events/
  rtg.h
    67  __entry->cpu = task_cpu(task);
  walt.h
    226  __entry->tcpu = task_cpu(p);
  sched.h
    81  __entry->target_cpu = task_cpu(p);
    212  __entry->orig_cpu = task_cpu(p);
/kernel/linux/linux-5.10/Documentation/scheduler/
  sched-capacity.rst
    339  task_util(p) < capacity(task_cpu(p))
    402  then it might become CPU-bound, IOW ``task_util(p) > capacity(task_cpu(p))``;
    421  task_uclamp_min(p) <= capacity(task_cpu(cpu))
    435  task_bandwidth(p) < capacity(task_cpu(p))
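All four documentation hits express the same placement rule: a task fits a CPU when its utilization (or clamp, or bandwidth) stays below that CPU's capacity. In the 5.10 CFS code the raw comparison is applied with roughly 20% headroom through the fits_capacity() macro; a short worked example:

    /* The margin behind "task_util(p) < capacity(task_cpu(p))" in 5.10 fair.c: */
    #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)

    /*
     * Worked example (capacities on the usual 0..1024 scale, numbers made up):
     *   util 300 vs capacity 446: 300 * 1280 = 384000 <  446 * 1024 = 456704  -> fits
     *   util 400 vs capacity 446: 400 * 1280 = 512000 >= 456704               -> too big,
     *     even though 400 < 446 would pass the unmargined comparison.
     */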
/kernel/linux/linux-5.10/kernel/locking/
  mutex.c
    579  vcpu_is_preempted(task_cpu(owner))) {  in mutex_spin_on_owner()
    615  retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));  in mutex_can_spin_on_owner()
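Here task_cpu() feeds vcpu_is_preempted(): optimistic spinning only pays off while the lock owner is actually executing, so the spinner gives up if the owner's (virtual) CPU has been preempted by the hypervisor. A condensed sketch of the 5.10 spin loop with the ww_mutex handling dropped:

    /*
     * Condensed sketch of mutex_spin_on_owner() from 5.10 (ww_mutex parts
     * removed).  Returns true if the owner released the lock while we spun,
     * false once spinning stops being worthwhile.
     */
    static bool mutex_spin_on_owner_sketch(struct mutex *lock,
                                           struct task_struct *owner)
    {
        bool ret = true;

        rcu_read_lock();
        while (__mutex_owner(lock) == owner) {
            barrier();          /* re-check owner before touching its fields */

            /*
             * Stop if the owner went off-CPU, we need to reschedule, or the
             * owner's vCPU was preempted by the host: spinning would then
             * only burn cycles waiting for a descheduled vCPU.
             */
            if (!owner->on_cpu || need_resched() ||
                vcpu_is_preempted(task_cpu(owner))) {
                ret = false;
                break;
            }
            cpu_relax();
        }
        rcu_read_unlock();

        return ret;
    }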
/kernel/linux/linux-5.10/drivers/staging/hungtask/
  hungtask_base.c
    308  task_cpu(p), p->group_leader->comm, taskitem->task_type,  in show_block_task()
    314  p->comm, p->pid, p->tgid, p->prio, task_cpu(p),  in show_block_task()
/kernel/linux/linux-5.10/arch/arm64/kernel/
  smp.c
    206  cpu = task_cpu(current);  in secondary_start_kernel()
/kernel/linux/linux-5.10/arch/mips/kernel/
  process.c
    834  cpumask_set_cpu(task_cpu(t), &process_cpus);  in mips_set_process_fp_mode()