Searched refs: task_cpu (Results 1 – 25 of 41) sorted by relevance

/kernel/linux/linux-5.10/arch/x86/um/
ptrace_32.c:198 int err, n, cpu = task_cpu(child); in get_fpregs()
215 int n, cpu = task_cpu(child); in set_fpregs()
228 int err, n, cpu = task_cpu(child); in get_fpxregs()
244 int n, cpu = task_cpu(child); in set_fpxregs()
/kernel/linux/linux-5.10/arch/ia64/include/asm/
switch_to.h:62 (task_cpu(current) != \
64 task_thread_info(current)->last_cpu = task_cpu(current); \
/kernel/linux/linux-5.10/kernel/sched/
walt.c:436 (struct task_struct *p, int new_cpu, int task_cpu, bool new_task) in inter_cluster_migration_fixup() argument
439 struct rq *src_rq = cpu_rq(task_cpu); in inter_cluster_migration_fixup()
441 if (same_freq_domain(new_cpu, task_cpu)) in inter_cluster_migration_fixup()
450 src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu]; in inter_cluster_migration_fixup()
451 src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu]; in inter_cluster_migration_fixup()
458 p->ravg.curr_window_cpu[task_cpu]; in inter_cluster_migration_fixup()
460 p->ravg.prev_window_cpu[task_cpu]; in inter_cluster_migration_fixup()
463 p->ravg.curr_window_cpu[task_cpu] = 0; in inter_cluster_migration_fixup()
464 p->ravg.prev_window_cpu[task_cpu] = 0; in inter_cluster_migration_fixup()
466 update_cluster_load_subtractions(p, task_cpu, in inter_cluster_migration_fixup()
[all …]
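
The walt.c hits above show WALT's accounting fixup when a task migrates out of its old CPU's frequency domain: the task's windowed demand is removed from the source runqueue's sums, credited to the destination, and the per-CPU window slots are cleared. A condensed sketch assembled from the hit lines above, not a verbatim quote (the dest_rq bookkeeping and the trailing update_cluster_load_subtractions() arguments are assumptions):

    static void inter_cluster_migration_fixup(struct task_struct *p, int new_cpu,
                                              int task_cpu, bool new_task)
    {
            struct rq *dest_rq = cpu_rq(new_cpu);   /* assumed counterpart */
            struct rq *src_rq  = cpu_rq(task_cpu);

            /* Same frequency domain: per-cluster sums are unaffected. */
            if (same_freq_domain(new_cpu, task_cpu))
                    return;

            /* Remove the task's windowed demand from the source CPU... */
            src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu];
            src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu];

            /* ...credit it to the destination CPU... */
            dest_rq->curr_runnable_sum += p->ravg.curr_window_cpu[task_cpu];
            dest_rq->prev_runnable_sum += p->ravg.prev_window_cpu[task_cpu];

            /* ...and clear the source slots so they are not subtracted twice. */
            p->ravg.curr_window_cpu[task_cpu] = 0;
            p->ravg.prev_window_cpu[task_cpu] = 0;

            /* Assumed argument list beyond (p, task_cpu, ...). */
            update_cluster_load_subtractions(p, task_cpu, src_rq->window_start,
                                             new_task);
    }
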
stop_task.c:17 return task_cpu(p); /* stop tasks as never migrate */ in select_task_rq_stop()
core.c:1721 return cpu_curr(task_cpu(p)) == p; in task_curr()
1816 BUG_ON(task_cpu(p) != new_cpu); in move_queued_task()
2016 if (cpumask_test_cpu(task_cpu(p), &allowed_mask)) in __set_cpus_allowed_ptr()
2019 if (cpumask_test_cpu(task_cpu(p), new_mask)) in __set_cpus_allowed_ptr()
2089 if (task_cpu(p) != new_cpu) { in set_task_cpu()
2153 if (task_cpu(arg->dst_task) != arg->dst_cpu) in migrate_swap_stop()
2156 if (task_cpu(arg->src_task) != arg->src_cpu) in migrate_swap_stop()
2344 cpu = task_cpu(p); in kick_process()
2501 cpu = select_fallback_rq(task_cpu(p), p, allow_isolated); in select_task_rq()
2504 cpu = select_fallback_rq(task_cpu(p), p); in select_task_rq()
[all …]
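
Among the core.c hits, the one at 2344 is kick_process(), a compact example of how task_cpu() is meant to be consumed: sample the CPU under preempt_disable(), then act only if the task is still running there. A from-memory sketch of the 5.10 shape, not a verbatim quote:

    void kick_process(struct task_struct *p)
    {
            int cpu;

            preempt_disable();
            cpu = task_cpu(p);              /* may already be stale */
            if ((cpu != smp_processor_id()) && task_curr(p))
                    smp_send_reschedule(cpu);  /* IPI forces p through the scheduler */
            preempt_enable();
    }
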
deadline.c:337 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_non_contending()
342 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
1358 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in inactive_task_timer()
1367 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
1966 int cpu = task_cpu(task); in find_later_rq()
2058 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_dl_task()
2347 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
2654 int cpus, err = -1, cpu = task_cpu(p); in sched_dl_overflow()
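
The deadline.c hits all follow one pattern: task_cpu(p) selects the root domain whose bandwidth pool the task charges, dl_bw_of() returns that pool, and dl_bw_cpus() counts its CPUs so the per-CPU share can be adjusted. A from-memory sketch of the subtraction helper these call sites use (field names as recalled from 5.10):

    static inline void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
    {
            dl_b->total_bw -= tsk_bw;               /* return the task's bandwidth */
            __dl_update(dl_b, (s32)tsk_bw / cpus);  /* spread the change per CPU */
    }
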
rt.c:1741 int prev_cpu = task_cpu(task); in find_cas_cpu()
1896 int cpu = task_cpu(task); in find_lowest_rq()
2001 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_task()
2648 task_cpu(next_task) != cpu_of(busiest_rq)) in rt_active_load_balance_cpu_stop()
2656 if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task))) in rt_active_load_balance_cpu_stop()
2677 int cpu = task_cpu(p); in check_for_migration_rt()
2716 stop_one_cpu_nowait(task_cpu(p), in check_for_migration_rt()
idle.c:407 return task_cpu(p); /* IDLE tasks as never migrated */ in select_task_rq_idle()
psi.c:779 task->pid, task->comm, task_cpu(task), in psi_flags_change()
790 int cpu = task_cpu(task); in psi_task_change()
819 int cpu = task_cpu(prev); in psi_task_switch()
cpudeadline.c:138 (cpu == task_cpu(p) && cap == max_cap)) { in cpudl_find()
fair.c:2009 .src_cpu = task_cpu(p), in task_numa_migrate()
2974 int src_nid = cpu_to_node(task_cpu(p)); in update_scan_period()
5976 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
5999 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
6700 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
6798 if (task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util_next()
6800 else if (task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util_next()
9060 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
10376 task_cpu(push_task) == busiest_cpu && in active_load_balance_cpu_stop()
11269 int prev_cpu = task_cpu(p); in check_for_migration_fair()
[all …]
/kernel/linux/linux-5.10/kernel/rcu/
tasks.h:451 cpu = task_cpu(t); in check_holdout_task()
858 int cpu = task_cpu(t); in trc_inspect_reader()
930 cpu = task_cpu(t); in trc_wait_for_one_reader()
1017 cpu = task_cpu(t); in show_stalled_task_trace()
/kernel/linux/linux-5.10/include/linux/
kdb.h:180 unsigned int cpu = task_cpu(p); in kdb_process_cpu()
sched.h:2067 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu() function
2080 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu() function
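
The two sched.h hits (2067 and 2080) are the definition of task_cpu() itself, once for SMP and once for UP. Sketched from memory of the 5.10 source; the READ_ONCE() is what lets the lockless call sites above tolerate concurrent migration:

    #ifdef CONFIG_SMP
    static inline unsigned int task_cpu(const struct task_struct *p)
    {
    #ifdef CONFIG_THREAD_INFO_IN_TASK
            return READ_ONCE(p->cpu);
    #else
            return READ_ONCE(task_thread_info(p)->cpu);
    #endif
    }
    #else
    static inline unsigned int task_cpu(const struct task_struct *p)
    {
            return 0;       /* UP: everything runs on CPU 0 */
    }
    #endif
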
/kernel/linux/linux-5.10/kernel/trace/
trace_sched_wakeup.c:396 entry->next_cpu = task_cpu(next); in tracing_sched_switch_trace()
424 entry->next_cpu = task_cpu(wakee); in tracing_sched_wakeup_trace()
569 wakeup_cpu = task_cpu(p); in probe_wakeup()
/kernel/linux/linux-5.10/include/linux/sched/
topology.h:263 return cpu_to_node(task_cpu(p)); in task_node()
/kernel/linux/linux-5.10/kernel/sched/rtg/
rtg.c:725 if (idle_cpu(i) || (i == task_cpu(p) && p->state == TASK_RUNNING)) { in find_rtg_cpu()
741 if (idle_cpu(i) || (i == task_cpu(p) && p->state == TASK_RUNNING)) { in find_rtg_cpu()
817 cpumask_set_cpu(task_cpu(p), &rtg_cpus); in valid_normalized_util()
1164 tsk->prio, task_cpu(tsk), cpumask_pr_args(tsk->cpus_ptr)); in print_rtg_task()
/kernel/linux/linux-5.10/include/trace/events/
rtg.h:67 __entry->cpu = task_cpu(task);
walt.h:226 __entry->tcpu = task_cpu(p);
sched.h:81 __entry->target_cpu = task_cpu(p);
212 __entry->orig_cpu = task_cpu(p);
/kernel/linux/linux-5.10/Documentation/scheduler/
sched-capacity.rst:339 task_util(p) < capacity(task_cpu(p))
402 then it might become CPU-bound, IOW ``task_util(p) > capacity(task_cpu(p))``;
421 task_uclamp_min(p) <= capacity(task_cpu(cpu))
435 task_bandwidth(p) < capacity(task_cpu(p))
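
These documentation hits state the asymmetric-capacity fitness criteria, e.g. task_util(p) < capacity(task_cpu(p)). In practice the 5.10 fair class evaluates that comparison with headroom rather than as a bare "<"; a from-memory sketch (the task_fits_cpu() wrapper is illustrative, not a 5.10 symbol):

    /* Utilization "fits" a capacity only with ~20% headroom to spare
     * (cap * 1.25 < max, computed as cap * 1280 < max * 1024). */
    #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)

    /* Hypothetical wrapper mirroring the documented criterion. */
    static inline bool task_fits_cpu(struct task_struct *p, int cpu)
    {
            return fits_capacity(task_util(p), capacity_of(cpu));
    }
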
/kernel/linux/linux-5.10/kernel/locking/
mutex.c:579 vcpu_is_preempted(task_cpu(owner))) { in mutex_spin_on_owner()
615 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); in mutex_can_spin_on_owner()
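
The mutex.c hits are the optimistic-spinning heuristic: a waiter busy-waits only while the owner is actively running, and task_cpu(owner) names the CPU whose vCPU preemption state decides whether spinning is still worthwhile under a hypervisor. A condensed sketch of the mutex_spin_on_owner() loop (RCU protection and barriers omitted for brevity):

    static noinline bool spin_on_owner(struct mutex *lock, struct task_struct *owner)
    {
            bool ret = true;

            while (__mutex_owner(lock) == owner) {
                    /* Stop spinning once the owner is off-CPU, we should
                     * reschedule, or the owner's vCPU was preempted by the
                     * host: the lock will not be released any time soon. */
                    if (!owner->on_cpu || need_resched() ||
                        vcpu_is_preempted(task_cpu(owner))) {
                            ret = false;
                            break;
                    }
                    cpu_relax();
            }
            return ret;
    }
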
/kernel/linux/linux-5.10/drivers/staging/hungtask/
hungtask_base.c:308 task_cpu(p), p->group_leader->comm, taskitem->task_type, in show_block_task()
314 p->comm, p->pid, p->tgid, p->prio, task_cpu(p), in show_block_task()
/kernel/linux/linux-5.10/arch/arm64/kernel/
smp.c:206 cpu = task_cpu(current); in secondary_start_kernel()
/kernel/linux/linux-5.10/arch/mips/kernel/
process.c:834 cpumask_set_cpu(task_cpu(t), &process_cpus); in mips_set_process_fp_mode()
