Lines Matching refs:this_cpu

1385 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1391 iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
2102 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) in find_idlest_group() argument
2119 local_group = cpumask_test_cpu(this_cpu, in find_idlest_group()
2157 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) in find_idlest_cpu() argument
2167 if (load < min_load || (load == min_load && i == this_cpu)) { in find_idlest_cpu()
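
The find_idlest_group()/find_idlest_cpu() hits above show the local-CPU bias when choosing a target CPU for a task: a sched_group counts as local when this_cpu is in its cpumask, and the idlest-CPU scan prefers this_cpu on a load tie. A minimal standalone sketch of that selection loop follows; struct group, cpu_in_group(), NR_CPUS and the load[] array are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct group { unsigned long cpumask; };   /* bit i set => CPU i is in the group */

/* Stand-in for cpumask_test_cpu() against the group's cpumask. */
static bool cpu_in_group(const struct group *g, int cpu)
{
        return (g->cpumask >> cpu) & 1UL;
}

/*
 * Pick the least-loaded CPU in a group, preferring this_cpu on a tie,
 * mirroring the test at the find_idlest_cpu() hit above:
 *   load < min_load || (load == min_load && i == this_cpu)
 */
static int find_idlest_cpu_sketch(const struct group *g,
                                  const unsigned long load[NR_CPUS],
                                  int this_cpu)
{
        unsigned long min_load = ~0UL;
        int idlest = this_cpu;

        for (int i = 0; i < NR_CPUS; i++) {
                if (!cpu_in_group(g, i))
                        continue;
                if (load[i] < min_load ||
                    (load[i] == min_load && i == this_cpu)) {
                        min_load = load[i];
                        idlest = i;
                }
        }
        return idlest;
}

int main(void)
{
        struct group g = { .cpumask = 0x0f };                    /* CPUs 0-3 */
        unsigned long load[NR_CPUS] = { 5, 2, 2, 7, 0, 0, 0, 0 };

        /* CPUs 1 and 2 tie on load; running from CPU 2 keeps the task local. */
        printf("idlest = %d\n", find_idlest_cpu_sketch(&g, load, 2));
        return 0;
}
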
2261 int cpu, orig_cpu, this_cpu, success = 0; in try_to_wake_up() local
2273 this_cpu = raw_smp_processor_id(); in try_to_wake_up()
2276 for_each_domain(this_cpu, sd) { in try_to_wake_up()
2297 this_cpu = smp_processor_id(); in try_to_wake_up()
2315 this_cpu = smp_processor_id(); in try_to_wake_up()
2321 if (cpu == this_cpu) in try_to_wake_up()
2325 for_each_domain(this_cpu, sd) { in try_to_wake_up()
2341 if (cpu == this_cpu) in try_to_wake_up()
2893 int new_cpu, this_cpu = get_cpu(); in sched_exec() local
2894 new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC); in sched_exec()
2896 if (new_cpu != this_cpu) in sched_exec()
2905 struct rq *this_rq, int this_cpu) in pull_task() argument
2908 set_task_cpu(p, this_cpu); in pull_task()
2921 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, in can_migrate_task() argument
2931 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { in can_migrate_task()
2967 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, in balance_tasks() argument
2990 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { in balance_tasks()
2995 pull_task(busiest, p, this_rq, this_cpu); in balance_tasks()
3029 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, in move_tasks() argument
3040 class->load_balance(this_rq, this_cpu, busiest, in move_tasks()
3054 iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, in iter_move_one_task() argument
3062 if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { in iter_move_one_task()
3063 pull_task(busiest, p, this_rq, this_cpu); in iter_move_one_task()
3086 static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, in move_one_task() argument
3092 if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle)) in move_one_task()
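
The pull_task() through move_one_task() hits show this_cpu threaded down the task-migration chain: move_tasks() invokes each scheduler class's load_balance iterator, balance_tasks() filters candidates through can_migrate_task() (which rejects a task whose cpus_allowed excludes this_cpu), and pull_task() retargets the chosen task via set_task_cpu(p, this_cpu) and enqueues it on this_rq. The toy sketch below mirrors that shape only; the linked-list runqueue and helper names are simplified stand-ins, not the kernel's data structures.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int cpu; bool pinned; struct task *next; };  /* pinned: not allowed on this_cpu */
struct rq   { int cpu; struct task *queue; };

/* Stand-in for can_migrate_task(): reject tasks whose affinity excludes this_cpu. */
static bool can_migrate(const struct task *p, int this_cpu)
{
        (void)this_cpu;
        return !p->pinned;
}

/* Stand-in for pull_task(): retarget the task (cf. set_task_cpu(p, this_cpu))
 * and enqueue it on this_rq. */
static void pull(struct task *p, struct rq *this_rq, int this_cpu)
{
        p->cpu = this_cpu;
        p->next = this_rq->queue;
        this_rq->queue = p;
}

/* Stand-in for balance_tasks()/move_tasks(): walk busiest's queue and pull
 * every task this_cpu is allowed to run; pinned tasks stay where they are. */
static int move_tasks_sketch(struct rq *this_rq, int this_cpu, struct rq *busiest)
{
        struct task **pp = &busiest->queue;
        int moved = 0;

        while (*pp) {
                struct task *p = *pp;

                if (!can_migrate(p, this_cpu)) {
                        pp = &p->next;          /* leave it on busiest */
                        continue;
                }
                *pp = p->next;                  /* unlink from busiest */
                pull(p, this_rq, this_cpu);
                moved++;
        }
        return moved;
}

int main(void)
{
        struct task a = { .cpu = 1, .pinned = false, .next = NULL };
        struct task b = { .cpu = 1, .pinned = true,  .next = &a };
        struct rq busiest = { .cpu = 1, .queue = &b };
        struct rq this_rq = { .cpu = 0, .queue = NULL };

        /* Only the unpinned task migrates: prints "moved 1 task(s)". */
        printf("moved %d task(s)\n",
               move_tasks_sketch(&this_rq, this_rq.cpu, &busiest));
        return 0;
}
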
3104 find_busiest_group(struct sched_domain *sd, int this_cpu, in find_busiest_group() argument
3142 local_group = cpumask_test_cpu(this_cpu, in find_busiest_group()
3191 balance_cpu != this_cpu && balance) { in find_busiest_group()
3357 this_load_per_task = cpu_avg_load_per_task(this_cpu); in find_busiest_group()
3411 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = in find_busiest_group()
3464 static int load_balance(int this_cpu, struct rq *this_rq, in load_balance() argument
3490 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle, in load_balance()
3521 ld_moved = move_tasks(this_rq, this_cpu, busiest, in load_balance()
3529 if (ld_moved && this_cpu != smp_processor_id()) in load_balance()
3530 resched_cpu(this_cpu); in load_balance()
3552 if (!cpumask_test_cpu(this_cpu, in load_balance()
3561 busiest->push_cpu = this_cpu; in load_balance()
3627 load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, in load_balance_newidle() argument
3652 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE, in load_balance_newidle()
3675 ld_moved = move_tasks(this_rq, this_cpu, busiest, in load_balance_newidle()
3730 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { in load_balance_newidle()
3738 busiest->push_cpu = this_cpu; in load_balance_newidle()
3771 static void idle_balance(int this_cpu, struct rq *this_rq) in idle_balance() argument
3781 for_each_domain(this_cpu, sd) { in idle_balance()
3789 pulled_task = load_balance_newidle(this_cpu, this_rq, in idle_balance()
4029 int this_cpu = smp_processor_id(); in run_rebalance_domains() local
4030 struct rq *this_rq = cpu_rq(this_cpu); in run_rebalance_domains()
4034 rebalance_domains(this_cpu, idle); in run_rebalance_domains()
4043 atomic_read(&nohz.load_balancer) == this_cpu) { in run_rebalance_domains()
4048 if (balance_cpu == this_cpu) in run_rebalance_domains()
6412 int this_cpu = smp_processor_id(); in sched_idle_next() local
6413 struct rq *rq = cpu_rq(this_cpu); in sched_idle_next()
6418 BUG_ON(cpu_online(this_cpu)); in sched_idle_next()