
Lines Matching refs:group

All hits below appear to come from the CFS scheduler's load-balancing and NUMA code (kernel/sched/fair.c; the trace_android_rvh_* call suggests an Android common kernel). Each hit shows the source line number, the matching code, and the enclosing function; trailing 'argument' and 'local' annotations flag how the matched symbol is used there.

1284 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)  in group_faults_cpu()  argument
1286 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] + in group_faults_cpu()
1287 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)]; in group_faults_cpu()
6269 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) in find_idlest_group_cpu() argument
6279 if (group->group_weight == 1) in find_idlest_group_cpu()
6280 return cpumask_first(sched_group_span(group)); in find_idlest_group_cpu()
6283 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { in find_idlest_group_cpu()
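
find_idlest_group_cpu() short-circuits for a one-CPU group, then examines only CPUs in the intersection of the group's span and the task's affinity mask. A sketch of that pattern, with unsigned bitmasks standing in for cpumasks and an invented per-CPU load table:

#include <stdio.h>

#define NR_CPUS 8

static unsigned long cpu_load[NR_CPUS] = { 5, 2, 7, 1, 4, 4, 9, 0 };

static int find_idlest_group_cpu(unsigned span, unsigned allowed, int this_cpu)
{
    unsigned mask = span & allowed;     /* group span AND p->cpus_ptr */
    unsigned long least = (unsigned long)-1;
    int i, idlest = this_cpu;

    /* Fast path: a single-CPU group has nothing to compare. */
    if (__builtin_popcount(span) == 1)
        return __builtin_ctz(span);

    for (i = 0; i < NR_CPUS; i++) {
        if (!(mask & (1u << i)))
            continue;
        if (cpu_load[i] < least) {      /* keep the least loaded CPU */
            least = cpu_load[i];
            idlest = i;
        }
    }
    return idlest;
}

int main(void)
{
    /* Group covers CPUs 0-3; the task may run on CPUs 1-3 only. */
    printf("idlest: %d\n", find_idlest_group_cpu(0x0f, 0x0e, 0)); /* 3 */
    return 0;
}
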
6341 struct sched_group *group; in find_idlest_cpu() local
6350 group = find_idlest_group(sd, p, cpu); in find_idlest_cpu()
6351 if (!group) { in find_idlest_cpu()
6356 new_cpu = find_idlest_group_cpu(group, p, cpu); in find_idlest_cpu()
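
The find_idlest_cpu() hits show a two-level descent: pick the idlest group at the current domain level, then the idlest CPU inside it, and continue downward. A compilable sketch of just that control flow, with trivial stubs in place of the real statistics-driven helpers (the real function also re-derives the domain hierarchy for the newly chosen CPU, which is elided here):

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in types; the real kernel structures are far richer. */
struct sched_group { int first_cpu; };
struct sched_domain { struct sched_domain *child; struct sched_group *g; };

/* Stub: always offer the domain's one candidate group. */
static struct sched_group *find_idlest_group(struct sched_domain *sd, int cpu)
{
    (void)cpu;
    return sd->g;               /* NULL would mean "local group is idlest" */
}

/* Stub: pretend the group's first CPU is its idlest. */
static int find_idlest_group_cpu(struct sched_group *group, int this_cpu)
{
    (void)this_cpu;
    return group->first_cpu;
}

/* The descent pattern from the listing: group first, then CPU, repeat. */
static int find_idlest_cpu(struct sched_domain *sd, int cpu)
{
    int new_cpu = cpu;

    while (sd) {
        struct sched_group *group = find_idlest_group(sd, cpu);

        if (!group) {           /* local group already idlest: go down */
            sd = sd->child;
            continue;
        }
        new_cpu = find_idlest_group_cpu(group, cpu);
        cpu = new_cpu;
        sd = sd->child;
    }
    return new_cpu;
}

int main(void)
{
    struct sched_group leaf = { .first_cpu = 3 };
    struct sched_domain bottom = { .child = NULL, .g = &leaf };
    struct sched_group mid = { .first_cpu = 2 };
    struct sched_domain top = { .child = &bottom, .g = &mid };

    printf("chosen cpu: %d\n", find_idlest_cpu(&top, 0)); /* 3 */
    return 0;
}
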
8869 struct sched_group *group, *sdg = sd->groups; in update_group_capacity() local
8905 group = child->groups; in update_group_capacity()
8907 struct sched_group_capacity *sgc = group->sgc; in update_group_capacity()
8912 group = group->next; in update_group_capacity()
8913 } while (group != child->groups); in update_group_capacity()
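
The update_group_capacity() hits show the scheduler's circular-list idiom: a domain's sched_groups form a ring, so the walk is a do/while that stops when it returns to the head. A self-contained sketch of that traversal, with the structures trimmed to the fields the loop touches:

#include <stdio.h>

struct sched_group_capacity { unsigned long capacity; };
struct sched_group {
    struct sched_group *next;               /* ring of sibling groups */
    struct sched_group_capacity *sgc;
};

/* Sum the capacity contributed by every group on the ring. */
static unsigned long sum_child_capacity(struct sched_group *head)
{
    struct sched_group *group = head;
    unsigned long capacity = 0;

    do {
        capacity += group->sgc->capacity;   /* add this group's share */
        group = group->next;
    } while (group != head);                /* stop at full circle */

    return capacity;
}

int main(void)
{
    struct sched_group_capacity c0 = { 1024 }, c1 = { 512 };
    struct sched_group g0, g1;

    g0 = (struct sched_group){ .next = &g1, .sgc = &c0 };
    g1 = (struct sched_group){ .next = &g0, .sgc = &c1 };
    printf("total capacity: %lu\n", sum_child_capacity(&g0)); /* 1536 */
    return 0;
}
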
8974 static inline int sg_imbalanced(struct sched_group *group) in sg_imbalanced() argument
8976 return group->sgc->imbalance; in sg_imbalanced()
9035 struct sched_group *group, in group_classify() argument
9041 if (sg_imbalanced(group)) in group_classify()
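
sg_imbalanced() simply reads a flag in the group's shared capacity state (set when a previous balance attempt failed for affinity reasons), and group_classify() uses it as one rung of a classification ladder. A sketch of that ladder follows; the enum is abbreviated and the overload test is a simplified stand-in for the kernel's sg_lb_stats-based checks:

#include <stdio.h>

enum group_type { group_has_spare, group_fully_busy, group_imbalanced,
                  group_overloaded };

struct sched_group_capacity { int imbalance; };
struct sched_group { struct sched_group_capacity *sgc; };
struct sg_lb_stats { unsigned long group_load; unsigned long group_capacity; };

static inline int sg_imbalanced(struct sched_group *group)
{
    return group->sgc->imbalance;   /* set by a prior failed balance */
}

static enum group_type group_classify(unsigned int imbalance_pct,
                                      struct sched_group *group,
                                      struct sg_lb_stats *sgs)
{
    /* Overload check scaled by imbalance_pct (simplified). */
    if (sgs->group_load * 100 > sgs->group_capacity * imbalance_pct)
        return group_overloaded;
    if (sg_imbalanced(group))
        return group_imbalanced;
    if (sgs->group_load)
        return group_fully_busy;
    return group_has_spare;
}

int main(void)
{
    struct sched_group_capacity sgc = { .imbalance = 1 };
    struct sched_group g = { .sgc = &sgc };
    struct sg_lb_stats s = { .group_load = 100, .group_capacity = 1024 };

    printf("type: %d\n", group_classify(117, &g, &s)); /* group_imbalanced */
    return 0;
}
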
9064 struct sched_group *group, in update_sg_lb_stats() argument
9072 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group)); in update_sg_lb_stats()
9074 for_each_cpu_and(i, sched_group_span(group), env->cpus) { in update_sg_lb_stats()
9119 sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) { in update_sg_lb_stats()
9123 sgs->group_capacity = group->sgc->capacity; in update_sg_lb_stats()
9125 sgs->group_weight = group->group_weight; in update_sg_lb_stats()
9127 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); in update_sg_lb_stats()
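
update_sg_lb_stats() accumulates per-CPU statistics over (group span AND env->cpus), notes whether the destination CPU lies in the span (the local group), then copies group-level capacity and weight before classifying. A sketch of that pass, with bitmasks replacing cpumasks and an invented per-CPU load source:

#include <stdio.h>
#include <string.h>

#define NR_CPUS 8

struct sg_lb_stats {
    unsigned long group_load;
    unsigned long group_capacity;
    unsigned int  group_weight;
    int           local_group;
};

static unsigned long cpu_load[NR_CPUS] = { 3, 1, 4, 1, 5, 9, 2, 6 };

static void update_sg_lb_stats(unsigned span, unsigned env_cpus, int dst_cpu,
                               unsigned long capacity, struct sg_lb_stats *sgs)
{
    int i;

    memset(sgs, 0, sizeof(*sgs));

    /* Local group: the destination CPU lies inside this group's span. */
    sgs->local_group = !!(span & (1u << dst_cpu));

    /* Accumulate only over CPUs in both the span and the balance mask. */
    for (i = 0; i < NR_CPUS; i++) {
        if (!((span & env_cpus) & (1u << i)))
            continue;
        sgs->group_load += cpu_load[i];
    }

    sgs->group_capacity = capacity;               /* group->sgc->capacity */
    sgs->group_weight = __builtin_popcount(span); /* group->group_weight  */
}

int main(void)
{
    struct sg_lb_stats sgs;

    update_sg_lb_stats(0x0f, 0xff, 1, 1024, &sgs);
    printf("load=%lu cap=%lu weight=%u local=%d\n",
           sgs.group_load, sgs.group_capacity, sgs.group_weight,
           sgs.local_group);
    return 0;
}
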
9341 struct sched_group *group, in update_sg_wakeup_stats() argument
9353 for_each_cpu(i, sched_group_span(group)) { in update_sg_wakeup_stats()
9380 sgs->group_capacity = group->sgc->capacity; in update_sg_wakeup_stats()
9382 sgs->group_weight = group->group_weight; in update_sg_wakeup_stats()
9384 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs); in update_sg_wakeup_stats()
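
update_sg_wakeup_stats() mirrors the load-balance pass above, with two differences visible in the listing: it walks the whole group span with for_each_cpu() rather than intersecting with env->cpus, since at wakeup there is no balance environment narrowing the candidate CPUs, and it reads the imbalance threshold directly from sd->imbalance_pct instead of env->sd->imbalance_pct.
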
9398 struct sched_group *group, in update_pick_idlest() argument
9427 if (idlest->sgc->max_capacity >= group->sgc->max_capacity) in update_pick_idlest()
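
The hit above is update_pick_idlest()'s capacity tie-break; in current kernels it sits inside a switch on group_type (the misfit-task case keeps whichever pick offers the highest per-CPU max capacity). A sketch isolating just that comparison, with the structures trimmed to the one field compared:

#include <stdbool.h>
#include <stdio.h>

struct sched_group_capacity { unsigned long max_capacity; };
struct sched_group { struct sched_group_capacity *sgc; };

/* Return true if @group should replace @idlest as the current pick. */
static bool update_pick_idlest(struct sched_group *idlest,
                               struct sched_group *group)
{
    if (!idlest)                /* first candidate always wins */
        return true;
    /* Keep the current pick if it is at least as capable. */
    if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
        return false;
    return true;
}

int main(void)
{
    struct sched_group_capacity big = { 1024 }, small = { 512 };
    struct sched_group a = { &big }, b = { &small };

    printf("replace: %d\n", update_pick_idlest(&a, &b)); /* 0: keep 'a' */
    return 0;
}
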
9467 struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups; in find_idlest_group() local
9480 if (!cpumask_intersects(sched_group_span(group), in find_idlest_group()
9485 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) in find_idlest_group()
9489 sched_group_span(group)); in find_idlest_group()
9493 local = group; in find_idlest_group()
9498 update_sg_wakeup_stats(sd, group, sgs, p); in find_idlest_group()
9500 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) { in find_idlest_group()
9501 idlest = group; in find_idlest_group()
9505 } while (group = group->next, group != sd->groups); in find_idlest_group()
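
The find_idlest_group() hits show the full ring walk: the comma-operator do/while visits every group in the domain once, skipping groups the task cannot run on, recording the local group for separate treatment, and letting the rest compete for idlest. A sketch of that loop shape; bitmasks replace cpumasks, the cookie check is omitted, and the pick rule is reduced to a single load value:

#include <stdio.h>

#define NGROUPS 3

struct sched_group {
    struct sched_group *next;    /* ring of sibling groups */
    unsigned span;               /* CPUs covered, as a bitmask */
    unsigned long load;          /* stand-in for sg_lb_stats */
};

static struct sched_group *
find_idlest_group(struct sched_group *head, unsigned allowed, int this_cpu)
{
    struct sched_group *idlest = NULL, *local = NULL, *group = head;
    unsigned long idlest_load = (unsigned long)-1;

    do {
        /* Skip groups the task is not allowed to run on. */
        if (!(group->span & allowed))
            continue;   /* continue re-evaluates the loop condition,
                           which is what advances 'group' */

        if (group->span & (1u << this_cpu)) {
            local = group;       /* weighed against idlest afterwards */
            continue;
        }

        if (group->load < idlest_load) {
            idlest_load = group->load;
            idlest = group;
        }
    } while (group = group->next, group != head);

    /* The real code then compares idlest against local; elided here. */
    return idlest ? idlest : local;
}

int main(void)
{
    struct sched_group g[NGROUPS] = {
        { &g[1], 0x03, 9 }, { &g[2], 0x0c, 4 }, { &g[0], 0x30, 6 },
    };
    struct sched_group *best = find_idlest_group(&g[0], 0xff, 0);

    printf("idlest load: %lu\n", best->load);   /* 4 */
    return 0;
}
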
10101 struct sched_group *group) in find_busiest_queue() argument
10108 trace_android_rvh_find_busiest_queue(env->dst_cpu, group, env->cpus, in find_busiest_queue()
10113 for_each_cpu_and(i, sched_group_span(group), env->cpus) { in find_busiest_queue()
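
find_busiest_queue() scans (group span AND env->cpus) for the most loaded runqueue; the trace_android_rvh_find_busiest_queue() call above is an Android common-kernel restricted vendor hook that lets vendor modules override the pick, and it is omitted from this sketch. The load metric here is an invented stand-in for the kernel's migration-type-dependent checks:

#include <stdio.h>

#define NR_CPUS 8

static unsigned long rq_load[NR_CPUS] = { 2, 8, 1, 8, 0, 3, 5, 7 };

static int find_busiest_queue(unsigned span, unsigned env_cpus)
{
    unsigned long busiest_load = 0;
    int i, busiest = -1;

    for (i = 0; i < NR_CPUS; i++) {
        if (!((span & env_cpus) & (1u << i)))
            continue;
        if (rq_load[i] > busiest_load) {  /* keep the most loaded rq */
            busiest_load = rq_load[i];
            busiest = i;
        }
    }
    return busiest;                       /* -1: nothing eligible */
}

int main(void)
{
    printf("busiest cpu: %d\n", find_busiest_queue(0x0f, 0xff)); /* 1 */
    return 0;
}
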
10342 struct sched_group *group; in load_balance() local
10369 group = find_busiest_group(&env); in load_balance()
10370 if (!group) { in load_balance()
10375 busiest = find_busiest_queue(&env, group); in load_balance()
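
Finally, the load_balance() hits tie the pieces together as a two-stage selection: find the busiest group in the domain, then the busiest runqueue inside it, and bail out if either stage finds nothing to pull. A sketch of just that control flow, with stubs standing in for the statistics-driven searches:

#include <stddef.h>
#include <stdio.h>

struct sched_group { int busiest_cpu; };

/* Stub: the real search compares sg_lb_stats across the domain. */
static struct sched_group *find_busiest_group(struct sched_group *candidate)
{
    return candidate;           /* NULL means the domain is balanced */
}

/* Stub: the real search scans the group's span for the top rq. */
static int find_busiest_queue(struct sched_group *group)
{
    return group->busiest_cpu;
}

static int load_balance(struct sched_group *candidate)
{
    struct sched_group *group = find_busiest_group(candidate);

    if (!group)                 /* nothing to pull: bail out */
        return -1;
    return find_busiest_queue(group);
}

int main(void)
{
    struct sched_group g = { .busiest_cpu = 5 };

    printf("pull from cpu %d\n", load_balance(&g));
    printf("balanced: %d\n", load_balance(NULL));  /* no busiest group */
    return 0;
}
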