Lines Matching refs:group

1301 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) in group_faults_cpu() argument
1303 return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] + in group_faults_cpu()
1304 group->faults[task_faults_idx(NUMA_CPU, nid, 1)]; in group_faults_cpu()
6537 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) in find_idlest_group_cpu() argument
6547 if (group->group_weight == 1) in find_idlest_group_cpu()
6548 return cpumask_first(sched_group_span(group)); in find_idlest_group_cpu()
6551 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { in find_idlest_group_cpu()
6609 struct sched_group *group; in find_idlest_cpu() local
6618 group = find_idlest_group(sd, p, cpu); in find_idlest_cpu()
6619 if (!group) { in find_idlest_cpu()
6624 new_cpu = find_idlest_group_cpu(group, p, cpu); in find_idlest_cpu()
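find_idlest_cpu() above first asks find_idlest_group() for a candidate group and then find_idlest_group_cpu() for a CPU inside it, with the group_weight == 1 shortcut avoiding a scan of single-CPU groups. A self-contained sketch of that two-level selection follows; the struct grp type, the load numbers, and the selection policy are invented for illustration and stand in for sched_group and the runqueue statistics.

/* Sketch only: two-level idlest selection (group first, then CPU). */
#include <stdio.h>

struct grp {
        int cpus[4];        /* CPU ids in this group */
        int weight;         /* number of CPUs in the group */
        unsigned long load; /* stand-in for the group statistics */
};

/* Mirrors find_idlest_group(): pick the least loaded group. */
static struct grp *pick_idlest_group(struct grp *g, int n)
{
        struct grp *best = &g[0];
        for (int i = 1; i < n; i++)
                if (g[i].load < best->load)
                        best = &g[i];
        return best;
}

/* Mirrors find_idlest_group_cpu(): single-CPU groups short-circuit,
 * otherwise scan the group's CPUs (the allowed-mask check is omitted). */
static int pick_idlest_cpu(struct grp *g, const unsigned long *cpu_load)
{
        if (g->weight == 1)
                return g->cpus[0];

        int best = g->cpus[0];
        for (int i = 1; i < g->weight; i++)
                if (cpu_load[g->cpus[i]] < cpu_load[best])
                        best = g->cpus[i];
        return best;
}

int main(void)
{
        struct grp groups[2] = {
                { { 0, 1 }, 2, 300 },
                { { 2, 3 }, 2, 100 },
        };
        unsigned long cpu_load[4] = { 200, 100, 80, 20 };
        struct grp *g = pick_idlest_group(groups, 2);
        printf("idlest cpu: %d\n", pick_idlest_cpu(g, cpu_load));
        return 0;
}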
9051 struct sched_group *group, *sdg = sd->groups; in update_group_capacity() local
9087 group = child->groups; in update_group_capacity()
9089 struct sched_group_capacity *sgc = group->sgc; in update_group_capacity()
9094 group = group->next; in update_group_capacity()
9095 } while (group != child->groups); in update_group_capacity()
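update_group_capacity() walks the child domain's groups with the scheduler's usual circular-list do/while, accumulating per-group capacity. A small standalone illustration of that traversal pattern is below; the struct layout and capacity values are invented for the example.

/* Sketch only: circular sched_group-style list walk, as in
 * do { ... group = group->next; } while (group != child->groups). */
#include <stdio.h>

struct sg {
        unsigned long capacity;
        struct sg *next;        /* circular singly linked list */
};

static unsigned long sum_capacity(struct sg *head)
{
        unsigned long total = 0;
        struct sg *g = head;

        do {
                total += g->capacity;
                g = g->next;
        } while (g != head);    /* stop when back at the first group */

        return total;
}

int main(void)
{
        struct sg a = { 1024, NULL }, b = { 512, NULL }, c = { 256, NULL };
        a.next = &b; b.next = &c; c.next = &a;
        printf("total capacity: %lu\n", sum_capacity(&a));
        return 0;
}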
9156 static inline int sg_imbalanced(struct sched_group *group) in sg_imbalanced() argument
9158 return group->sgc->imbalance; in sg_imbalanced()
9217 struct sched_group *group, in group_classify() argument
9223 if (sg_imbalanced(group)) in group_classify()
9323 struct sched_group *group) in sched_asym() argument
9327 (group->flags & SD_SHARE_CPUCAPACITY)) in sched_asym()
9328 return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group); in sched_asym()
9330 return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu); in sched_asym()
9356 struct sched_group *group, in update_sg_lb_stats() argument
9364 local_group = group == sds->local; in update_sg_lb_stats()
9366 for_each_cpu_and(i, sched_group_span(group), env->cpus) { in update_sg_lb_stats()
9414 sgs->group_capacity = group->sgc->capacity; in update_sg_lb_stats()
9416 sgs->group_weight = group->group_weight; in update_sg_lb_stats()
9421 sched_asym(env, sds, sgs, group)) { in update_sg_lb_stats()
9425 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); in update_sg_lb_stats()
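update_sg_lb_stats() accumulates per-CPU numbers for one group's CPUs (restricted to env->cpus), copies the group's capacity and weight, and then classifies the group via group_classify() using the domain's imbalance_pct. The sketch below only mirrors that accumulate-then-classify shape; the stats fields and the overload test are simplified assumptions, not the kernel's actual classification rules.

/* Sketch only: accumulate per-CPU utilization, then classify the group. */
#include <stdio.h>

struct sg_stats {
        unsigned long group_util;
        unsigned long group_capacity;
        unsigned int  group_weight;
        int           overloaded;
};

static void update_stats(struct sg_stats *sgs,
                         const unsigned long *cpu_util,
                         const int *cpus, int ncpus,
                         unsigned long capacity, unsigned int imbalance_pct)
{
        for (int i = 0; i < ncpus; i++)
                sgs->group_util += cpu_util[cpus[i]];

        sgs->group_capacity = capacity;
        sgs->group_weight = ncpus;

        /* Simplified stand-in for group_classify(imbalance_pct, ...):
         * flag the group when utilization exceeds scaled capacity. */
        sgs->overloaded = sgs->group_util * 100 > capacity * imbalance_pct;
}

int main(void)
{
        unsigned long cpu_util[4] = { 900, 800, 100, 50 };
        int cpus[2] = { 0, 1 };
        struct sg_stats sgs = { 0 };

        update_stats(&sgs, cpu_util, cpus, 2, 1024, 117);
        printf("util=%lu overloaded=%d\n", sgs.group_util, sgs.overloaded);
        return 0;
}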
9640 struct sched_group *group, in update_sg_wakeup_stats() argument
9652 for_each_cpu(i, sched_group_span(group)) { in update_sg_wakeup_stats()
9679 sgs->group_capacity = group->sgc->capacity; in update_sg_wakeup_stats()
9681 sgs->group_weight = group->group_weight; in update_sg_wakeup_stats()
9683 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs); in update_sg_wakeup_stats()
9697 struct sched_group *group, in update_pick_idlest() argument
9726 if (idlest->sgc->max_capacity >= group->sgc->max_capacity) in update_pick_idlest()
9755 struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups; in find_idlest_group() local
9768 if (!cpumask_intersects(sched_group_span(group), in find_idlest_group()
9773 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) in find_idlest_group()
9777 sched_group_span(group)); in find_idlest_group()
9781 local = group; in find_idlest_group()
9786 update_sg_wakeup_stats(sd, group, sgs, p); in find_idlest_group()
9788 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) { in find_idlest_group()
9789 idlest = group; in find_idlest_group()
9793 } while (group = group->next, group != sd->groups); in find_idlest_group()
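find_idlest_group() loops over the circular group list, skipping groups the task cannot run in, remembering the local group, and tracking the best remaining candidate. Note that the loop condition (group = group->next, group != sd->groups) doubles as the increment, so continue still advances the pointer. A simplified standalone version of that loop shape follows, using plain bitmasks in place of cpumasks and a single load number in place of the wakeup statistics.

/* Sketch only: skip disallowed groups, remember local, track the idlest. */
#include <stdio.h>

struct grp {
        unsigned int span;      /* bitmask of CPUs in this group */
        unsigned long load;
        struct grp *next;       /* circular list, as in sd->groups */
};

static struct grp *pick_group(struct grp *head, unsigned int allowed,
                              unsigned int this_cpu_mask)
{
        struct grp *idlest = NULL, *local = NULL, *g = head;

        do {
                if (!(g->span & allowed))       /* task may not run here */
                        continue;
                if (g->span & this_cpu_mask) {  /* the waker's own group */
                        local = g;
                        continue;
                }
                if (!idlest || g->load < idlest->load)
                        idlest = g;
        } while (g = g->next, g != head);

        /* The real code weighs idlest against local before committing;
         * here we simply fall back to local when nothing else qualified. */
        return idlest ? idlest : local;
}

int main(void)
{
        struct grp a = { 0x3, 500, NULL };      /* CPUs 0-1, local group */
        struct grp b = { 0xc, 300, NULL };      /* CPUs 2-3 */
        struct grp c = { 0x30, 100, NULL };     /* CPUs 4-5 */
        a.next = &b; b.next = &c; c.next = &a;

        struct grp *g = pick_group(&a, 0xf /* allowed on CPUs 0-3 */,
                                   0x1 /* waking from CPU 0 */);
        printf("picked group span: 0x%x\n", g ? g->span : 0);
        return 0;
}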
10400 struct sched_group *group) in find_busiest_queue() argument
10407 trace_android_rvh_find_busiest_queue(env->dst_cpu, group, env->cpus, in find_busiest_queue()
10412 for_each_cpu_and(i, sched_group_span(group), env->cpus) { in find_busiest_queue()
10653 struct sched_group *group; in load_balance() local
10679 group = find_busiest_group(&env); in load_balance()
10680 if (!group) { in load_balance()
10685 busiest = find_busiest_queue(&env, group); in load_balance()
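The last matched lines show the top-level shape of load_balance(): find_busiest_group() picks a source group, find_busiest_queue() picks a runqueue within it, and the balancer bails out when either returns nothing. A minimal sketch of that flow is below; the types and selection policy are placeholders, not the fair.c implementation.

/* Sketch only: pick the busiest group, then the busiest runqueue in it. */
#include <stdio.h>

struct rq  { unsigned long load; };
struct sgp { struct rq *rqs; int nr; };

static struct sgp *find_busiest_group_sketch(struct sgp *groups, int n)
{
        /* Placeholder policy: busiest = group whose first rq is most loaded. */
        struct sgp *busiest = NULL;
        for (int i = 0; i < n; i++)
                if (!busiest || groups[i].rqs[0].load > busiest->rqs[0].load)
                        busiest = &groups[i];
        return busiest;
}

static struct rq *find_busiest_queue_sketch(struct sgp *g)
{
        struct rq *busiest = NULL;
        for (int i = 0; i < g->nr; i++)
                if (!busiest || g->rqs[i].load > busiest->load)
                        busiest = &g->rqs[i];
        return busiest;
}

int main(void)
{
        struct rq rqs0[2] = { { 100 }, { 200 } };
        struct rq rqs1[2] = { { 400 }, { 50 } };
        struct sgp groups[2] = { { rqs0, 2 }, { rqs1, 2 } };

        struct sgp *group = find_busiest_group_sketch(groups, 2);
        if (!group)
                return 0;       /* nothing to balance, as in load_balance() */

        struct rq *busiest = find_busiest_queue_sketch(group);
        printf("busiest rq load: %lu\n", busiest ? busiest->load : 0);
        return 0;
}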