Lines Matching refs:group

1203 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)  in group_faults_cpu()  argument
1205 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] + in group_faults_cpu()
1206 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)]; in group_faults_cpu()
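The first hit is essentially the whole of group_faults_cpu(): lines 1205-1206 add up the two per-node slots (shared and private) of the group's flat faults_cpu[] array for node nid. Below is a standalone sketch of that indexing; struct numa_group_sketch, faults_idx() and the slot layout are simplified stand-ins invented here for the kernel's struct numa_group, task_faults_idx() and NUMA_MEM, so treat it as an illustration rather than the real definition.

#include <stdio.h>

#define MAX_NODES 4

/* two slots per node: slot 0 = shared faults, slot 1 = private faults */
static inline int faults_idx(int nid, int priv)
{
        return nid * 2 + priv;
}

struct numa_group_sketch {
        unsigned long faults_cpu[MAX_NODES * 2];
};

static unsigned long group_faults_cpu_sketch(struct numa_group_sketch *grp, int nid)
{
        /* same shape as the matched lines: shared slot + private slot for @nid */
        return grp->faults_cpu[faults_idx(nid, 0)] +
               grp->faults_cpu[faults_idx(nid, 1)];
}

int main(void)
{
        struct numa_group_sketch g = { .faults_cpu = { 3, 5, 7, 11 } };

        printf("node 0: %lu faults\n", group_faults_cpu_sketch(&g, 0));  /* 8 */
        printf("node 1: %lu faults\n", group_faults_cpu_sketch(&g, 1));  /* 18 */
        return 0;
}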
6195 struct sched_group *idlest = NULL, *group = sd->groups; in find_idlest_group() local
6216 if (!cpumask_intersects(sched_group_cpus(group), in find_idlest_group()
6221 sched_group_cpus(group)); in find_idlest_group()
6231 for_each_cpu(i, sched_group_cpus(group)) { in find_idlest_group()
6250 group->sgc->capacity; in find_idlest_group()
6252 group->sgc->capacity; in find_idlest_group()
6266 idlest = group; in find_idlest_group()
6275 idlest = group; in find_idlest_group()
6280 most_spare_sg = group; in find_idlest_group()
6283 } while (group = group->next, group != sd->groups); in find_idlest_group()
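Lines 6195-6283 belong to find_idlest_group(): starting at sd->groups, it walks the scheduling domain's circular list of sched_groups with the comma-operator do/while at line 6283, remembering the idlest group and the group with the most spare capacity. A self-contained sketch of that ring walk follows; struct sg_sketch and its fields are invented stand-ins, not the kernel's structures.

#include <stdio.h>

struct sg_sketch {
        int id;
        unsigned long spare_capacity;
        struct sg_sketch *next;   /* circular: the last group points back at the first */
};

/* walk the ring once, remembering the group with the most spare capacity */
static struct sg_sketch *most_spare_group(struct sg_sketch *first)
{
        struct sg_sketch *group = first, *most_spare_sg = NULL;
        unsigned long most_spare = 0;

        do {
                if (group->spare_capacity > most_spare) {
                        most_spare = group->spare_capacity;
                        most_spare_sg = group;
                }
                /* comma operator: advance, then test, as at line 6283 */
        } while (group = group->next, group != first);

        return most_spare_sg;
}

int main(void)
{
        struct sg_sketch g2 = { .id = 2, .spare_capacity = 300 };
        struct sg_sketch g1 = { .id = 1, .spare_capacity = 500, .next = &g2 };
        struct sg_sketch g0 = { .id = 0, .spare_capacity = 100, .next = &g1 };

        g2.next = &g0;  /* close the ring */
        printf("most spare: group %d\n", most_spare_group(&g0)->id);  /* group 1 */
        return 0;
}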
6316 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) in find_idlest_group_cpu() argument
6326 if (group->group_weight == 1) in find_idlest_group_cpu()
6327 return cpumask_first(sched_group_cpus(group)); in find_idlest_group_cpu()
6330 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) { in find_idlest_group_cpu()
6381 struct sched_group *group; in find_idlest_cpu() local
6393 group = find_idlest_group(sd, p, cpu, sd_flag); in find_idlest_cpu()
6394 if (!group) { in find_idlest_cpu()
6399 new_cpu = find_idlest_group_cpu(group, p, cpu); in find_idlest_cpu()
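find_idlest_cpu() (lines 6381-6399) splits the selection into two steps: find_idlest_group() picks a sched_group within the domain (returning NULL when that level should be skipped), and find_idlest_group_cpu() then picks a CPU inside that group. The toy two-level version below uses invented load numbers in place of runqueue state and ignores the extra criteria the kernel applies, so it only shows the shape of the split.

#include <stdio.h>

#define GROUPS 2
#define CPUS_PER_GROUP 2

/* invented per-CPU loads, indexed [group][cpu]; stand-in for runqueue state */
static unsigned long load[GROUPS][CPUS_PER_GROUP] = {
        { 40, 10 },
        { 70, 90 },
};

/* step 1: pick a group -- here simply the one with the smallest total load,
 * a very rough stand-in for find_idlest_group(), which also weighs spare
 * capacity, imbalance and the waker's own group */
static int pick_idlest_group(void)
{
        unsigned long best = ~0UL;
        int g, best_g = -1;

        for (g = 0; g < GROUPS; g++) {
                unsigned long sum = load[g][0] + load[g][1];

                if (sum < best) {
                        best = sum;
                        best_g = g;
                }
        }
        return best_g;
}

/* step 2: pick the least-loaded CPU inside that group, as
 * find_idlest_group_cpu() does over the group's cpumask */
static int pick_idlest_cpu(int g)
{
        return load[g][0] <= load[g][1] ? 0 : 1;
}

int main(void)
{
        int g = pick_idlest_group();

        printf("group %d, cpu %d\n", g, pick_idlest_cpu(g));  /* group 0, cpu 1 */
        return 0;
}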
8447 struct sched_group *group, *sdg = sd->groups; in update_group_capacity() local
8501 group = child->groups; in update_group_capacity()
8503 struct sched_group_capacity *sgc = group->sgc; in update_group_capacity()
8508 group = group->next; in update_group_capacity()
8509 } while (group != child->groups); in update_group_capacity()
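In update_group_capacity() (lines 8447-8509), the capacity of a group at the parent level is rebuilt by walking the child domain's group ring and accumulating each child group's sgc->capacity. A reduced sketch of that aggregation step is below; the types are stand-ins, and the kernel accumulates further per-group fields not shown here.

#include <stdio.h>

struct sgc_sketch {
        unsigned long capacity;
};

struct group_sketch {
        struct sgc_sketch *sgc;
        struct group_sketch *next;  /* circular list, like struct sched_group */
};

/* sum the capacities of all groups in the child domain's ring */
static unsigned long child_capacity_sum(struct group_sketch *child_groups)
{
        struct group_sketch *group = child_groups;
        unsigned long capacity = 0;

        do {
                capacity += group->sgc->capacity;
                group = group->next;
        } while (group != child_groups);  /* same termination test as line 8509 */

        return capacity;
}

int main(void)
{
        struct sgc_sketch c0 = { .capacity = 1024 }, c1 = { .capacity = 760 };
        struct group_sketch g1 = { .sgc = &c1 };
        struct group_sketch g0 = { .sgc = &c0, .next = &g1 };

        g1.next = &g0;  /* close the ring */
        printf("parent capacity: %lu\n", child_capacity_sum(&g0));  /* 1784 */
        return 0;
}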
8558 static inline int sg_imbalanced(struct sched_group *group) in sg_imbalanced() argument
8560 return group->sgc->imbalance; in sg_imbalanced()
8621 group_type group_classify(struct sched_group *group, in group_classify() argument
8627 if (sg_imbalanced(group)) in group_classify()
8679 struct sched_group *group, int load_idx, in update_sg_lb_stats() argument
8688 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { in update_sg_lb_stats()
8730 sgs->group_capacity = group->sgc->capacity; in update_sg_lb_stats()
8736 sgs->group_weight = group->group_weight; in update_sg_lb_stats()
8739 sgs->group_type = group_classify(group, sgs); in update_sg_lb_stats()
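update_sg_lb_stats() (lines 8679-8739) fills a statistics structure from the CPUs in the group and then hands it to group_classify(), which at line 8627 consults sg_imbalanced() (the sgc->imbalance flag from line 8560). The sketch below assumes the usual three-way imbalanced/overloaded/other split of this kernel era; the enum names, field names and the overload check are written from memory as stand-ins and may differ from the real definitions.

#include <stdio.h>

enum group_type_sketch {
        GROUP_OTHER,
        GROUP_IMBALANCED,
        GROUP_OVERLOADED,
};

struct sg_stats_sketch {
        int no_capacity;  /* load no longer fits in the group's capacity */
        int imbalance;    /* a previous balance was skewed by CPU affinity */
};

static enum group_type_sketch classify_sketch(const struct sg_stats_sketch *sgs)
{
        if (sgs->no_capacity)
                return GROUP_OVERLOADED;
        if (sgs->imbalance)   /* stands in for sg_imbalanced(group) at line 8627 */
                return GROUP_IMBALANCED;
        return GROUP_OTHER;
}

int main(void)
{
        struct sg_stats_sketch sgs = { .imbalance = 1 };

        printf("type: %d\n", classify_sketch(&sgs));  /* 1 = GROUP_IMBALANCED */
        return 0;
}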
9278 struct sched_group *group) in find_busiest_queue() argument
9284 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { in find_busiest_queue()
9439 struct sched_group *group; in load_balance() local
9473 group = find_busiest_group(&env); in load_balance()
9474 if (!group) { in load_balance()
9479 busiest = find_busiest_queue(&env, group); in load_balance()
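Finally, load_balance() (lines 9439-9479) mirrors the idlest path above in reverse: find_busiest_group() narrows the search to one sched_group (bailing out if there is none), and find_busiest_queue() (lines 9278-9284) then scans that group's CPUs for the runqueue to pull from, preferring the one with the highest load relative to its capacity. The standalone sketch below shows that relative-load pick with a cross-multiplied comparison so no division is needed; the load and capacity numbers are invented examples, not kernel data.

#include <stdio.h>

#define NCPUS 3

static unsigned long wl[NCPUS]  = { 600, 900, 500 };    /* weighted CPU load */
static unsigned long cap[NCPUS] = { 1024, 512, 1024 };  /* CPU capacity */

static int find_busiest_cpu(void)
{
        unsigned long busiest_load = 0, busiest_cap = 1;
        int i, busiest = -1;

        for (i = 0; i < NCPUS; i++) {
                /* wl[i]/cap[i] > busiest_load/busiest_cap, without dividing */
                if (wl[i] * busiest_cap > busiest_load * cap[i]) {
                        busiest_load = wl[i];
                        busiest_cap = cap[i];
                        busiest = i;
                }
        }
        return busiest;
}

int main(void)
{
        printf("busiest cpu: %d\n", find_busiest_cpu());  /* cpu 1: 900/512 */
        return 0;
}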