Lines Matching refs:sg (references to the identifier sg in the Linux scheduler topology code, kernel/sched/topology.c)
597 static void free_sched_groups(struct sched_group *sg, int free_sgc) in free_sched_groups() argument
601 if (!sg) in free_sched_groups()
604 first = sg; in free_sched_groups()
606 tmp = sg->next; in free_sched_groups()
608 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) in free_sched_groups()
609 kfree(sg->sgc); in free_sched_groups()
611 if (atomic_dec_and_test(&sg->ref)) in free_sched_groups()
612 kfree(sg); in free_sched_groups()
613 sg = tmp; in free_sched_groups()
614 } while (sg != first); in free_sched_groups()
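The fragments above belong to a single circular-list walk: a sched_domain's groups form a ring linked through sg->next, and both the group and its shared sched_group_capacity (sg->sgc) are reference counted. Below is a sketch assembling the listed lines into the full walk; the local declarations and the early return are assumptions filled in around the listed fragments, and the code relies on the kernel's internal scheduler types.

	static void free_sched_groups(struct sched_group *sg, int free_sgc)
	{
		struct sched_group *tmp, *first;	/* assumed locals, not in the listing */

		if (!sg)
			return;

		first = sg;
		do {
			tmp = sg->next;			/* remember the next ring entry before freeing */

			/* sgc is shared between groups; free it only on the last reference */
			if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
				kfree(sg->sgc);

			/* the group itself is also refcounted (see get_group() below) */
			if (atomic_dec_and_test(&sg->ref))
				kfree(sg);
			sg = tmp;
		} while (sg != first);			/* stop once the ring wraps around */
	}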
732 struct sched_group *sg = sd->groups; in cpu_attach_domain() local
740 sg->flags = 0; in cpu_attach_domain()
741 } while (sg != sd->groups); in cpu_attach_domain()
779 int group_balance_cpu(struct sched_group *sg) in group_balance_cpu() argument
781 return cpumask_first(group_balance_mask(sg)); in group_balance_cpu()
891 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask) in build_balance_mask() argument
893 const struct cpumask *sg_span = sched_group_span(sg); in build_balance_mask()
930 struct sched_group *sg; in build_group_from_child_sched_domain() local
933 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), in build_group_from_child_sched_domain()
936 if (!sg) in build_group_from_child_sched_domain()
939 sg_span = sched_group_span(sg); in build_group_from_child_sched_domain()
942 sg->flags = sd->child->flags; in build_group_from_child_sched_domain()
947 atomic_inc(&sg->ref); in build_group_from_child_sched_domain()
948 return sg; in build_group_from_child_sched_domain()
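These lines allocate one overlap group from its child sched_domain. A sketch of the whole allocation, assembled around the listed fragments, follows; the sd->child branch and the NULL return are filled in as assumptions, and the sketch relies on the kernel's internal definitions (struct sched_group, sched_group_span(), the cpumask helpers).

	static struct sched_group *
	build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
	{
		struct sched_group *sg;
		struct cpumask *sg_span;

		/* the group and its trailing cpumask are one node-local allocation */
		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				GFP_KERNEL, cpu_to_node(cpu));
		if (!sg)
			return NULL;

		sg_span = sched_group_span(sg);
		if (sd->child) {
			/* the group spans the child domain and inherits its flags */
			cpumask_copy(sg_span, sched_domain_span(sd->child));
			sg->flags = sd->child->flags;
		} else {
			cpumask_copy(sg_span, sched_domain_span(sd));
		}

		atomic_inc(&sg->ref);	/* first reference; dropped in free_sched_groups() */
		return sg;
	}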
952 struct sched_group *sg) in init_overlap_sched_group() argument
959 build_balance_mask(sd, sg, mask); in init_overlap_sched_group()
962 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
963 if (atomic_inc_return(&sg->sgc->ref) == 1) in init_overlap_sched_group()
964 cpumask_copy(group_balance_mask(sg), mask); in init_overlap_sched_group()
966 WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask)); in init_overlap_sched_group()
973 sg_span = sched_group_span(sg); in init_overlap_sched_group()
974 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in init_overlap_sched_group()
975 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
976 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
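Here the freshly built overlap group is wired to a shared sched_group_capacity: the sgc of the group's balance CPU is reused, its balance mask is written only by the first group to take a reference, and the capacity fields get placeholder values until the real capacity update runs. A sketch assembled around the listed fragments; the scratch mask and the cpu lookup are assumptions.

	static void init_overlap_sched_group(struct sched_domain *sd,
					     struct sched_group *sg)
	{
		struct cpumask *mask = sched_domains_tmpmask2;	/* assumed scratch mask */
		struct sd_data *sdd = sd->private;
		struct cpumask *sg_span;
		int cpu;

		build_balance_mask(sd, sg, mask);
		cpu = cpumask_first(mask);

		/* share the balance CPU's sgc; only the first taker initializes the mask */
		sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
		if (atomic_inc_return(&sg->sgc->ref) == 1)
			cpumask_copy(group_balance_mask(sg), mask);
		else
			WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

		/* placeholder capacities so nothing divides by zero before the real update */
		sg_span = sched_group_span(sg);
		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
		sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
		sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
	}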
1007 struct sched_group *first = NULL, *last = NULL, *sg; in build_overlap_sched_groups() local
1072 sg = build_group_from_child_sched_domain(sibling, cpu); in build_overlap_sched_groups()
1073 if (!sg) in build_overlap_sched_groups()
1076 sg_span = sched_group_span(sg); in build_overlap_sched_groups()
1079 init_overlap_sched_group(sibling, sg); in build_overlap_sched_groups()
1082 first = sg; in build_overlap_sched_groups()
1084 last->next = sg; in build_overlap_sched_groups()
1085 last = sg; in build_overlap_sched_groups()
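The first/last/next lines are the ring-building part of build_overlap_sched_groups(): each new group is appended behind last and the list is kept circular back to first. A sketch of that loop body follows, with the sibling selection and error handling elided; the covered mask, the sibling variable, and the fail label come from surrounding code that the listing does not show and are assumptions here.

	/* inside build_overlap_sched_groups(), once a sibling domain is chosen: */
	sg = build_group_from_child_sched_domain(sibling, cpu);
	if (!sg)
		goto fail;				/* assumed error path */

	sg_span = sched_group_span(sg);
	cpumask_or(covered, covered, sg_span);		/* don't build another group for these CPUs */

	init_overlap_sched_group(sibling, sg);

	if (!first)
		first = sg;
	if (last)
		last->next = sg;
	last = sg;
	last->next = first;				/* keep the list circular after every append */

After the loop the finished ring is installed as sd->groups = first, which the listing does not show because that line does not reference sg.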
1174 struct sched_group *sg; in get_group() local
1180 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1181 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1184 already_visited = atomic_inc_return(&sg->ref) > 1; in get_group()
1186 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); in get_group()
1190 return sg; in get_group()
1193 cpumask_copy(sched_group_span(sg), sched_domain_span(child)); in get_group()
1194 cpumask_copy(group_balance_mask(sg), sched_group_span(sg)); in get_group()
1195 sg->flags = child->flags; in get_group()
1197 cpumask_set_cpu(cpu, sched_group_span(sg)); in get_group()
1198 cpumask_set_cpu(cpu, group_balance_mask(sg)); in get_group()
1201 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); in get_group()
1202 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in get_group()
1203 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in get_group()
1205 return sg; in get_group()
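get_group() wires a per-CPU preallocated group into a domain: it bumps the refcounts so claim_allocations() later knows the allocation is in use, and it initializes the span, balance mask, flags, and placeholder capacity only on the first visit. A sketch assembled around the listed fragments; the sd/child lookup and the if/else structure are assumptions filled in between the listed lines.

	static struct sched_group *get_group(int cpu, struct sd_data *sdd)
	{
		struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
		struct sched_domain *child = sd->child;
		struct sched_group *sg;
		bool already_visited;

		if (child)
			cpu = cpumask_first(sched_domain_span(child));

		sg = *per_cpu_ptr(sdd->sg, cpu);
		sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

		/* the extra references are what claim_allocations() checks later */
		already_visited = atomic_inc_return(&sg->ref) > 1;
		/* sg and sgc are expected to be visited together */
		WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));

		/* an already-visited group is already initialized; just hand it back */
		if (already_visited)
			return sg;

		if (child) {
			cpumask_copy(sched_group_span(sg), sched_domain_span(child));
			cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
			sg->flags = child->flags;
		} else {
			cpumask_set_cpu(cpu, sched_group_span(sg));
			cpumask_set_cpu(cpu, group_balance_mask(sg));
		}

		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
		sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
		sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;

		return sg;
	}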
1230 struct sched_group *sg; in build_sched_groups() local
1235 sg = get_group(i, sdd); in build_sched_groups()
1237 cpumask_or(covered, covered, sched_group_span(sg)); in build_sched_groups()
1240 first = sg; in build_sched_groups()
1242 last->next = sg; in build_sched_groups()
1243 last = sg; in build_sched_groups()
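build_sched_groups() is the non-overlapping counterpart: it walks the domain span starting at the CPU of interest, pulls one group per uncovered child span via get_group(), and links the groups into the same circular list. A sketch around the listed fragments; the covered-mask handling and the final ring closure are assumptions filled in from context.

	static int build_sched_groups(struct sched_domain *sd, int cpu)
	{
		struct sched_group *first = NULL, *last = NULL;
		struct sd_data *sdd = sd->private;
		const struct cpumask *span = sched_domain_span(sd);
		struct cpumask *covered = sched_domains_tmpmask;	/* assumed scratch mask */
		int i;

		cpumask_clear(covered);

		/* start at 'cpu' so sd->groups begins with the local group */
		for_each_cpu_wrap(i, span, cpu) {
			struct sched_group *sg;

			if (cpumask_test_cpu(i, covered))
				continue;

			sg = get_group(i, sdd);

			cpumask_or(covered, covered, sched_group_span(sg));

			if (!first)
				first = sg;
			if (last)
				last->next = sg;
			last = sg;
		}
		last->next = first;	/* close the ring */
		sd->groups = first;

		return 0;
	}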
1263 struct sched_group *sg = sd->groups; in init_sched_groups_capacity() local
1265 WARN_ON(!sg); in init_sched_groups_capacity()
1270 sg->group_weight = cpumask_weight(sched_group_span(sg)); in init_sched_groups_capacity()
1275 for_each_cpu(cpu, sched_group_span(sg)) { in init_sched_groups_capacity()
1281 sg->asym_prefer_cpu = max_cpu; in init_sched_groups_capacity()
1284 sg = sg->next; in init_sched_groups_capacity()
1285 } while (sg != sd->groups); in init_sched_groups_capacity()
1287 if (cpu != group_balance_cpu(sg)) in init_sched_groups_capacity()
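init_sched_groups_capacity() walks the finished ring: it records each group's weight, picks the preferred CPU for asymmetric packing where SD_ASYM_PACKING is set, and then lets only the balance CPU of the local group go on to compute real capacities. A sketch around the listed fragments; the SD_ASYM_PACKING gate and the final update_group_capacity() call are assumptions filled in between the listed lines.

	static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
	{
		struct sched_group *sg = sd->groups;

		WARN_ON(!sg);

		do {
			int cpu, max_cpu = -1;	/* shadows the outer 'cpu' inside the loop */

			sg->group_weight = cpumask_weight(sched_group_span(sg));

			if (!(sd->flags & SD_ASYM_PACKING))
				goto next;

			/* pick the most preferred CPU in the group for asym packing */
			for_each_cpu(cpu, sched_group_span(sg)) {
				if (max_cpu < 0)
					max_cpu = cpu;
				else if (sched_asym_prefer(cpu, max_cpu))
					max_cpu = cpu;
			}
			sg->asym_prefer_cpu = max_cpu;

	next:
			sg = sg->next;
		} while (sg != sd->groups);

		/* only the balance CPU of the local group computes real capacities */
		if (cpu != group_balance_cpu(sg))
			return;

		update_group_capacity(sd, cpu);
	}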
1492 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
1493 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
2081 sdd->sg = alloc_percpu(struct sched_group *); in __sdt_alloc()
2082 if (!sdd->sg) in __sdt_alloc()
2092 struct sched_group *sg; in __sdt_alloc() local
2109 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), in __sdt_alloc()
2111 if (!sg) in __sdt_alloc()
2114 sg->next = sg; in __sdt_alloc()
2116 *per_cpu_ptr(sdd->sg, j) = sg; in __sdt_alloc()
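__sdt_alloc() preallocates one sched_group (plus its trailing cpumask) per CPU and topology level into the per-CPU pointer array sdd->sg, each group initially linked only to itself; get_group() later picks these up, and any that end up unclaimed are released by __sdt_free() in the lines that follow. A sketch of the sg-related slice of that loop; the surrounding topology-level iteration and the sibling sd/sds/sgc allocations are omitted, and the error paths are assumptions.

	/* inside __sdt_alloc(), per topology level: */
	sdd->sg = alloc_percpu(struct sched_group *);
	if (!sdd->sg)
		return -ENOMEM;

	for_each_cpu(j, cpu_map) {
		struct sched_group *sg;

		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				GFP_KERNEL, cpu_to_node(j));
		if (!sg)
			return -ENOMEM;

		sg->next = sg;			/* a ring of one until the build_*_groups() paths link it */

		*per_cpu_ptr(sdd->sg, j) = sg;	/* claimed later via get_group()/claim_allocations() */
	}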
2154 if (sdd->sg) in __sdt_free()
2155 kfree(*per_cpu_ptr(sdd->sg, j)); in __sdt_free()
2163 free_percpu(sdd->sg); in __sdt_free()
2164 sdd->sg = NULL; in __sdt_free()