Lines matching refs:sg (uses of struct sched_group *sg in the scheduler topology code)

561 static void free_sched_groups(struct sched_group *sg, int free_sgc)  in free_sched_groups()  argument
565 if (!sg) in free_sched_groups()
568 first = sg; in free_sched_groups()
570 tmp = sg->next; in free_sched_groups()
572 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) in free_sched_groups()
573 kfree(sg->sgc); in free_sched_groups()
575 if (atomic_dec_and_test(&sg->ref)) in free_sched_groups()
576 kfree(sg); in free_sched_groups()
577 sg = tmp; in free_sched_groups()
578 } while (sg != first); in free_sched_groups()
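The matched lines above cover almost the whole of free_sched_groups(): it walks the circular ->next ring once and drops one reference per group and per shared sched_group_capacity, freeing each object only when its count reaches zero. A sketch assembled from those lines; the local declarations, the early return and the do { opener are not in the listing and are filled in as assumptions:

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
        struct sched_group *tmp, *first;        /* assumed declarations */

        if (!sg)
                return;

        first = sg;
        do {
                tmp = sg->next;

                /* sgc is shared between groups; free it only on the last put */
                if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
                        kfree(sg->sgc);

                /* the group itself is reference counted as well */
                if (atomic_dec_and_test(&sg->ref))
                        kfree(sg);

                sg = tmp;
        } while (sg != first);
}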
731 int group_balance_cpu(struct sched_group *sg) in group_balance_cpu() argument
733 return cpumask_first(group_balance_mask(sg)); in group_balance_cpu()
843 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask) in build_balance_mask() argument
845 const struct cpumask *sg_span = sched_group_span(sg); in build_balance_mask()
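group_balance_cpu() is fully visible in its two matched lines: the balance CPU of a group is simply the first CPU set in its balance mask. Of build_balance_mask() only the signature and the sg_span lookup appear here; from its use at lines 909 and 914 it evidently computes, into *mask, the CPUs allowed to act as the group's balance CPU, so no body is sketched for it. The accessor, with braces restored:

int group_balance_cpu(struct sched_group *sg)
{
        return cpumask_first(group_balance_mask(sg));
}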
882 struct sched_group *sg; in build_group_from_child_sched_domain() local
885 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), in build_group_from_child_sched_domain()
888 if (!sg) in build_group_from_child_sched_domain()
891 sg_span = sched_group_span(sg); in build_group_from_child_sched_domain()
897 atomic_inc(&sg->ref); in build_group_from_child_sched_domain()
898 return sg; in build_group_from_child_sched_domain()
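build_group_from_child_sched_domain() allocates a group plus its trailing cpumask in a single kzalloc_node() and hands it back with one reference already taken (the atomic_inc() at line 897). A sketch from the matched lines; the allocation flags and node, the NULL return, and the span initialisation in the middle are not shown in the listing and are assumptions based on how sg_span is obtained at line 891 and how the function is called at line 962:

static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
        struct sched_group *sg;
        struct cpumask *sg_span;

        sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
                          GFP_KERNEL, cpu_to_node(cpu));        /* flags/node assumed */
        if (!sg)
                return NULL;

        sg_span = sched_group_span(sg);
        /* assumed: the span is seeded from the child domain, or from sd itself
         * when there is no child, before the group is handed back */
        if (sd->child)
                cpumask_copy(sg_span, sched_domain_span(sd->child));
        else
                cpumask_copy(sg_span, sched_domain_span(sd));

        atomic_inc(&sg->ref);
        return sg;
}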
902 struct sched_group *sg) in init_overlap_sched_group() argument
909 build_balance_mask(sd, sg, mask); in init_overlap_sched_group()
910 cpu = cpumask_first_and(sched_group_span(sg), mask); in init_overlap_sched_group()
912 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
913 if (atomic_inc_return(&sg->sgc->ref) == 1) in init_overlap_sched_group()
914 cpumask_copy(group_balance_mask(sg), mask); in init_overlap_sched_group()
916 WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask)); in init_overlap_sched_group()
923 sg_span = sched_group_span(sg); in init_overlap_sched_group()
924 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in init_overlap_sched_group()
925 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
926 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
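init_overlap_sched_group() wires a freshly built overlap group to the per-CPU sched_group_capacity of its balance CPU: the first taker of the sgc (ref becomes 1) installs the balance mask, later takers only verify it matches (the WARN_ON_ONCE at line 916), and the capacity fields get safe defaults so a misbuilt topology cannot divide by zero. A sketch from the matched lines; the local declarations, the scratch mask and the else around the WARN are filled in as assumptions:

static void init_overlap_sched_group(struct sched_domain *sd,
                                     struct sched_group *sg)
{
        struct cpumask *mask = sched_domains_tmpmask2;  /* assumed scratch mask */
        struct sd_data *sdd = sd->private;              /* assumed */
        struct cpumask *sg_span;
        int cpu;

        build_balance_mask(sd, sg, mask);
        cpu = cpumask_first_and(sched_group_span(sg), mask);

        sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
        if (atomic_inc_return(&sg->sgc->ref) == 1)
                cpumask_copy(group_balance_mask(sg), mask);
        else
                WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

        /* start from a sane capacity so later iterations never divide by zero */
        sg_span = sched_group_span(sg);
        sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
        sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
        sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}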
932 struct sched_group *first = NULL, *last = NULL, *sg; in build_overlap_sched_groups() local
962 sg = build_group_from_child_sched_domain(sibling, cpu); in build_overlap_sched_groups()
963 if (!sg) in build_overlap_sched_groups()
966 sg_span = sched_group_span(sg); in build_overlap_sched_groups()
969 init_overlap_sched_group(sd, sg); in build_overlap_sched_groups()
972 first = sg; in build_overlap_sched_groups()
974 last->next = sg; in build_overlap_sched_groups()
975 last = sg; in build_overlap_sched_groups()
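The build_overlap_sched_groups() lines show the usual pattern for chaining per-domain groups into a circular singly linked list: the first group is remembered, each later group is appended through last->next, and the ring is kept closed by pointing the last group back at the first. A trimmed sketch of just that linkage; the iteration, the covered-mask bookkeeping, the error path and the ring closure do not reference sg and are assumptions here:

        struct sched_group *first = NULL, *last = NULL, *sg;

        for_each_cpu_wrap(i, span, cpu) {                        /* iteration assumed */
                struct cpumask *sg_span;

                /* ... covered-mask and sibling-domain checks elided ... */

                sg = build_group_from_child_sched_domain(sibling, cpu);
                if (!sg)
                        goto fail;                               /* assumed error path */

                sg_span = sched_group_span(sg);
                cpumask_or(covered, covered, sg_span);           /* assumed bookkeeping */

                init_overlap_sched_group(sd, sg);

                if (!first)
                        first = sg;
                if (last)
                        last->next = sg;
                last = sg;
                last->next = first;              /* assumed: keep the list circular */
        }
        sd->groups = first;                      /* assumed: attach the ring to sd */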
1064 struct sched_group *sg; in get_group() local
1070 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1071 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1074 already_visited = atomic_inc_return(&sg->ref) > 1; in get_group()
1076 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); in get_group()
1080 return sg; in get_group()
1083 cpumask_copy(sched_group_span(sg), sched_domain_span(child)); in get_group()
1084 cpumask_copy(group_balance_mask(sg), sched_group_span(sg)); in get_group()
1086 cpumask_set_cpu(cpu, sched_group_span(sg)); in get_group()
1087 cpumask_set_cpu(cpu, group_balance_mask(sg)); in get_group()
1090 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); in get_group()
1091 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in get_group()
1092 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in get_group()
1094 return sg; in get_group()
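get_group() pulls the pre-allocated group and capacity structure for a CPU out of the per-CPU tables in sd_data, bumps both reference counts (which claim_allocations() later tests), and initialises span, balance mask and default capacities only on the first visit. A sketch from the matched lines; the child-domain handling and the early return on an already visited group are assumptions consistent with the two return statements at lines 1080 and 1094:

static struct sched_group *get_group(int cpu, struct sd_data *sdd)
{
        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);    /* assumed */
        struct sched_domain *child = sd->child;                  /* assumed */
        struct sched_group *sg;
        bool already_visited;

        if (child)
                cpu = cpumask_first(sched_domain_span(child));   /* assumed */

        sg = *per_cpu_ptr(sdd->sg, cpu);
        sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

        /* take the references that claim_allocations() will look at */
        already_visited = atomic_inc_return(&sg->ref) > 1;
        /* sgc visits should track sg visits */
        WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));

        /* an already visited group is already fully set up */
        if (already_visited)
                return sg;

        if (child) {
                cpumask_copy(sched_group_span(sg), sched_domain_span(child));
                cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
        } else {
                cpumask_set_cpu(cpu, sched_group_span(sg));
                cpumask_set_cpu(cpu, group_balance_mask(sg));
        }

        sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
        sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
        sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;

        return sg;
}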
1119 struct sched_group *sg; in build_sched_groups() local
1124 sg = get_group(i, sdd); in build_sched_groups()
1126 cpumask_or(covered, covered, sched_group_span(sg)); in build_sched_groups()
1129 first = sg; in build_sched_groups()
1131 last->next = sg; in build_sched_groups()
1132 last = sg; in build_sched_groups()
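build_sched_groups() is the non-overlapping counterpart: one get_group() per not-yet-covered CPU, the covered mask grown by each group's span (line 1126), and the same first/last chaining into a circular list. A trimmed sketch of the loop; the covered setup, the wrap-around iteration and the final ring closure do not reference sg and are assumptions:

        struct sched_group *first = NULL, *last = NULL;

        cpumask_clear(covered);                          /* assumed setup */

        for_each_cpu_wrap(i, span, cpu) {                /* assumed iteration */
                struct sched_group *sg;

                if (cpumask_test_cpu(i, covered))        /* assumed: skip covered CPUs */
                        continue;

                sg = get_group(i, sdd);

                cpumask_or(covered, covered, sched_group_span(sg));

                if (!first)
                        first = sg;
                if (last)
                        last->next = sg;
                last = sg;
        }
        last->next = first;                              /* assumed: close the ring */
        sd->groups = first;                              /* assumed */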
1152 struct sched_group *sg = sd->groups; in init_sched_groups_capacity() local
1154 WARN_ON(!sg); in init_sched_groups_capacity()
1159 sg->group_weight = cpumask_weight(sched_group_span(sg)); in init_sched_groups_capacity()
1164 for_each_cpu(cpu, sched_group_span(sg)) { in init_sched_groups_capacity()
1170 sg->asym_prefer_cpu = max_cpu; in init_sched_groups_capacity()
1173 sg = sg->next; in init_sched_groups_capacity()
1174 } while (sg != sd->groups); in init_sched_groups_capacity()
1176 if (cpu != group_balance_cpu(sg)) in init_sched_groups_capacity()
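init_sched_groups_capacity() makes one pass around the ring hanging off sd->groups, recording each group's weight and, for asymmetric-packing domains, its preferred CPU, then leaves the actual capacity computation to the balance CPU alone (the check at line 1176). A sketch from the matched lines; the SD_ASYM_PACKING guard, the preference comparison and the trailing update_group_capacity() call are assumptions:

static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
        struct sched_group *sg = sd->groups;

        WARN_ON(!sg);

        do {
                int cpu, max_cpu = -1;                   /* shadows the parameter */

                sg->group_weight = cpumask_weight(sched_group_span(sg));

                if (!(sd->flags & SD_ASYM_PACKING))      /* assumed guard */
                        goto next;

                for_each_cpu(cpu, sched_group_span(sg)) {
                        if (max_cpu < 0)
                                max_cpu = cpu;
                        else if (sched_asym_prefer(cpu, max_cpu))  /* assumed */
                                max_cpu = cpu;
                }
                sg->asym_prefer_cpu = max_cpu;

next:
                sg = sg->next;
        } while (sg != sd->groups);

        /* only the group's balance CPU computes the real capacities */
        if (cpu != group_balance_cpu(sg))
                return;

        update_group_capacity(sd, cpu);                  /* assumed */
}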
1274 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
1275 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
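claim_allocations() is where the references taken in get_group() pay off: any per-CPU slot whose object is referenced by the new topology is set to NULL so that the later __sdt_free() pass will not free it. Only the sg slot appears in this listing; the sd, sds and sgc slots are handled the same way in the full function, so the sgc lines below are assumed by analogy:

        /* excerpt; sd/sds handling elided */
        if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
                *per_cpu_ptr(sdd->sg, cpu) = NULL;

        if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))   /* assumed by analogy */
                *per_cpu_ptr(sdd->sgc, cpu) = NULL;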
1757 sdd->sg = alloc_percpu(struct sched_group *); in __sdt_alloc()
1758 if (!sdd->sg) in __sdt_alloc()
1768 struct sched_group *sg; in __sdt_alloc() local
1785 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), in __sdt_alloc()
1787 if (!sg) in __sdt_alloc()
1790 sg->next = sg; in __sdt_alloc()
1792 *per_cpu_ptr(sdd->sg, j) = sg; in __sdt_alloc()
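__sdt_alloc() builds the per-CPU backing store that get_group() later draws from: one percpu pointer table per topology level (line 1757) and, for every CPU, a zeroed group with its trailing cpumask, initially pointing at itself as a ring of one (line 1790). A trimmed sketch of the sg-related part; the surrounding loops, error handling and the sd/sds/sgc allocations are elided or assumed:

        sdd->sg = alloc_percpu(struct sched_group *);
        if (!sdd->sg)
                return -ENOMEM;                          /* assumed error path */

        for_each_cpu(j, cpu_map) {                       /* assumed iteration */
                struct sched_group *sg;

                /* ... sd and sds allocations elided ... */

                sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
                                  GFP_KERNEL, cpu_to_node(j));  /* flags/node assumed */
                if (!sg)
                        return -ENOMEM;

                sg->next = sg;                           /* a new group is a ring of one */

                *per_cpu_ptr(sdd->sg, j) = sg;

                /* ... sgc allocation elided ... */
        }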
1830 if (sdd->sg) in __sdt_free()
1831 kfree(*per_cpu_ptr(sdd->sg, j)); in __sdt_free()
1839 free_percpu(sdd->sg); in __sdt_free()
1840 sdd->sg = NULL; in __sdt_free()
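__sdt_free() is the mirror image: whatever claim_allocations() left behind in the per-CPU slots is freed, then the percpu tables themselves are released. A trimmed sketch of the sg-related part, with the surrounding loop and the sd/sds/sgc teardown assumed:

        for_each_cpu(j, cpu_map) {                       /* assumed iteration */
                /* ... sd and sds teardown elided ... */

                if (sdd->sg)
                        kfree(*per_cpu_ptr(sdd->sg, j)); /* claimed slots are NULL, kfree ignores them */

                /* ... sgc teardown elided ... */
        }
        free_percpu(sdd->sg);
        sdd->sg = NULL;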