Searched refs:sg (Results 1 – 4 of 4) sorted by relevance
/kernel/sched/
tune.c
   829  struct sched_group *sg,                                              in schedtune_add_cluster_nrg() argument
   843  cluster_cpus = sched_group_cpus(sg);                                 in schedtune_add_cluster_nrg()
   847  min_pwr = sg->sge->idle_states[sg->sge->nr_idle_states - 1].power;   in schedtune_add_cluster_nrg()
   848  max_pwr = sg->sge->cap_states[sg->sge->nr_cap_states - 1].power;     in schedtune_add_cluster_nrg()
   880  sched_group_cpus(sg),                                                in schedtune_add_cluster_nrg()
   901  struct sched_group *sg;                                              in schedtune_init() local
   919  sg = sd->groups;                                                     in schedtune_init()
   921  schedtune_add_cluster_nrg(sd, sg, ste);                              in schedtune_init()
   922  } while (sg = sg->next, sg != sd->groups);                           in schedtune_init()
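The tune.c matches at 919-922 show the standard idiom for walking a sched_domain's groups: they form a circular singly linked list headed by sd->groups, and a do/while loop with a comma expression in its condition visits each group exactly once. Below is a minimal userspace sketch of that traversal; struct sched_group here is a simplified stand-in for the kernel type, and visit_cluster() is a hypothetical placeholder for schedtune_add_cluster_nrg().

    #include <stdio.h>

    struct sched_group {
        int first_cpu;             /* stand-in for the group's cpumask */
        struct sched_group *next;  /* groups form a circular list */
    };

    /* hypothetical placeholder for schedtune_add_cluster_nrg() */
    static void visit_cluster(struct sched_group *sg)
    {
        printf("visiting cluster starting at CPU %d\n", sg->first_cpu);
    }

    int main(void)
    {
        struct sched_group g0 = { .first_cpu = 0 }, g1 = { .first_cpu = 4 };
        struct sched_group *head = &g0, *sg;

        g0.next = &g1;
        g1.next = &g0;  /* close the ring, as the kernel does for sd->groups */

        sg = head;
        do {
            visit_cluster(sg);  /* tune.c:921 calls schedtune_add_cluster_nrg() here */
        } while (sg = sg->next, sg != head);  /* comma op: advance, then test (tune.c:922) */

        return 0;
    }

The do/while form matters because sg starts at the head: a plain while (sg != head) would never enter the loop.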
core.c
  5331  sd_alloc_ctl_group_table(struct sched_group *sg)                                  in sd_alloc_ctl_group_table() argument
  5340  table->child = sd_alloc_ctl_energy_table((struct sched_group_energy *)sg->sge);   in sd_alloc_ctl_group_table()
  5352  struct sched_group *sg = sd->groups;                                              in sd_alloc_ctl_domain_table() local
  5354  if (sg->sge) {                                                                    in sd_alloc_ctl_domain_table()
  5357  do {} while (nr_sgs++, sg = sg->next, sg != sd->groups);                          in sd_alloc_ctl_domain_table()
  5395  sg = sd->groups;                                                                  in sd_alloc_ctl_domain_table()
  5396  if (sg->sge) {                                                                    in sd_alloc_ctl_domain_table()
  5404  entry->child = sd_alloc_ctl_group_table(sg);                                      in sd_alloc_ctl_domain_table()
  5405  } while (entry++, i++, sg = sg->next, sg != sd->groups);                          in sd_alloc_ctl_domain_table()
  5956  static void free_sched_groups(struct sched_group *sg, int free_sgc)               in free_sched_groups() argument
  [all …]
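The core.c matches at 5354-5405 illustrate a two-pass count-then-fill pattern: an empty-bodied do {} while first counts the groups on the ring (nr_sgs), then a second loop walks the ring again, filling one ctl table entry per group and attaching the per-group energy table as its child. A runnable sketch of the pattern, with struct ctl_entry and alloc_group_entries() as illustrative stand-ins rather than kernel names:

    #include <stdio.h>
    #include <stdlib.h>

    struct sched_group { struct sched_group *next; };
    struct ctl_entry { struct sched_group *group; };  /* stand-in for struct ctl_table */

    static struct ctl_entry *alloc_group_entries(struct sched_group *head, int *out_n)
    {
        struct sched_group *sg = head;
        struct ctl_entry *table, *entry;
        int nr_sgs = 0;

        /* Pass 1: count the groups, mirroring core.c:5357's empty-bodied loop. */
        do {} while (nr_sgs++, sg = sg->next, sg != head);

        table = calloc(nr_sgs, sizeof(*table));
        if (!table)
            return NULL;

        /* Pass 2: fill one entry per group, advancing entry and sg together,
         * as core.c:5404-5405 does when attaching per-group child tables. */
        entry = table;
        sg = head;
        do {
            entry->group = sg;
        } while (entry++, sg = sg->next, sg != head);

        *out_n = nr_sgs;
        return table;
    }

    int main(void)
    {
        struct sched_group a, b, c;
        int n;

        a.next = &b; b.next = &c; c.next = &a;   /* three groups on a ring */
        struct ctl_entry *t = alloc_group_entries(&a, &n);
        printf("allocated %d entries\n", n);     /* prints: allocated 3 entries */
        free(t);
        return 0;
    }

Counting first lets the kernel size one contiguous allocation for all entries instead of growing a table while iterating.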
fair.c
  4696  long group_norm_util(struct energy_env *eenv, struct sched_group *sg)    in group_norm_util() argument
  4700  unsigned long capacity = sg->sge->cap_states[eenv->cap_idx].cap;         in group_norm_util()
  4702  for_each_cpu(i, sched_group_cpus(sg)) {                                  in group_norm_util()
  4731  static int group_idle_state(struct sched_group *sg)                      in group_idle_state() argument
  4736  for_each_cpu(i, sched_group_cpus(sg))                                    in group_idle_state()
  4760  struct sched_group *sg;                                                  in sched_group_energy() local
  4789  sg = sd->groups;                                                         in sched_group_energy()
  4792  if (sd->child && group_first_cpu(sg) != cpu)                             in sched_group_energy()
  4800  if (sg_shared_cap && sg_shared_cap->group_weight >= sg->group_weight)    in sched_group_energy()
  4803  eenv->sg_cap = sg;                                                       in sched_group_energy()
  [all …]
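In fair.c, group_norm_util() normalizes each CPU's utilization against the capacity of the group's currently selected capacity state (sg->sge->cap_states[eenv->cap_idx].cap) and sums the result over sched_group_cpus(sg), producing a fixed-point fraction of SCHED_CAPACITY_SCALE (1024). A sketch of that normalization step; norm_util() here is an illustrative stand-in for the per-CPU helper group_norm_util() calls, not the kernel's exact code:

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024UL

    /* Utilization as a fraction of SCHED_CAPACITY_SCALE at the given
     * capacity, clamped so a fully busy CPU reports exactly 1024. */
    static unsigned long norm_util(unsigned long util, unsigned long capacity)
    {
        if (util >= capacity)
            return SCHED_CAPACITY_SCALE;
        return (util << 10) / capacity;  /* << 10 == * SCHED_CAPACITY_SCALE */
    }

    int main(void)
    {
        /* A CPU with util 256 in a cap state of capacity 512 is 50% busy: */
        printf("%lu\n", norm_util(256, 512));  /* prints 512, i.e. 0.5 * 1024 */

        /* Summing over the group's CPUs, as the for_each_cpu() loop at
         * fair.c:4702 does, gives the group's normalized utilization: */
        unsigned long group = norm_util(256, 512) + norm_util(640, 512);
        printf("%lu\n", group);  /* 512 + 1024 = 1536 */
        return 0;
    }

Normalizing against the chosen capacity state lets the energy model compare groups running at different operating points on a common 0..1024 scale.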
sched.h
   859  static inline struct cpumask *sched_group_cpus(struct sched_group *sg)   in sched_group_cpus() argument
   861  return to_cpumask(sg->cpumask);                                          in sched_group_cpus()
   868  static inline struct cpumask *sched_group_mask(struct sched_group *sg)   in sched_group_mask() argument
   870  return to_cpumask(sg->sgc->cpumask);                                     in sched_group_mask()
   882  extern int group_balance_cpu(struct sched_group *sg);
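Both accessors in sched.h rely on the to_cpumask() pattern: the CPU bitmap is stored as a trailing array of unsigned long inside the structure, and the helper simply reinterprets that storage as a struct cpumask. A simplified model of the idea (the field layout, NR_CPUS, and single-word mask below are assumptions for illustration; the kernel's real definitions differ):

    #include <stdio.h>

    #define NR_CPUS 8                            /* assumption for illustration */
    struct cpumask { unsigned long bits[1]; };   /* one word covers NR_CPUS <= 64 */

    struct sched_group {
        struct sched_group *next;
        unsigned long cpumask[1];  /* the kernel embeds the bitmap storage here */
    };

    /* The to_cpumask() idea: reinterpret the raw word array as a cpumask. */
    static struct cpumask *sched_group_cpus(struct sched_group *sg)
    {
        return (struct cpumask *)sg->cpumask;
    }

    int main(void)
    {
        struct sched_group sg = { 0 };
        struct cpumask *mask;

        sg.cpumask[0] |= 1UL << 2;  /* put CPU 2 in the group */
        mask = sched_group_cpus(&sg);
        printf("cpu2 in group: %d\n", !!(mask->bits[0] & (1UL << 2)));
        return 0;
    }

Embedding the mask storage directly in struct sched_group avoids a separate allocation per group while still letting callers use the normal cpumask API, e.g. for_each_cpu(i, sched_group_cpus(sg)).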