Lines matching references to identifier: child
717 struct task_group *parent, *child; in walk_tg_tree_from() local
726 list_for_each_entry_rcu(child, &parent->children, siblings) { in walk_tg_tree_from()
727 parent = child; in walk_tg_tree_from()
737 child = parent; in walk_tg_tree_from()
6193 parent->parent->child = tmp; in cpu_attach_domain()
6211 sd->child = NULL; in cpu_attach_domain()
6282 if (!sibling->child) in build_group_mask()
6286 if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) in build_group_mask()
6336 if (sibling->child) in build_overlap_sched_groups()
6337 cpumask_copy(sg_span, sched_domain_span(sibling->child)); in build_overlap_sched_groups()
6385 struct sched_domain *child = sd->child; in get_group() local
6387 if (child) in get_group()
6388 cpu = cpumask_first(sched_domain_span(child)); in get_group()
6528 if (sd->child && !sd->child->groups->sge) { in init_sched_energy()
6532 sd->name, sd->child->name); in init_sched_energy()
6677 struct sched_domain *child, int cpu) in sd_init() argument
6730 .child = child, in sd_init()
7192 struct sched_domain *child, int cpu) in build_sched_domain() argument
7194 struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); in build_sched_domain()
7196 if (child) { in build_sched_domain()
7197 sd->level = child->level + 1; in build_sched_domain()
7199 child->parent = sd; in build_sched_domain()
7201 if (!cpumask_subset(sched_domain_span(child), in build_sched_domain()
7206 child->name, sd->name); in build_sched_domain()
7211 sched_domain_span(child)); in build_sched_domain()
8253 struct task_group *child; in tg_rt_schedulable() local
8288 list_for_each_entry_rcu(child, &tg->children, siblings) { in tg_rt_schedulable()
8289 period = ktime_to_ns(child->rt_bandwidth.rt_period); in tg_rt_schedulable()
8290 runtime = child->rt_bandwidth.rt_runtime; in tg_rt_schedulable()
8292 if (child == d->tg) { in tg_rt_schedulable()