Lines matching refs:rd in kernel/sched/deadline.c
75 return &cpu_rq(i)->rd->dl_bw; in dl_bw_of()
80 struct root_domain *rd = cpu_rq(i)->rd; in dl_bw_cpus() local
86 if (cpumask_subset(rd->span, cpu_active_mask)) in dl_bw_cpus()
87 return cpumask_weight(rd->span); in dl_bw_cpus()
91 for_each_cpu_and(i, rd->span, cpu_active_mask) in dl_bw_cpus()
99 struct root_domain *rd = cpu_rq(i)->rd; in __dl_bw_capacity() local
105 for_each_cpu_and(i, rd->span, cpu_active_mask) in __dl_bw_capacity()
127 struct root_domain *rd = cpu_rq(cpu)->rd; in dl_bw_visited() local
129 if (rd->visit_gen == gen) in dl_bw_visited()
132 rd->visit_gen = gen; in dl_bw_visited()
461 return atomic_read(&rq->rd->dlo_count); in dl_overloaded()
469 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); in dl_set_overload()
477 atomic_inc(&rq->rd->dlo_count); in dl_set_overload()
485 atomic_dec(&rq->rd->dlo_count); in dl_clear_overload()
486 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask); in dl_clear_overload()
650 dl_b = &rq->rd->dl_bw; in dl_task_offline_migration()
652 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
655 dl_b = &later_rq->rd->dl_bw; in dl_task_offline_migration()
657 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); in dl_task_offline_migration()
1405 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER); in inc_dl_deadline()
1407 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline); in inc_dl_deadline()
1422 cpudl_clear(&rq->rd->cpudl, rq->cpu); in dec_dl_deadline()
1423 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); in dec_dl_deadline()
1430 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline); in dec_dl_deadline()
1772 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL)) in check_preempt_equal_dl()
1780 cpudl_find(&rq->rd->cpudl, p, NULL)) in check_preempt_equal_dl()
1993 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) in find_later_rq()
2254 for_each_cpu(cpu, this_rq->rd->dlo_mask) { in pull_dl_task()
2357 src_rd = rq->rd; in set_cpus_allowed_dl()
2387 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu); in rq_online_dl()
2389 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); in rq_online_dl()
2398 cpudl_clear(&rq->rd->cpudl, rq->cpu); in rq_offline_dl()
2399 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu); in rq_offline_dl()
2425 dl_b = &rq->rd->dl_bw; in dl_add_task_root_domain()
2428 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2435 void dl_clear_root_domain(struct root_domain *rd) in dl_clear_root_domain() argument
2439 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags); in dl_clear_root_domain()
2440 rd->dl_bw.total_bw = 0; in dl_clear_root_domain()
2441 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags); in dl_clear_root_domain()
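
For orientation, the block below is a minimal userspace C sketch of the rd->dl_bw accounting pattern these hits revolve around: __dl_add()/__dl_sub() adjust total_bw under the dl_bw lock (see the hits in dl_task_offline_migration() and dl_add_task_root_domain()), and dl_clear_root_domain() resets it when a root domain is rebuilt. The struct layout, helper names, and the pthread mutex are simplified stand-ins rather than the kernel's definitions, and the cpus argument the real __dl_add()/__dl_sub() take for per-CPU adjustments is omitted.

/* Simplified model of struct dl_bw embedded in struct root_domain. */
#include <pthread.h>
#include <stdio.h>

struct dl_bw {
	pthread_mutex_t lock;        /* stand-in for the kernel's raw_spinlock_t */
	unsigned long long total_bw; /* sum of admitted deadline bandwidth */
};

struct root_domain {
	struct dl_bw dl_bw;          /* the rd->dl_bw the listing keeps dereferencing */
};

/* Roughly __dl_add(): account a task's bandwidth into the root domain. */
static void dl_bw_add(struct dl_bw *b, unsigned long long task_bw)
{
	pthread_mutex_lock(&b->lock);
	b->total_bw += task_bw;
	pthread_mutex_unlock(&b->lock);
}

/* Roughly __dl_sub(): release a task's bandwidth from the root domain. */
static void dl_bw_sub(struct dl_bw *b, unsigned long long task_bw)
{
	pthread_mutex_lock(&b->lock);
	b->total_bw -= task_bw;
	pthread_mutex_unlock(&b->lock);
}

/* Mirrors dl_clear_root_domain(): zero the accounted bandwidth under the lock. */
static void clear_root_domain(struct root_domain *rd)
{
	pthread_mutex_lock(&rd->dl_bw.lock);
	rd->dl_bw.total_bw = 0;
	pthread_mutex_unlock(&rd->dl_bw.lock);
}

int main(void)
{
	struct root_domain rd = { .dl_bw = { .lock = PTHREAD_MUTEX_INITIALIZER } };

	dl_bw_add(&rd.dl_bw, 100);   /* task admitted on this root domain */
	dl_bw_sub(&rd.dl_bw, 40);    /* task migrated away or exited */
	printf("total_bw = %llu\n", rd.dl_bw.total_bw);

	clear_root_domain(&rd);      /* domain rebuilt: accounting starts over */
	printf("total_bw after clear = %llu\n", rd.dl_bw.total_bw);
	return 0;
}

The real accounting additionally spreads bandwidth across the CPUs of rd->span (hence the cpumask_weight(rd->span) arguments in the listing), which this sketch leaves out.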