Searched refs:rd (Results 1 – 9 of 9) sorted by relevance

/kernel/time/
sched_clock.c
88 struct clock_read_data *rd; in sched_clock() local
91 rd = sched_clock_read_begin(&seq); in sched_clock()
93 cyc = (rd->read_sched_clock() - rd->epoch_cyc) & in sched_clock()
94 rd->sched_clock_mask; in sched_clock()
95 res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift); in sched_clock()
111 static void update_clock_read_data(struct clock_read_data *rd) in update_clock_read_data() argument
114 cd.read_data[1] = *rd; in update_clock_read_data()
120 cd.read_data[0] = *rd; in update_clock_read_data()
133 struct clock_read_data rd; in update_sched_clock() local
135 rd = cd.read_data[0]; in update_sched_clock()
[all …]
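
The sched_clock.c hits above are the lockless read side of the generic sched_clock: a snapshot of clock_read_data is taken under a sequence count, the cycle delta since the epoch is masked and scaled to nanoseconds, and the read retries if an update raced with it. A minimal sketch of that loop, assuming the sched_clock_read_begin()/sched_clock_read_retry() pairing implied by the fragments:

u64 sched_clock_read_sketch(void)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 cyc, res;

	do {
		/* Pick up a consistent snapshot of the read data. */
		rd = sched_clock_read_begin(&seq);

		/* Cycles elapsed since the current epoch, wrapped to the mask. */
		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;

		/* Scale to nanoseconds and add the epoch offset. */
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (sched_clock_read_retry(seq));	/* retry if an update raced */

	return res;
}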
/kernel/sched/
topology.c
380 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains() local
444 tmp = rd->pd; in build_perf_domains()
445 rcu_assign_pointer(rd->pd, pd); in build_perf_domains()
453 tmp = rd->pd; in build_perf_domains()
454 rcu_assign_pointer(rd->pd, NULL); in build_perf_domains()
466 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); in free_rootdomain() local
468 cpupri_cleanup(&rd->cpupri); in free_rootdomain()
469 cpudl_cleanup(&rd->cpudl); in free_rootdomain()
470 free_cpumask_var(rd->dlo_mask); in free_rootdomain()
471 free_cpumask_var(rd->rto_mask); in free_rootdomain()
[all …]
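
The topology.c matches show the root_domain being torn down via RCU: the callback recovers the root_domain from its embedded rcu_head with container_of() and releases the per-domain structures. A sketch of that callback, kept to the fields visible in the fragments above (the remaining masks and the final free are elided):

static void free_rootdomain_sketch(struct rcu_head *rcu)
{
	/* The rcu_head sits inside the root_domain, so walk back to it. */
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	/* ... further cpumasks and the root_domain itself are freed here. */
}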
rt.c
335 return atomic_read(&rq->rd->rto_count); in rt_overloaded()
343 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); in rt_set_overload()
354 atomic_inc(&rq->rd->rto_count); in rt_set_overload()
363 atomic_dec(&rq->rd->rto_count); in rt_clear_overload()
364 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); in rt_clear_overload()
638 return this_rq()->rd->span; in sched_rt_period_mask()
737 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; in do_balance_runtime() local
741 weight = cpumask_weight(rd->span); in do_balance_runtime()
745 for_each_cpu(i, rd->span) { in do_balance_runtime()
788 struct root_domain *rd = rq->rd; in __disable_runtime() local
[all …]
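
The rt.c matches are the RT overload bookkeeping on the root domain: a CPU publishes itself in rd->rto_mask and then bumps rd->rto_count, so any CPU that sees a non-zero count also sees the mask bit. A sketch of the set/read pair, with the rq->online check and write barrier assumed from the mainline pattern rather than shown in the fragments:

static inline void rt_set_overload_sketch(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/* The mask update must be visible before the count is raised. */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline int rt_overloaded_sketch(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}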
deadline.c
107 return &cpu_rq(i)->rd->dl_bw; in dl_bw_of()
112 struct root_domain *rd = cpu_rq(i)->rd; in dl_bw_cpus() local
118 if (cpumask_subset(rd->span, cpu_active_mask)) in dl_bw_cpus()
119 return cpumask_weight(rd->span); in dl_bw_cpus()
123 for_each_cpu_and(i, rd->span, cpu_active_mask) in dl_bw_cpus()
153 return __dl_bw_capacity(cpu_rq(i)->rd->span); in dl_bw_capacity()
159 struct root_domain *rd = cpu_rq(cpu)->rd; in dl_bw_visited() local
161 if (rd->visit_gen == gen) in dl_bw_visited()
164 rd->visit_gen = gen; in dl_bw_visited()
171 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); in __dl_update() local
[all …]
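
The deadline.c matches count CPUs for deadline-bandwidth admission against the root-domain span: when the whole span is active, its weight is returned directly; otherwise the active CPUs in the span are counted one by one. A sketch assembled from those fragments (RCU/lockdep annotations omitted):

static inline int dl_bw_cpus_sketch(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus = 0;

	/* Fast path: every CPU in the span is active. */
	if (cpumask_subset(rd->span, cpu_active_mask))
		return cpumask_weight(rd->span);

	/* Slow path: count only the active CPUs in the span. */
	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}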
sched.h
906 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
907 extern void sched_get_rd(struct root_domain *rd);
908 extern void sched_put_rd(struct root_domain *rd);
1059 struct root_domain *rd; member
2458 if (!READ_ONCE(rq->rd->overload)) in add_nr_running()
2459 WRITE_ONCE(rq->rd->overload, 1); in add_nr_running()
fair.c
6090 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) { in update_overutilized_status()
6091 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); in update_overutilized_status()
6092 trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED); in update_overutilized_status()
7302 struct root_domain *rd = this_rq()->rd; in find_energy_efficient_cpu() local
7319 pd = rcu_dereference(rd->pd); in find_energy_efficient_cpu()
7320 if (!pd || READ_ONCE(rd->overutilized)) in find_energy_efficient_cpu()
9123 (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity || in check_misfit_status()
10045 struct root_domain *rd = env->dst_rq->rd; in update_sd_lb_stats() local
10048 WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD); in update_sd_lb_stats()
10051 WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED); in update_sd_lb_stats()
[all …]
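
The fair.c matches update root-domain status flags with plain READ_ONCE()/WRITE_ONCE() accesses: the overutilized flag is only written when it actually changes, so the hot path costs a single load. A sketch of the update helper those lines belong to:

static inline void update_overutilized_status_sketch(struct rq *rq)
{
	/* Write (and trace) only on the 0 -> SG_OVERUTILIZED transition. */
	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
		WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
		trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
	}
}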
core.c
7780 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
7788 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
8330 if (!cpumask_subset(task_rq(p)->rd->span, mask)) in dl_task_check_affinity()
9543 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
9563 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
9654 if (rq->rd) { in sched_cpu_activate()
9655 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_activate()
9698 if (rq->rd) { in sched_cpu_deactivate()
9700 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_deactivate()
9993 rq->rd = NULL; in sched_init()
debug.c
708 dl_bw = &cpu_rq(cpu)->rd->dl_bw; in print_dl_rq()
/kernel/trace/
ring_buffer.c
4013 unsigned int rd; in ring_buffer_record_off() local
4017 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
4018 new_rd = rd | RB_BUFFER_OFF; in ring_buffer_record_off()
4019 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_off()
4036 unsigned int rd; in ring_buffer_record_on() local
4043 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4044 new_rd = rd & ~RB_BUFFER_OFF; in ring_buffer_record_on()
4045 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_on()
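
The ring_buffer.c matches are an atomic read-modify-write loop on record_disabled: the RB_BUFFER_OFF bit is set (or cleared) with atomic_cmpxchg(), retrying if another CPU changed the counter in between. A sketch of the off/on pair built from those fragments:

void ring_buffer_record_off_sketch(struct trace_buffer *buffer)
{
	unsigned int rd, new_rd;

	do {
		rd = atomic_read(&buffer->record_disabled);
		new_rd = rd | RB_BUFFER_OFF;	/* set the permanent-off bit */
	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
}

void ring_buffer_record_on_sketch(struct trace_buffer *buffer)
{
	unsigned int rd, new_rd;

	do {
		rd = atomic_read(&buffer->record_disabled);
		new_rd = rd & ~RB_BUFFER_OFF;	/* clear the permanent-off bit */
	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
}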