Searched for refs:rd (results 1 – 9 of 9), sorted by relevance
/kernel/time/
D | sched_clock.c
    100  struct clock_read_data *rd;    in sched_clock() local
    104  rd = cd.read_data + (seq & 1);    in sched_clock()
    106  cyc = (rd->read_sched_clock() - rd->epoch_cyc) &    in sched_clock()
    107      rd->sched_clock_mask;    in sched_clock()
    108  res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);    in sched_clock()
    124  static void update_clock_read_data(struct clock_read_data *rd)    in update_clock_read_data() argument
    127  cd.read_data[1] = *rd;    in update_clock_read_data()
    133  cd.read_data[0] = *rd;    in update_clock_read_data()
    146  struct clock_read_data rd;    in update_sched_clock() local
    148  rd = cd.read_data[0];    in update_sched_clock()
    [all …]
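The sched_clock() hits above are the lock-free reader side of a two-copy scheme: the reader picks cd.read_data[seq & 1] and converts raw cycles to nanoseconds, while update_clock_read_data() refreshes read_data[1] and then read_data[0]. Below is a minimal userspace sketch of that pattern, assuming C11 atomics in place of the kernel's seqcount and with the struct fields mirrored only for illustration:

#include <stdatomic.h>
#include <stdint.h>

/* Mirrors of the kernel's clock_read_data fields; names are illustrative. */
struct clock_read_data {
        uint64_t epoch_ns;
        uint64_t epoch_cyc;
        uint64_t sched_clock_mask;
        uint32_t mult;
        uint32_t shift;
};

static _Atomic unsigned int seq;                /* bumped by the updater */
static struct clock_read_data read_data[2];     /* two copies, picked by seq & 1 */

static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
        return (cyc * mult) >> shift;
}

/* Reader side: pick the copy selected by the sequence number and retry if
 * the updater moved on while we were reading, much like sched_clock(). */
static uint64_t sched_clock_sketch(uint64_t (*read_cyc)(void))
{
        unsigned int s;
        uint64_t cyc, res;

        do {
                s = atomic_load_explicit(&seq, memory_order_acquire);
                struct clock_read_data *rd = &read_data[s & 1];

                cyc = (read_cyc() - rd->epoch_cyc) & rd->sched_clock_mask;
                res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
        } while (atomic_load_explicit(&seq, memory_order_acquire) != s);

        return res;
}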
/kernel/sched/
D | topology.c
    347  struct root_domain *rd = cpu_rq(cpu)->rd;    in build_perf_domains() local
    375  if (rd->pd)    in build_perf_domains()
    406  tmp = rd->pd;    in build_perf_domains()
    407  rcu_assign_pointer(rd->pd, pd);    in build_perf_domains()
    415  tmp = rd->pd;    in build_perf_domains()
    416  rcu_assign_pointer(rd->pd, NULL);    in build_perf_domains()
    428  struct root_domain *rd = container_of(rcu, struct root_domain, rcu);    in free_rootdomain() local
    430  cpupri_cleanup(&rd->cpupri);    in free_rootdomain()
    431  cpudl_cleanup(&rd->cpudl);    in free_rootdomain()
    432  free_cpumask_var(rd->dlo_mask);    in free_rootdomain()
    [all …]
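build_perf_domains() swaps in a new perf-domain list with rcu_assign_pointer() and keeps the old pointer in tmp so it can be torn down once readers are done, and free_rootdomain() recovers the root_domain from its RCU head with container_of(). A rough userspace sketch of the publish step is below; a C11 release store stands in for rcu_assign_pointer(), the deferred-free half is omitted, and the struct names are illustrative:

#include <stdatomic.h>
#include <stddef.h>

struct perf_domain {
        struct perf_domain *next;               /* singly linked list, as in the kernel */
};

struct root_domain_sketch {
        _Atomic(struct perf_domain *) pd;       /* RCU-protected pointer in the kernel */
};

/* Publish a freshly built perf-domain list.  The release store plays the role
 * of rcu_assign_pointer(): everything written while building *new_pd becomes
 * visible before the pointer itself, so concurrent readers see either the old
 * list or the complete new one.  The caller must keep the returned old list
 * alive until no reader can still hold it (the kernel defers the free with an
 * RCU callback). */
static struct perf_domain *publish_pd_sketch(struct root_domain_sketch *rd,
                                             struct perf_domain *new_pd)
{
        struct perf_domain *old =
                atomic_load_explicit(&rd->pd, memory_order_relaxed);

        atomic_store_explicit(&rd->pd, new_pd, memory_order_release);
        return old;
}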
D | rt.c
    280  return atomic_read(&rq->rd->rto_count);    in rt_overloaded()
    288  cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);    in rt_set_overload()
    299  atomic_inc(&rq->rd->rto_count);    in rt_set_overload()
    308  atomic_dec(&rq->rd->rto_count);    in rt_clear_overload()
    309  cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);    in rt_clear_overload()
    591  return this_rq()->rd->span;    in sched_rt_period_mask()
    690  struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;    in do_balance_runtime() local
    694  weight = cpumask_weight(rd->span);    in do_balance_runtime()
    698  for_each_cpu(i, rd->span) {    in do_balance_runtime()
    741  struct root_domain *rd = rq->rd;    in __disable_runtime() local
    [all …]
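rt_set_overload() and rt_clear_overload() keep the root domain's rto_mask and rto_count in step: the CPU's bit is set in the mask before the count is bumped, and the count is dropped before the bit is cleared, so a reader such as rt_overloaded() that sees a non-zero count can expect the mask to be populated. A sketch of that ordering, assuming a 64-bit mask and C11 release/acquire atomics in place of the kernel's cpumask helpers and barriers:

#include <stdatomic.h>
#include <stdbool.h>

/* Overload state for up to 64 CPUs; the kernel uses a cpumask plus atomic_t. */
struct rto_state_sketch {
        _Atomic unsigned long long rto_mask;    /* one bit per overloaded CPU */
        atomic_int rto_count;                   /* number of overloaded CPUs */
};

static void rt_set_overload_sketch(struct rto_state_sketch *s, int cpu)
{
        /* Publish the mask bit first, then the count, so a reader that sees
         * count > 0 also finds a bit set in the mask. */
        atomic_fetch_or_explicit(&s->rto_mask, 1ULL << cpu, memory_order_release);
        atomic_fetch_add_explicit(&s->rto_count, 1, memory_order_release);
}

static void rt_clear_overload_sketch(struct rto_state_sketch *s, int cpu)
{
        /* Mirror of the kernel's order: drop the count, then clear the bit. */
        atomic_fetch_sub_explicit(&s->rto_count, 1, memory_order_release);
        atomic_fetch_and_explicit(&s->rto_mask, ~(1ULL << cpu), memory_order_release);
}

static bool rt_overloaded_sketch(struct rto_state_sketch *s)
{
        return atomic_load_explicit(&s->rto_count, memory_order_acquire) > 0;
}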
D | deadline.c
     51  return &cpu_rq(i)->rd->dl_bw;    in dl_bw_of()
     56  struct root_domain *rd = cpu_rq(i)->rd;    in dl_bw_cpus() local
     61  for_each_cpu_and(i, rd->span, cpu_active_mask)    in dl_bw_cpus()
    380  return atomic_read(&rq->rd->dlo_count);    in dl_overloaded()
    388  cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);    in dl_set_overload()
    396  atomic_inc(&rq->rd->dlo_count);    in dl_set_overload()
    404  atomic_dec(&rq->rd->dlo_count);    in dl_clear_overload()
    405  cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);    in dl_clear_overload()
    583  dl_b = &rq->rd->dl_bw;    in dl_task_offline_migration()
    585  __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));    in dl_task_offline_migration()
    [all …]
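dl_bw_of() resolves a CPU to its root domain's deadline-bandwidth accounting, dl_bw_cpus() counts the CPUs of rd->span that are also in cpu_active_mask, and dl_task_offline_migration() subtracts a migrating task's bandwidth via __dl_sub(). A small sketch of those two steps on plain 64-bit masks; the struct fields and helper names are made up for the example:

#include <stdint.h>

/* Deadline bandwidth bookkeeping for one root domain (illustrative fields). */
struct dl_bw_sketch {
        uint64_t bw;            /* per-CPU bandwidth limit */
        uint64_t total_bw;      /* bandwidth currently admitted in the domain */
};

/* Count CPUs that are both in the domain's span and currently active,
 * mirroring the for_each_cpu_and() walk in dl_bw_cpus(). */
static int dl_bw_cpus_sketch(uint64_t span, uint64_t active_mask)
{
        return __builtin_popcountll(span & active_mask);
}

/* Drop a migrating task's bandwidth from the domain total, as the __dl_sub()
 * call in dl_task_offline_migration() does (the per-CPU redistribution the
 * kernel also performs is left out here). */
static void dl_sub_sketch(struct dl_bw_sketch *dl_b, uint64_t task_bw)
{
        dl_b->total_bw -= task_bw;
}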
D | sched.h
     801  extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
     802  extern void sched_get_rd(struct root_domain *rd);
     803  extern void sched_put_rd(struct root_domain *rd);
     933  struct root_domain *rd;    member
    1951  if (!READ_ONCE(rq->rd->overload))    in add_nr_running()
    1952  WRITE_ONCE(rq->rd->overload, 1);    in add_nr_running()
    2251  struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);    in __dl_update() local
    2256  for_each_cpu_and(i, rd->span, cpu_active_mask) {    in __dl_update()
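sched.h declares the root-domain API and the rd member of struct rq, and the add_nr_running() snippet shows the overload flag being set with a READ_ONCE() check before the WRITE_ONCE() store, so the common case stays a cheap read. A sketch of that check-then-set, assuming relaxed C11 atomics in place of the kernel macros:

#include <stdatomic.h>

static atomic_int rd_overload;  /* stands in for rq->rd->overload */

/* Check-then-set as in add_nr_running(): only write when the flag actually
 * changes, and because the store is idempotent a racing duplicate write is
 * harmless.  Relaxed atomics play the role of READ_ONCE()/WRITE_ONCE(). */
static void mark_rd_overloaded_sketch(void)
{
        if (!atomic_load_explicit(&rd_overload, memory_order_relaxed))
                atomic_store_explicit(&rd_overload, 1, memory_order_relaxed);
}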
D | fair.c
    5306  if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {    in update_overutilized_status()
    5307  WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);    in update_overutilized_status()
    5308  trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);    in update_overutilized_status()
    6532  struct root_domain *rd = cpu_rq(smp_processor_id())->rd;    in find_energy_efficient_cpu() local
    6544  pd = rcu_dereference(rd->pd);    in find_energy_efficient_cpu()
    6545  if (!pd || READ_ONCE(rd->overutilized))    in find_energy_efficient_cpu()
    8054  mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;    in update_cpu_capacity()
    8178  (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity.val ||    in check_misfit_status()
    8584  struct root_domain *rd = env->dst_rq->rd;    in update_sd_lb_stats() local
    8587  WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);    in update_sd_lb_stats()
    [all …]
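In fair.c the root domain gates energy-aware scheduling: update_overutilized_status() latches SG_OVERUTILIZED on rd->overutilized, and find_energy_efficient_cpu() gives up early when there is no perf-domain list or the domain is overutilized. A sketch of both halves follows; the flag value, struct layout, and atomics are stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() and rcu_dereference():

#include <stdatomic.h>
#include <stdbool.h>

#define SG_OVERUTILIZED_SKETCH 0x2              /* placeholder for the kernel flag */

struct perf_domain;                             /* opaque for this sketch */

struct root_domain_eas_sketch {
        _Atomic int overutilized;
        _Atomic(struct perf_domain *) pd;
};

/* The guard at the top of find_energy_efficient_cpu(): without a perf-domain
 * list, or with an overutilized root domain, energy-aware placement is skipped
 * and the normal wake-up path is used instead. */
static bool eas_usable_sketch(struct root_domain_eas_sketch *rd)
{
        struct perf_domain *pd =
                atomic_load_explicit(&rd->pd, memory_order_acquire);

        return pd && !atomic_load_explicit(&rd->overutilized, memory_order_relaxed);
}

/* And the latch in update_overutilized_status(): set the flag once any CPU is
 * detected as overutilized, if it is not already set. */
static void update_overutilized_sketch(struct root_domain_eas_sketch *rd,
                                       bool cpu_overutilized)
{
        if (!atomic_load_explicit(&rd->overutilized, memory_order_relaxed) &&
            cpu_overutilized)
                atomic_store_explicit(&rd->overutilized, SG_OVERUTILIZED_SKETCH,
                                      memory_order_relaxed);
}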
D | core.c
    5047  cpumask_t *span = rq->rd->span;    in __sched_setscheduler()
    5055  rq->rd->dl_bw.bw == 0) {    in __sched_setscheduler()
    5562  if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {    in sched_setaffinity()
    6231  if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,    in task_can_attach()
    6429  cpumask_set_cpu(rq->cpu, rq->rd->online);    in set_rq_online()
    6449  cpumask_clear_cpu(rq->cpu, rq->rd->online);    in set_rq_offline()
    6531  if (rq->rd) {    in sched_cpu_activate()
    6532  BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));    in sched_cpu_activate()
    6600  if (rq->rd) {    in sched_cpu_dying()
    6601  BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));    in sched_cpu_dying()
    [all …]
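core.c uses the root domain for hotplug bookkeeping (set_rq_online()/set_rq_offline() toggle the CPU's bit in rd->online) and for SCHED_DEADLINE admission: per the cpumask_subset() check in sched_setaffinity() and the dl_bw.bw == 0 check in __sched_setscheduler(), a deadline task's affinity has to cover the whole rd->span and the domain needs non-zero bandwidth. A sketch of that admission guard on plain 64-bit masks (names are illustrative):

#include <stdint.h>
#include <stdbool.h>

/* SCHED_DEADLINE admission sketch: the task's affinity must contain every CPU
 * in the root domain's span, and the domain must have non-zero deadline
 * bandwidth, mirroring the checks shown above. */
static bool dl_admission_ok_sketch(uint64_t rd_span, uint64_t task_affinity,
                                   uint64_t rd_dl_bw)
{
        bool span_covered = (rd_span & ~task_affinity) == 0;   /* span ⊆ affinity */

        return span_covered && rd_dl_bw != 0;
}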
D | debug.c
    623  dl_bw = &cpu_rq(cpu)->rd->dl_bw;    in print_dl_rq()
/kernel/trace/
D | ring_buffer.c
    3328  unsigned int rd;    in ring_buffer_record_off() local
    3332  rd = atomic_read(&buffer->record_disabled);    in ring_buffer_record_off()
    3333  new_rd = rd | RB_BUFFER_OFF;    in ring_buffer_record_off()
    3334  } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);    in ring_buffer_record_off()
    3351  unsigned int rd;    in ring_buffer_record_on() local
    3355  rd = atomic_read(&buffer->record_disabled);    in ring_buffer_record_on()
    3356  new_rd = rd & ~RB_BUFFER_OFF;    in ring_buffer_record_on()
    3357  } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);    in ring_buffer_record_on()
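ring_buffer_record_off() and ring_buffer_record_on() are a classic read-modify-write retry loop: read record_disabled, set or clear the RB_BUFFER_OFF bit, and atomic_cmpxchg() until no other CPU raced in between. The same loop in portable C11, with atomic_compare_exchange_weak() doing the job of atomic_cmpxchg(); the bit value is a placeholder:

#include <stdatomic.h>

#define RB_BUFFER_OFF_SKETCH (1U << 20)         /* placeholder bit for the sketch */

static _Atomic unsigned int record_disabled;

/* Set the OFF bit without disturbing the counter bits packed into the same
 * word, retrying when another thread changed record_disabled in the meantime;
 * on failure, compare_exchange_weak reloads 'rd' with the current value. */
static void record_off_sketch(void)
{
        unsigned int rd = atomic_load_explicit(&record_disabled, memory_order_relaxed);

        while (!atomic_compare_exchange_weak(&record_disabled, &rd,
                                             rd | RB_BUFFER_OFF_SKETCH))
                ;
}

static void record_on_sketch(void)
{
        unsigned int rd = atomic_load_explicit(&record_disabled, memory_order_relaxed);

        while (!atomic_compare_exchange_weak(&record_disabled, &rd,
                                             rd & ~RB_BUFFER_OFF_SKETCH))
                ;
}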