Searched refs:policy (Results 1 – 14 of 14) sorted by relevance
/kernel/sched/

cpufreq_schedutil.c
      25  struct cpufreq_policy *policy;  (member)
      89  if (!cpufreq_this_cpu_can_update(sg_policy->policy))  in sugov_should_update_freq()
     123  cpufreq_driver_fast_switch(sg_policy->policy, next_freq);  in sugov_fast_switch()
     163  struct cpufreq_policy *policy = sg_policy->policy;  in get_next_freq()  (local)
     165  policy->cpuinfo.max_freq : policy->cur;  in get_next_freq()
     168  trace_android_vh_map_util_freq(util, freq, max, &next_freq, policy,  in get_next_freq()
     179  return cpufreq_driver_resolve_freq(policy, freq);  in get_next_freq()
     484  if (sg_policy->policy->fast_switch_enabled) {  in sugov_update_single()
     496  struct cpufreq_policy *policy = sg_policy->policy;  in sugov_next_freq_shared()  (local)
     500  for_each_cpu(j, policy->cpus) {  in sugov_next_freq_shared()
    [all …]
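Note: the get_next_freq() hits are the heart of schedutil: utilization is mapped to a frequency with roughly 25% headroom and then resolved against the driver's frequency table. A minimal sketch of that path, reconstructed from the matches above; the map_util_freq() body is the mainline helper and is an assumption about this tree (which also carries the Android vendor hook seen on line 168):

    /*
     * Sketch of schedutil frequency selection. map_util_freq() adds 25%
     * headroom so the CPU runs a little faster than current utilization
     * strictly requires (assumed mainline behavior).
     */
    static inline unsigned long map_util_freq(unsigned long util,
                                              unsigned long freq,
                                              unsigned long max)
    {
        return (freq + (freq >> 2)) * util / max;
    }

    static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                      unsigned long util, unsigned long max)
    {
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = map_util_freq(util, freq, max);
        /* (caching of sg_policy->next_freq and the vendor hook elided) */
        return cpufreq_driver_resolve_freq(policy, freq);
    }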

cpufreq.c
      73  bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)  in cpufreq_this_cpu_can_update()  (argument)
      75  return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||  in cpufreq_this_cpu_can_update()
      76  (policy->dvfs_possible_from_any_cpu &&  in cpufreq_this_cpu_can_update()
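Note: the snippet cuts off mid-expression. Completed as in mainline (the final RCU operand is an assumption for this tree), the predicate says a CPU may update a policy either when it belongs to policy->cpus, or when the driver accepts DVFS requests from any CPU and a governor is attached:

    bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
    {
        /* Either this CPU is covered by the policy ... */
        return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||
               /* ... or the driver allows DVFS from any CPU and a
                * governor is currently attached (checked under RCU). */
               (policy->dvfs_possible_from_any_cpu &&
                rcu_dereference_sched(policy->governor_data));
    }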

core.c
    1704  static inline int __normal_prio(int policy, int rt_prio, int nice)  in __normal_prio()  (argument)
    1708  if (dl_policy(policy))  in __normal_prio()
    1710  else if (rt_policy(policy))  in __normal_prio()
    1727  return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));  in normal_prio()
    3489  p->policy = SCHED_NORMAL;  in sched_fork()
    5428  int policy = attr->sched_policy;  in __setscheduler_params()  (local)
    5430  if (policy == SETPARAM_POLICY)  in __setscheduler_params()
    5431  policy = p->policy;  in __setscheduler_params()
    5433  p->policy = policy;  in __setscheduler_params()
    5435  if (dl_policy(policy))  in __setscheduler_params()
    [all …]
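Note: the __normal_prio() matches show the policy-to-priority mapping with its interesting branches elided. Filled in from the mainline three-argument form (treat the exact constants as assumptions about this tree):

    static inline int __normal_prio(int policy, int rt_prio, int nice)
    {
        int prio;

        if (dl_policy(policy))
            prio = MAX_DL_PRIO - 1;            /* deadline outranks all RT */
        else if (rt_policy(policy))
            prio = MAX_RT_PRIO - 1 - rt_prio;  /* higher rt_priority -> lower prio value */
        else
            prio = NICE_TO_PRIO(nice);         /* CFS: nice -20..19 -> prio 100..139 */

        return prio;
    }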

sched.h
     163  static inline int idle_policy(int policy)  in idle_policy()  (argument)
     165  return policy == SCHED_IDLE;  in idle_policy()
     167  static inline int fair_policy(int policy)  in fair_policy()  (argument)
     169  return policy == SCHED_NORMAL || policy == SCHED_BATCH;  in fair_policy()
     172  static inline int rt_policy(int policy)  in rt_policy()  (argument)
     174  return policy == SCHED_FIFO || policy == SCHED_RR;  in rt_policy()
     177  static inline int dl_policy(int policy)  in dl_policy()  (argument)
     179  return policy == SCHED_DEADLINE;  in dl_policy()
     181  static inline bool valid_policy(int policy)  in valid_policy()  (argument)
     183  return idle_policy(policy) || fair_policy(policy) ||  in valid_policy()
    [all …]
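Note: the truncated valid_policy() presumably ORs all four predicates, as it does in mainline (an assumption for this particular tree):

    static inline bool valid_policy(int policy)
    {
        return idle_policy(policy) || fair_policy(policy) ||
               rt_policy(policy)   || dl_policy(policy);
    }

Worth noticing for the fair.c hits further down: fair_policy() covers both SCHED_NORMAL and SCHED_BATCH, so SCHED_BATCH tasks are ordinary CFS tasks that merely opt out of some wakeup behavior.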

idle.c
     365  WARN_ON_ONCE(current->policy != SCHED_FIFO);  in play_idle_precise()
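Note: play_idle_precise() implements idle injection, and it insists on a pinned FIFO kthread before pretending to be idle, since only an RT task can safely disable the tick without being preempted by fair tasks. A sketch of the precondition checks as they appear in mainline (the exact set of WARNs is an assumption):

    void play_idle_precise(u64 duration_ns, u64 latency_ns)
    {
        /* Only a FIFO kthread pinned to one CPU may fake idle. */
        WARN_ON_ONCE(current->policy != SCHED_FIFO);
        WARN_ON_ONCE(current->nr_cpus_allowed != 1);
        WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
        WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
        WARN_ON_ONCE(!duration_ns);
        /* ... arm the wakeup timer and enter do_idle() ... */
    }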

deadline.c
    2652  int sched_dl_overflow(struct task_struct *p, int policy,  in sched_dl_overflow()  (argument)
    2657  u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;  in sched_dl_overflow()
    2678  if (dl_policy(policy) && !task_has_dl_policy(p) &&  in sched_dl_overflow()
    2684  } else if (dl_policy(policy) && task_has_dl_policy(p) &&  in sched_dl_overflow()
    2697  } else if (!dl_policy(policy) && task_has_dl_policy(p)) {  in sched_dl_overflow()
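Note: sched_dl_overflow() admits or rejects a SCHED_DEADLINE reservation by comparing bandwidths, where a bandwidth is runtime/period in fixed point. A sketch of the to_ratio() helper referenced on line 2657, assumed to match mainline:

    /* BW_SHIFT is 20 in mainline: bandwidths are Q20 fixed point,
     * so BW_UNIT (1 << 20) represents 100% of one CPU. */
    u64 to_ratio(u64 period, u64 runtime)
    {
        if (runtime == RUNTIME_INF)
            return BW_UNIT;

        if (period == 0)   /* avoid div-by-zero; 0 is safe for callers */
            return 0;

        return div64_u64(runtime << BW_SHIFT, period);
    }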

rt.c
    1239  return (tsk->policy == SCHED_RR) ? 1 : 0;  in rt_se_rr_nr_running()
    2517  if (p->policy != SCHED_RR)  in task_tick_rt()
    2543  if (task->policy == SCHED_RR)  in get_rr_interval_rt()
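Note: all three rt.c hits guard round-robin behavior: only SCHED_RR tasks carry a timeslice, while SCHED_FIFO runs until it blocks or is preempted by a higher priority. Condensed from task_tick_rt() (the requeue walk over the rt_se hierarchy is elided; treat details as assumptions):

    /* Inside task_tick_rt(): */
    if (p->policy != SCHED_RR)     /* SCHED_FIFO has no timeslice at all */
        return;

    if (--p->rt.time_slice)        /* slice not yet used up */
        return;

    p->rt.time_slice = sched_rr_timeslice;   /* refill ... */
    /* ... then requeue to the tail and resched if peers are queued. */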

debug.c
    1018  P(policy);  in proc_sched_show_task()

fair.c
    7305  if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))  in check_preempt_wakeup()
    7537  if (curr->policy != SCHED_BATCH) {  in yield_task_fair()
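Note: these two hits encode the special casing of the non-default fair policies. SCHED_BATCH and SCHED_IDLE tasks get ordinary CFS time, but a waking task only triggers wakeup preemption when it is SCHED_NORMAL, which lets batch workloads run with fewer involuntary context switches. The check on line 7305 reads, in context:

    /* In check_preempt_wakeup(), after the idle-policy special cases:
     * batch and idle wakeups never preempt the currently running task. */
    if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
        return;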

/kernel/

tsacct.c
      52  stats->ac_sched = tsk->policy;  in bacct_add_tsk()

taskstats.c
     648  .policy = taskstats_cmd_get_policy,
     656  .policy = cgroupstats_cmd_get_policy,
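Note: these two hits are a different "policy" entirely: generic-netlink attribute-validation policies for the taskstats interface, not scheduling policies. The pattern looks roughly like this (attribute and op names assumed from mainline taskstats, which had per-op .policy fields in older kernels):

    static const struct nla_policy
    taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX + 1] = {
        [TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
        /* ... cpumask attributes elided ... */
    };

    static const struct genl_ops taskstats_ops[] = {
        {
            .cmd    = TASKSTATS_CMD_GET,
            .doit   = taskstats_user_cmd,
            .policy = taskstats_cmd_get_policy,   /* the ".policy" matched here */
        },
        /* ... */
    };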

cpu.c
    1503  unsigned int policy = current->policy;  in switch_to_rt_policy()  (local)
    1505  if (policy == SCHED_NORMAL)  in switch_to_rt_policy()
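Note: switch_to_rt_policy() is not a mainline function; from the matches it appears to be a vendor/Android helper that temporarily lifts a SCHED_NORMAL caller to an RT policy in the hotplug path. A generic sketch of that pattern, with everything beyond the two matched lines assumed:

    static int switch_to_rt_policy(void)
    {
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        unsigned int policy = current->policy;

        if (policy == SCHED_NORMAL)
            /* Temporarily run the hotplug caller as SCHED_FIFO. */
            return sched_setscheduler_nocheck(current, SCHED_FIFO, &param);

        return 1;   /* already RT (or otherwise special): leave it alone */
    }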

/kernel/trace/

trace.h
     177  unsigned long policy;  (member)

trace.c
    1801  max_data->policy = tsk->policy;  in __update_max_tr()
    3940  data->policy, data->rt_priority);  in print_trace_header()