
Lines Matching +full:switching +full:- +full:freq

1 // SPDX-License-Identifier: GPL-2.0
3 * CPUFreq governor based on scheduler-provided CPU utilization data.
64 /* The field below is for single-CPU policies only: */
79 * Since cpufreq_update_util() is called with rq->lock held for in sugov_should_update_freq()
80 * the @target_cpu, our per-CPU data is fully serialized. in sugov_should_update_freq()
82 * However, drivers cannot in general deal with cross-CPU in sugov_should_update_freq()
84 * sugov_update_commit() call may not for the fast switching platforms. in sugov_should_update_freq()
90 * This is needed on the slow switching platforms too to prevent CPUs in sugov_should_update_freq()
93 if (!cpufreq_this_cpu_can_update(sg_policy->policy)) in sugov_should_update_freq()
96 if (unlikely(sg_policy->limits_changed)) { in sugov_should_update_freq()
97 sg_policy->limits_changed = false; in sugov_should_update_freq()
98 sg_policy->need_freq_update = true; in sugov_should_update_freq()
102 delta_ns = time - sg_policy->last_freq_update_time; in sugov_should_update_freq()
104 return delta_ns >= sg_policy->freq_update_delay_ns; in sugov_should_update_freq()
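
The fragment above ends with the governor's rate limit: a frequency update is only allowed once the time since the last change exceeds freq_update_delay_ns (derived from the rate_limit_us tunable), unless a limits change forces one through. A minimal standalone sketch of that decision, with a hypothetical toy_policy struct standing in for the kernel's sugov_policy:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone model of the rate-limit decision; the field names mirror the
 * kernel's sugov_policy, but this is not kernel code. */
struct toy_policy {
	bool limits_changed;
	bool need_freq_update;
	int64_t last_freq_update_time;	/* ns */
	int64_t freq_update_delay_ns;
};

static bool toy_should_update_freq(struct toy_policy *p, int64_t now_ns)
{
	/* A limits change forces an update regardless of the rate limit. */
	if (p->limits_changed) {
		p->limits_changed = false;
		p->need_freq_update = true;
		return true;
	}

	/* Otherwise honour the delay derived from rate_limit_us. */
	return now_ns - p->last_freq_update_time >= p->freq_update_delay_ns;
}

int main(void)
{
	struct toy_policy p = { .freq_update_delay_ns = 500 * 1000 };

	printf("%d\n", toy_should_update_freq(&p, 100 * 1000));  /* 0: too soon */
	printf("%d\n", toy_should_update_freq(&p, 600 * 1000));  /* 1: past the delay */
	return 0;
}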
110 if (!sg_policy->need_freq_update) { in sugov_update_next_freq()
111 if (sg_policy->next_freq == next_freq) in sugov_update_next_freq()
114 sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS); in sugov_update_next_freq()
117 sg_policy->next_freq = next_freq; in sugov_update_next_freq()
118 sg_policy->last_freq_update_time = time; in sugov_update_next_freq()
127 cpufreq_driver_fast_switch(sg_policy->policy, next_freq); in sugov_fast_switch()
136 if (!sg_policy->work_in_progress) { in sugov_deferred_update()
137 sg_policy->work_in_progress = true; in sugov_deferred_update()
138 irq_work_queue(&sg_policy->irq_work); in sugov_deferred_update()
143 * get_next_freq - Compute a new frequency for a given cpufreq policy.
148 * If the utilization is frequency-invariant, choose the new frequency to be
153 * Otherwise, approximate the would-be frequency-invariant utilization by
160 * The lowest driver-supported frequency which is equal to or greater than the raw
167 struct cpufreq_policy *policy = sg_policy->policy; in get_next_freq()
168 unsigned int freq = arch_scale_freq_invariant() ? in get_next_freq() local
169 policy->cpuinfo.max_freq : policy->cur; in get_next_freq()
171 freq = map_util_freq(util, freq, max); in get_next_freq()
173 if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update) in get_next_freq()
174 return sg_policy->next_freq; in get_next_freq()
176 sg_policy->cached_raw_freq = freq; in get_next_freq()
177 return cpufreq_driver_resolve_freq(policy, freq); in get_next_freq()
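
Per the get_next_freq() comment (partially shown above), the raw frequency is proportional to utilization, next_freq = C * max_freq * util / max with C = 1.25, so the request reaches max_freq once utilization hits 80% of capacity; the result is then resolved to the nearest driver-supported frequency at or above it. A worked standalone example of that mapping; the integer form freq + (freq >> 2) is assumed here as the 1.25 headroom (the real helper is map_util_freq() in include/linux/sched/cpufreq.h, which is not part of this listing):

#include <stdio.h>

/* Standalone model of the util -> frequency mapping described by the
 * get_next_freq() comment: next_freq = 1.25 * max_freq * util / max. */
static unsigned long toy_map_util_freq(unsigned long util,
				       unsigned long freq,
				       unsigned long cap)
{
	/* freq + (freq >> 2) is freq * 1.25 in integer arithmetic. */
	return (freq + (freq >> 2)) * util / cap;
}

int main(void)
{
	unsigned long max_freq = 2000000;	/* kHz */
	unsigned long cap = 1024;		/* SCHED_CAPACITY_SCALE */

	/* util = 512 (50% of capacity) -> 1.25 * 0.5 * max = 1250000 kHz */
	printf("%lu\n", toy_map_util_freq(512, max_freq, cap));

	/* util = 819 (~80% of capacity) -> already requests ~max frequency */
	printf("%lu\n", toy_map_util_freq(819, max_freq, cap));
	return 0;
}

The 25% headroom is what gives the governor room to absorb short utilization spikes before the CPU saturates at the selected frequency.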
192 * The cfs, rt and dl utilization values are the running times measured with rq->clock_task,
193 * which excludes things like IRQ and steal time. The latter are then accrued
208 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { in schedutil_cpu_util()
214 * because of inaccuracies in how we track these -- see in schedutil_cpu_util()
246 * saturation when we should -- something for later. in schedutil_cpu_util()
263 *   U' = irq + (max - irq) / max * U in schedutil_cpu_util()
276 * Ideally we would like to set bw_dl as min/guaranteed freq and util + in schedutil_cpu_util()
277 * bw_dl as requested freq. However, cpufreq is not yet ready for such in schedutil_cpu_util()
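
The formula reconstructed at file line 263 scales the rq utilization to account for time stolen by IRQs: the CFS/RT/DL classes only ever saw the (max - irq) slice of the CPU, so their utilization is compressed into that slice and the IRQ utilization is added on top. A standalone arithmetic sketch (toy_scale_irq() is a hypothetical stand-in for the kernel's scaling helper):

#include <stdio.h>

/* Standalone model of the IRQ scaling step documented above:
 *   U' = irq + U * (max - irq) / max */
static unsigned long toy_scale_irq(unsigned long util,
				   unsigned long irq,
				   unsigned long max)
{
	return irq + util * (max - irq) / max;
}

int main(void)
{
	unsigned long max = 1024;

	/* No IRQ pressure: utilization passes through unchanged. */
	printf("%lu\n", toy_scale_irq(512, 0, max));	/* 512 */

	/* 25% of the CPU spent in IRQ: the same rq utilization is squeezed
	 * into the remaining 75% and the IRQ time is added back on top. */
	printf("%lu\n", toy_scale_irq(512, 256, max));	/* 256 + 384 = 640 */
	return 0;
}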
288 struct rq *rq = cpu_rq(sg_cpu->cpu); in sugov_get_util()
290 unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu); in sugov_get_util()
292 sg_cpu->max = max; in sugov_get_util()
293 sg_cpu->bw_dl = cpu_bw_dl(rq); in sugov_get_util()
296 return cpu_util_freq_walt(sg_cpu->cpu); in sugov_get_util()
299 return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL); in sugov_get_util()
303 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
316 s64 delta_ns = time - sg_cpu->last_update; in sugov_iowait_reset()
322 sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0; in sugov_iowait_reset()
323 sg_cpu->iowait_boost_pending = set_iowait_boost; in sugov_iowait_reset()
329 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
348 if (sg_cpu->iowait_boost && in sugov_iowait_boost()
357 if (sg_cpu->iowait_boost_pending) in sugov_iowait_boost()
359 sg_cpu->iowait_boost_pending = true; in sugov_iowait_boost()
362 if (sg_cpu->iowait_boost) { in sugov_iowait_boost()
363 sg_cpu->iowait_boost = in sugov_iowait_boost()
364 min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE); in sugov_iowait_boost()
369 sg_cpu->iowait_boost = IOWAIT_BOOST_MIN; in sugov_iowait_boost()
373 * sugov_iowait_apply() - Apply the IO boost to a CPU.
397 if (!sg_cpu->iowait_boost) in sugov_iowait_apply()
404 if (!sg_cpu->iowait_boost_pending) { in sugov_iowait_apply()
408 sg_cpu->iowait_boost >>= 1; in sugov_iowait_apply()
409 if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) { in sugov_iowait_apply()
410 sg_cpu->iowait_boost = 0; in sugov_iowait_apply()
415 sg_cpu->iowait_boost_pending = false; in sugov_iowait_apply()
421 boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT; in sugov_iowait_apply()
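
Taken together, the three fragments above describe the iowait boost lifecycle: sugov_iowait_boost() starts the boost at IOWAIT_BOOST_MIN and doubles it on back-to-back iowait wakeups up to SCHED_CAPACITY_SCALE, while sugov_iowait_apply() halves it on every update that arrives without a pending wakeup, clears it once it falls below the minimum, and scales the survivor by the CPU capacity. A standalone model of that ramp; IOWAIT_BOOST_MIN = SCHED_CAPACITY_SCALE / 8 and the >> 10 capacity shift are assumptions here, since their definitions are not part of this listing:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)	/* assumed minimum */

/* Standalone model of the iowait boost ramp sketched by the fragments above. */
struct toy_cpu {
	unsigned int iowait_boost;
	int iowait_boost_pending;
};

/* On an iowait wakeup: start at the minimum, then double on each
 * back-to-back wakeup, capped at SCHED_CAPACITY_SCALE. */
static void toy_iowait_boost(struct toy_cpu *c)
{
	if (c->iowait_boost_pending)
		return;
	c->iowait_boost_pending = 1;

	if (c->iowait_boost) {
		c->iowait_boost <<= 1;
		if (c->iowait_boost > SCHED_CAPACITY_SCALE)
			c->iowait_boost = SCHED_CAPACITY_SCALE;
	} else {
		c->iowait_boost = IOWAIT_BOOST_MIN;
	}
}

/* On every frequency update: halve the boost once no wakeup is pending,
 * drop it entirely below the minimum, then scale it by the CPU capacity. */
static unsigned long toy_iowait_apply(struct toy_cpu *c, unsigned long max)
{
	if (!c->iowait_boost)
		return 0;

	if (!c->iowait_boost_pending) {
		c->iowait_boost >>= 1;
		if (c->iowait_boost < IOWAIT_BOOST_MIN) {
			c->iowait_boost = 0;
			return 0;
		}
	}
	c->iowait_boost_pending = 0;

	/* >> 10 stands in for SCHED_CAPACITY_SHIFT. */
	return ((unsigned long)c->iowait_boost * max) >> 10;
}

int main(void)
{
	struct toy_cpu c = { 0 };

	for (int i = 0; i < 4; i++) {		/* wakeups ramp 128..1024 */
		toy_iowait_boost(&c);
		printf("boost %lu\n", toy_iowait_apply(&c, 1024));
	}
	while (c.iowait_boost)			/* decay: 512, 256, 128, then 0 */
		printf("decay %lu\n", toy_iowait_apply(&c, 1024));
	return 0;
}

The exponential ramp and decay mean a burst of iowait wakeups quickly pushes the CPU to a high frequency, while an idle device lets the boost fade within a few update periods.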
428 unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu); in sugov_cpu_is_busy()
429 bool ret = idle_calls == sg_cpu->saved_idle_calls; in sugov_cpu_is_busy()
431 sg_cpu->saved_idle_calls = idle_calls; in sugov_cpu_is_busy()
444 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) in ignore_dl_rate_limit()
445 sg_policy->limits_changed = true; in ignore_dl_rate_limit()
452 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_single()
455 unsigned int cached_freq = sg_policy->cached_raw_freq; in sugov_update_single()
465 sg_cpu->last_update = time; in sugov_update_single()
473 max = sg_cpu->max; in sugov_update_single()
480 if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) { in sugov_update_single()
481 next_f = sg_policy->next_freq; in sugov_update_single()
483 /* Restore cached freq as next_freq has changed */ in sugov_update_single()
484 sg_policy->cached_raw_freq = cached_freq; in sugov_update_single()
488 * This code runs under rq->lock for the target CPU, so it won't run in sugov_update_single()
492 if (sg_policy->policy->fast_switch_enabled) { in sugov_update_single()
496 raw_spin_lock_irqsave(&sg_policy->update_lock, irq_flag); in sugov_update_single()
498 raw_spin_lock(&sg_policy->update_lock); in sugov_update_single()
502 raw_spin_unlock_irqrestore(&sg_policy->update_lock, irq_flag); in sugov_update_single()
504 raw_spin_unlock(&sg_policy->update_lock); in sugov_update_single()
511 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_next_freq_shared()
512 struct cpufreq_policy *policy = sg_policy->policy; in sugov_next_freq_shared()
516 for_each_cpu(j, policy->cpus) { in sugov_next_freq_shared()
521 j_max = j_sg_cpu->max; in sugov_next_freq_shared()
531 sched_get_max_group_util(policy->cpus, &sg_policy->rtg_util, &sg_policy->rtg_freq); in sugov_next_freq_shared()
532 util = max(sg_policy->rtg_util, util); in sugov_next_freq_shared()
542 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
551 raw_spin_lock_irqsave(&sg_policy->update_lock, irq_flag);
553 raw_spin_lock(&sg_policy->update_lock);
557 sg_cpu->last_update = time;
569 if (sg_policy->policy->fast_switch_enabled)
576 raw_spin_unlock_irqrestore(&sg_policy->update_lock, irq_flag);
578 raw_spin_unlock(&sg_policy->update_lock);
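
In the shared-policy path above, sugov_next_freq_shared() walks every CPU in policy->cpus and keeps the largest util/max ratio (further raised by the related-thread-group value from sched_get_max_group_util() in this WALT-enabled tree), so a shared frequency domain follows its busiest CPU. A minimal standalone sketch of that aggregation, with a hypothetical toy_cpu_util array in place of the per-CPU sugov_cpu data and the same assumed 1.25 mapping as in the earlier example:

#include <stdio.h>

/* Standalone model of the shared-policy aggregation: a frequency domain
 * is driven by the maximum utilization ratio of the CPUs it contains. */
struct toy_cpu_util {
	unsigned long util;
	unsigned long max;	/* capacity of this CPU */
};

static unsigned long toy_next_freq_shared(const struct toy_cpu_util *cpus,
					  int nr, unsigned long max_freq)
{
	unsigned long util = 0, max = 1;

	for (int j = 0; j < nr; j++) {
		/* Compare util/max cross-multiplied; the busiest CPU wins. */
		if (cpus[j].util * max > util * cpus[j].max) {
			util = cpus[j].util;
			max = cpus[j].max;
		}
	}
	/* Same assumed 1.25 headroom mapping as in the single-CPU example. */
	return (max_freq + (max_freq >> 2)) * util / max;
}

int main(void)
{
	struct toy_cpu_util cpus[] = {
		{ .util = 200, .max = 1024 },
		{ .util = 700, .max = 1024 },	/* busiest CPU drives the domain */
		{ .util = 100, .max = 1024 },
	};

	printf("%lu kHz\n", toy_next_freq_shared(cpus, 3, 2000000));
	return 0;
}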
585 unsigned int freq; local
589 * Hold sg_policy->update_lock briefly to handle the case where
590 * sg_policy->next_freq is read here, and then updated by
598 raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
599 freq = sg_policy->next_freq;
600 sg_policy->work_in_progress = false;
601 raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
603 mutex_lock(&sg_policy->work_lock);
604 __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
605 mutex_unlock(&sg_policy->work_lock);
614 kthread_queue_work(&sg_policy->worker, &sg_policy->work);
631 return sprintf(buf, "%u\n", tunables->rate_limit_us);
642 return -EINVAL;
644 tunables->rate_limit_us = rate_limit_us;
646 list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
647 sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
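
The store handler above parses the new rate_limit_us value and propagates it, converted to nanoseconds, to every sugov_policy attached to this tunables set; that is the freq_update_delay_ns compared in sugov_should_update_freq(). A one-line arithmetic check of the conversion (NSEC_PER_USEC is 1000):

#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
	/* Writing 2000 to rate_limit_us allows at most one frequency
	 * change every 2,000,000 ns (2 ms) per policy. */
	unsigned long long rate_limit_us = 2000;
	unsigned long long freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	printf("%llu ns\n", freq_update_delay_ns);	/* 2000000 ns */
	return 0;
}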
685 sg_policy->policy = policy;
686 raw_spin_lock_init(&sg_policy->update_lock);
712 struct cpufreq_policy *policy = sg_policy->policy;
716 if (policy->fast_switch_enabled)
719 kthread_init_work(&sg_policy->work, sugov_work);
720 kthread_init_worker(&sg_policy->worker);
721 thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
723 cpumask_first(policy->related_cpus));
736 sg_policy->thread = thread;
737 kthread_bind_mask(thread, policy->related_cpus);
738 init_irq_work(&sg_policy->irq_work, sugov_irq_work);
739 mutex_init(&sg_policy->work_lock);
749 if (sg_policy->policy->fast_switch_enabled)
752 kthread_flush_worker(&sg_policy->worker);
753 kthread_stop(sg_policy->thread);
754 mutex_destroy(&sg_policy->work_lock);
763 gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
783 if (policy->governor_data)
784 return -EBUSY;
790 ret = -ENOMEM;
802 ret = -EINVAL;
805 policy->governor_data = sg_policy;
806 sg_policy->tunables = global_tunables;
808 gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
814 ret = -ENOMEM;
818 tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
820 policy->governor_data = sg_policy;
821 sg_policy->tunables = tunables;
823 ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
834 kobject_put(&tunables->attr_set.kobj);
835 policy->governor_data = NULL;
854 struct sugov_policy *sg_policy = policy->governor_data;
855 struct sugov_tunables *tunables = sg_policy->tunables;
860 count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
861 policy->governor_data = NULL;
874 struct sugov_policy *sg_policy = policy->governor_data;
877 sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
878 sg_policy->last_freq_update_time = 0;
879 sg_policy->next_freq = 0;
880 sg_policy->work_in_progress = false;
881 sg_policy->limits_changed = false;
882 sg_policy->cached_raw_freq = 0;
884 sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
886 for_each_cpu(cpu, policy->cpus) {
890 sg_cpu->cpu = cpu;
891 sg_cpu->sg_policy = sg_policy;
894 for_each_cpu(cpu, policy->cpus) {
897 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
907 struct sugov_policy *sg_policy = policy->governor_data;
910 for_each_cpu(cpu, policy->cpus)
915 if (!policy->fast_switch_enabled) {
916 irq_work_sync(&sg_policy->irq_work);
917 kthread_cancel_work_sync(&sg_policy->work);
923 struct sugov_policy *sg_policy = policy->governor_data;
925 if (!policy->fast_switch_enabled) {
926 mutex_lock(&sg_policy->work_lock);
928 mutex_unlock(&sg_policy->work_lock);
931 sg_policy->limits_changed = true;
975 if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {