Lines matching refs: sg_cpu (references to struct sugov_cpu in the schedutil cpufreq governor, kernel/sched/cpufreq_schedutil.c)
165 static void sugov_get_util(struct sugov_cpu *sg_cpu) in sugov_get_util() argument
167 struct rq *rq = cpu_rq(sg_cpu->cpu); in sugov_get_util()
169 sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu); in sugov_get_util()
170 sg_cpu->bw_dl = cpu_bw_dl(rq); in sugov_get_util()
171 sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu), in sugov_get_util()
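The matches in sugov_get_util() show the three values cached once per update: the CPU's capacity, its reserved deadline bandwidth, and the aggregate utilization. A minimal userspace model of that caching step; the *_stub() helpers and their values are made-up stand-ins, not kernel API:

```c
/* Model of the sugov_get_util() caching step: snapshot capacity, DL
 * bandwidth, and aggregate utilization clamped to capacity. */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

struct sugov_cpu_model {
	int cpu;
	unsigned long max;	/* CPU capacity: denominator for later scaling */
	unsigned long bw_dl;	/* bandwidth reserved for deadline tasks */
	unsigned long util;	/* aggregate utilization across sched classes */
};

static unsigned long cpu_capacity_stub(int cpu) { (void)cpu; return SCHED_CAPACITY_SCALE; }
static unsigned long cpu_bw_dl_stub(int cpu)    { (void)cpu; return 100UL; }

static unsigned long effective_util_stub(unsigned long cfs, unsigned long rt,
					 unsigned long max)
{
	unsigned long util = cfs + rt;	/* crude sum, not the kernel's exact math */

	return util > max ? max : util;
}

static void model_get_util(struct sugov_cpu_model *sg_cpu)
{
	sg_cpu->max   = cpu_capacity_stub(sg_cpu->cpu);
	sg_cpu->bw_dl = cpu_bw_dl_stub(sg_cpu->cpu);
	sg_cpu->util  = effective_util_stub(250, 50, sg_cpu->max);
}

int main(void)
{
	struct sugov_cpu_model c = { .cpu = 0 };

	model_get_util(&c);
	printf("cpu%d: util=%lu/%lu bw_dl=%lu\n", c.cpu, c.util, c.max, c.bw_dl);
	return 0;
}
```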
186 static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time, in sugov_iowait_reset() argument
189 s64 delta_ns = time - sg_cpu->last_update; in sugov_iowait_reset()
195 sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0; in sugov_iowait_reset()
196 sg_cpu->iowait_boost_pending = set_iowait_boost; in sugov_iowait_reset()
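The sugov_iowait_reset() matches show the boost being cleared, or re-seeded at the minimum, once the CPU has been quiet long enough. A self-contained sketch of that decision, assuming a roughly one-tick expiry window; the TICK_NSEC value here is a placeholder, not read from a running kernel:

```c
/* Model of the iowait-boost reset: if more than ~one tick has passed
 * since the last update, the boost is stale and is either dropped or
 * re-seeded at the minimum value. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024U
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
#define TICK_NSEC		4000000LL	/* placeholder: 250 Hz tick */

struct cpu_boost {
	uint64_t last_update;
	unsigned int iowait_boost;
	bool iowait_boost_pending;
};

static bool iowait_reset(struct cpu_boost *c, uint64_t now, bool set_boost)
{
	int64_t delta_ns = (int64_t)(now - c->last_update);

	if (delta_ns <= TICK_NSEC)		/* recent activity: keep the boost */
		return false;

	c->iowait_boost = set_boost ? IOWAIT_BOOST_MIN : 0;
	c->iowait_boost_pending = set_boost;
	return true;
}

int main(void)
{
	struct cpu_boost c = { .last_update = 0, .iowait_boost = 512 };

	printf("reset after 10ms idle: %d, boost=%u\n",
	       (int)iowait_reset(&c, 10000000ULL, false), c.iowait_boost);
	return 0;
}
```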
215 static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, in sugov_iowait_boost() argument
221 if (sg_cpu->iowait_boost && in sugov_iowait_boost()
222 sugov_iowait_reset(sg_cpu, time, set_iowait_boost)) in sugov_iowait_boost()
230 if (sg_cpu->iowait_boost_pending) in sugov_iowait_boost()
232 sg_cpu->iowait_boost_pending = true; in sugov_iowait_boost()
235 if (sg_cpu->iowait_boost) { in sugov_iowait_boost()
236 sg_cpu->iowait_boost = in sugov_iowait_boost()
237 min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE); in sugov_iowait_boost()
242 sg_cpu->iowait_boost = IOWAIT_BOOST_MIN; in sugov_iowait_boost()
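The sugov_iowait_boost() matches show the ramp-up: a fresh boost starts at the minimum, and each further iowait wakeup doubles it until it saturates at full capacity, with at most one increase per update window. A runnable model of that ramp; the constant names mirror the matched lines, everything else is a stand-in:

```c
/* Model of the iowait-boost ramp: each new iowait wakeup doubles the
 * boost, capped at SCHED_CAPACITY_SCALE; the first wakeup seeds it at
 * IOWAIT_BOOST_MIN. */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024U
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

struct cpu_boost {
	unsigned int iowait_boost;
	bool iowait_boost_pending;
};

static void iowait_boost_on_wakeup(struct cpu_boost *c)
{
	/* Only one increase per frequency-update window. */
	if (c->iowait_boost_pending)
		return;
	c->iowait_boost_pending = true;

	if (c->iowait_boost) {
		unsigned int doubled = c->iowait_boost << 1;

		c->iowait_boost = doubled > SCHED_CAPACITY_SCALE ?
				  SCHED_CAPACITY_SCALE : doubled;
	} else {
		c->iowait_boost = IOWAIT_BOOST_MIN;
	}
}

int main(void)
{
	struct cpu_boost c = { 0 };

	for (int i = 0; i < 5; i++) {
		iowait_boost_on_wakeup(&c);
		c.iowait_boost_pending = false;	/* pretend a freq update ran */
		printf("wakeup %d: boost=%u\n", i + 1, c.iowait_boost);
	}
	return 0;
}
```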
262 static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time) in sugov_iowait_apply() argument
267 if (!sg_cpu->iowait_boost) in sugov_iowait_apply()
271 if (sugov_iowait_reset(sg_cpu, time, false)) in sugov_iowait_apply()
274 if (!sg_cpu->iowait_boost_pending) { in sugov_iowait_apply()
278 sg_cpu->iowait_boost >>= 1; in sugov_iowait_apply()
279 if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) { in sugov_iowait_apply()
280 sg_cpu->iowait_boost = 0; in sugov_iowait_apply()
285 sg_cpu->iowait_boost_pending = false; in sugov_iowait_apply()
291 boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT; in sugov_iowait_apply()
292 boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL); in sugov_iowait_apply()
293 if (sg_cpu->util < boost) in sugov_iowait_apply()
294 sg_cpu->util = boost; in sugov_iowait_apply()
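The sugov_iowait_apply() matches show the other half of the mechanism: if no new iowait wakeup arrived, the boost decays by half and is dropped once below the minimum; whatever survives is scaled to the CPU's capacity and acts as a floor on utilization. A sketch of that logic; the uclamp clamping visible on the matched lines is omitted here:

```c
/* Model of applying the iowait boost: decay by half when no fresh wakeup
 * is pending, drop it below the minimum, otherwise use the capacity-scaled
 * boost as a utilization floor. */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1U << SCHED_CAPACITY_SHIFT)
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

struct cpu_boost {
	unsigned int iowait_boost;
	bool iowait_boost_pending;
	unsigned long util;
	unsigned long max;
};

static void iowait_apply(struct cpu_boost *c)
{
	unsigned long boost;

	if (!c->iowait_boost)
		return;

	if (!c->iowait_boost_pending) {
		/* No fresh iowait wakeup: decay the boost. */
		c->iowait_boost >>= 1;
		if (c->iowait_boost < IOWAIT_BOOST_MIN) {
			c->iowait_boost = 0;
			return;
		}
	}
	c->iowait_boost_pending = false;

	/* Scale the boost to this CPU's capacity and floor util with it. */
	boost = ((unsigned long)c->iowait_boost * c->max) >> SCHED_CAPACITY_SHIFT;
	if (c->util < boost)
		c->util = boost;
}

int main(void)
{
	struct cpu_boost c = { .iowait_boost = 512, .util = 100, .max = 1024 };

	for (int i = 0; i < 4; i++) {
		iowait_apply(&c);
		printf("step %d: boost=%u util=%lu\n", i, c.iowait_boost, c.util);
	}
	return 0;
}
```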
298 static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) in sugov_cpu_is_busy() argument
300 unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu); in sugov_cpu_is_busy()
301 bool ret = idle_calls == sg_cpu->saved_idle_calls; in sugov_cpu_is_busy()
303 sg_cpu->saved_idle_calls = idle_calls; in sugov_cpu_is_busy()
307 static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } in sugov_cpu_is_busy() argument
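The sugov_cpu_is_busy() matches show the heuristic: snapshot the per-CPU idle-entry counter and, if it has not moved since the previous update, conclude the CPU never went idle in between. The second match is the stub used when the counter is not available. A small model of that comparison; the counter here is just a plain field standing in for the nohz idle-call count:

```c
/* Model of the busy heuristic: an unchanged idle-entry counter between two
 * frequency updates means the CPU stayed busy the whole time. */
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	unsigned long idle_calls;	/* stand-in for the nohz idle-call counter */
	unsigned long saved_idle_calls;
};

static bool cpu_is_busy(struct cpu_state *c)
{
	bool busy = (c->idle_calls == c->saved_idle_calls);

	c->saved_idle_calls = c->idle_calls;
	return busy;
}

int main(void)
{
	struct cpu_state c = { .idle_calls = 41, .saved_idle_calls = 40 };

	printf("busy? %d\n", (int)cpu_is_busy(&c));	/* 0: it idled since last time */
	printf("busy? %d\n", (int)cpu_is_busy(&c));	/* 1: no idle entry in between */
	return 0;
}
```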
314 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu) in ignore_dl_rate_limit() argument
316 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) in ignore_dl_rate_limit()
317 sg_cpu->sg_policy->limits_changed = true; in ignore_dl_rate_limit()
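The ignore_dl_rate_limit() matches show an escape hatch: when the reserved deadline bandwidth grows past the value cached at the last update, the policy is flagged so the next update can bypass the usual rate limit. A minimal sketch of that check, with simplified stand-in structures:

```c
/* Model of the deadline-bandwidth escape hatch: a bandwidth increase flags
 * the policy so the next update is not held back by the rate limit. */
#include <stdbool.h>
#include <stdio.h>

struct policy_model {
	bool limits_changed;
};

struct cpu_model {
	unsigned long bw_dl;		/* DL bandwidth cached at the last update */
	struct policy_model *policy;
};

static void maybe_ignore_rate_limit(struct cpu_model *c, unsigned long cur_bw_dl)
{
	if (cur_bw_dl > c->bw_dl)
		c->policy->limits_changed = true;
}

int main(void)
{
	struct policy_model p = { .limits_changed = false };
	struct cpu_model c = { .bw_dl = 100, .policy = &p };

	maybe_ignore_rate_limit(&c, 150);	/* a new DL task raised the bandwidth */
	printf("limits_changed=%d\n", (int)p.limits_changed);
	return 0;
}
```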
320 static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, in sugov_update_single_common() argument
323 sugov_iowait_boost(sg_cpu, time, flags); in sugov_update_single_common()
324 sg_cpu->last_update = time; in sugov_update_single_common()
326 ignore_dl_rate_limit(sg_cpu); in sugov_update_single_common()
328 if (!sugov_should_update_freq(sg_cpu->sg_policy, time)) in sugov_update_single_common()
331 sugov_get_util(sg_cpu); in sugov_update_single_common()
332 sugov_iowait_apply(sg_cpu, time); in sugov_update_single_common()
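The sugov_update_single_common() matches spell out the per-update ordering: iowait bookkeeping and the timestamp are refreshed unconditionally, the deadline check and rate limit come next, and utilization is only recomputed and boosted when an update will actually happen. A sketch of that ordering; the helper functions are hypothetical stand-ins for the steps modelled above:

```c
/* Model of the single-CPU update ordering: cheap bookkeeping always,
 * expensive util refresh only when the rate limit allows an update. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpu_model {
	uint64_t last_update;
	unsigned long util;
};

static void account_iowait(struct cpu_model *c, uint64_t t) { (void)c; (void)t; }
static void check_dl_limits(struct cpu_model *c)            { (void)c; }
static bool should_update_freq(uint64_t t)                  { return (t % 2) == 0; }
static void refresh_util(struct cpu_model *c)               { c->util += 10; }
static void apply_iowait_boost(struct cpu_model *c)         { (void)c; }

static bool update_single_common(struct cpu_model *c, uint64_t time)
{
	account_iowait(c, time);	/* always: keep the boost state fresh */
	c->last_update = time;

	check_dl_limits(c);		/* may arm a rate-limit bypass */

	if (!should_update_freq(time))	/* rate-limited: stop here */
		return false;

	refresh_util(c);		/* only pay for this when updating */
	apply_iowait_boost(c);
	return true;
}

int main(void)
{
	struct cpu_model c = { 0 };

	printf("t=1 -> update? %d\n", (int)update_single_common(&c, 1));
	printf("t=2 -> update? %d\n", (int)update_single_common(&c, 2));
	return 0;
}
```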
340 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_single_freq() local
341 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_single_freq()
345 if (!sugov_update_single_common(sg_cpu, time, flags)) in sugov_update_single_freq()
348 next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max); in sugov_update_single_freq()
355 if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) && in sugov_update_single_freq()
356 sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq && in sugov_update_single_freq()
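The sugov_update_single_freq() matches show the frequency being derived from util and max and then protected against ramp-downs while the CPU is continuously busy. A sketch of that path, assuming the ~1.25 headroom mapping schedutil uses in get_next_freq(); the uclamp exception on the matched lines is left out for brevity:

```c
/* Model of the single-CPU frequency path: map util to a target frequency
 * with ~1.25 headroom, then hold the previous frequency if the CPU has
 * been busy and the new target would be lower. */
#include <stdbool.h>
#include <stdio.h>

struct freq_policy {
	unsigned int max_freq_khz;
	unsigned int next_freq_khz;
};

static unsigned int next_freq(const struct freq_policy *p,
			      unsigned long util, unsigned long max)
{
	/* freq ~= 1.25 * max_freq * util / max */
	return (unsigned int)((p->max_freq_khz + (p->max_freq_khz >> 2)) * util / max);
}

static unsigned int pick_freq(struct freq_policy *p, unsigned long util,
			      unsigned long max, bool cpu_busy)
{
	unsigned int f = next_freq(p, util, max);

	/* A busy CPU with shrinking util is likely mid-burst: hold the
	 * previous frequency instead of dropping it. */
	if (cpu_busy && f < p->next_freq_khz)
		f = p->next_freq_khz;

	p->next_freq_khz = f;
	return f;
}

int main(void)
{
	struct freq_policy p = { .max_freq_khz = 2000000, .next_freq_khz = 2000000 };

	printf("low util, busy: %u kHz\n", pick_freq(&p, 300, 1024, true));
	printf("low util, not busy: %u kHz\n", pick_freq(&p, 300, 1024, false));
	return 0;
}
```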
384 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_single_perf() local
385 unsigned long prev_util = sg_cpu->util; in sugov_update_single_perf()
397 if (!sugov_update_single_common(sg_cpu, time, flags)) in sugov_update_single_perf()
406 if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) && in sugov_update_single_perf()
407 sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util) in sugov_update_single_perf()
408 sg_cpu->util = prev_util; in sugov_update_single_perf()
410 cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl), in sugov_update_single_perf()
411 map_util_perf(sg_cpu->util), sg_cpu->max); in sugov_update_single_perf()
413 sg_cpu->sg_policy->last_freq_update_time = time; in sugov_update_single_perf()
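The sugov_update_single_perf() matches show the alternative path for drivers that take performance hints directly: instead of picking a frequency, the governor passes a minimum level derived from the deadline bandwidth and a target level derived from total utilization, both with about a quarter of headroom as map_util_perf() does on the matched lines. A sketch under those assumptions; driver_adjust_perf() is a hypothetical stand-in for the driver hook:

```c
/* Model of the direct performance-hint path: min level from DL bandwidth,
 * target level from utilization, both with ~25% headroom (util + util/4). */
#include <stdbool.h>
#include <stdio.h>

static unsigned long add_headroom(unsigned long util)
{
	return util + (util >> 2);
}

/* Hypothetical stand-in for the cpufreq driver's adjust_perf callback. */
static void driver_adjust_perf(int cpu, unsigned long min_perf,
			       unsigned long target_perf, unsigned long capacity)
{
	printf("cpu%d: min=%lu target=%lu cap=%lu\n",
	       cpu, min_perf, target_perf, capacity);
}

int main(void)
{
	unsigned long bw_dl = 100, util = 600, max = 1024, prev_util = 700;
	bool busy = true;

	/* Same anti-ramp-down rule as the frequency path, applied to util. */
	if (busy && util < prev_util)
		util = prev_util;

	driver_adjust_perf(0, add_headroom(bw_dl), add_headroom(util), max);
	return 0;
}
```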
416 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) in sugov_next_freq_shared() argument
418 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_next_freq_shared()
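Only the head of sugov_next_freq_shared() matches here, but its job is to aggregate over all CPUs sharing the policy: the policy has to run fast enough for the most demanding CPU. A sketch of that aggregation under that assumption, reusing the same headroom mapping as the single-CPU model:

```c
/* Model of the shared-policy aggregation: pick the CPU with the largest
 * util/capacity ratio, then map that ratio to a frequency. */
#include <stdio.h>

struct cpu_load {
	unsigned long util;
	unsigned long max;
};

static unsigned int shared_next_freq(const struct cpu_load *cpus, int n,
				     unsigned int max_freq_khz)
{
	unsigned long util = 0, max = 1;

	for (int i = 0; i < n; i++) {
		/* Keep the (util, max) pair with the largest ratio. */
		if (cpus[i].util * max > util * cpus[i].max) {
			util = cpus[i].util;
			max = cpus[i].max;
		}
	}
	/* Same ~1.25 headroom mapping as the single-CPU model. */
	return (unsigned int)((max_freq_khz + (max_freq_khz >> 2)) * util / max);
}

int main(void)
{
	struct cpu_load cpus[] = { { 200, 1024 }, { 700, 1024 }, { 300, 1024 } };

	printf("shared freq: %u kHz\n", shared_next_freq(cpus, 3, 2000000));
	return 0;
}
```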
444 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_shared() local
445 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_shared()
450 sugov_iowait_boost(sg_cpu, time, flags); in sugov_update_shared()
451 sg_cpu->last_update = time; in sugov_update_shared()
453 ignore_dl_rate_limit(sg_cpu); in sugov_update_shared()
456 next_f = sugov_next_freq_shared(sg_cpu, time); in sugov_update_shared()
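The sugov_update_shared() matches repeat the bookkeeping steps from the single-CPU path, but here several CPUs can fire updates concurrently, so the shared policy state is only touched under a policy-wide lock. A userspace sketch of that structure, using a pthread mutex as a stand-in for the kernel's spinlock and a made-up 1 ms rate limit:

```c
/* Model of the shared update path: the per-policy timestamp and frequency
 * are only modified under a policy-wide lock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct policy_model {
	pthread_mutex_t lock;
	uint64_t last_freq_update_time;
	unsigned int next_freq_khz;
};

static bool should_update(struct policy_model *p, uint64_t now)
{
	return now - p->last_freq_update_time >= 1000000;	/* 1 ms rate limit */
}

static void update_shared(struct policy_model *p, uint64_t now, unsigned int freq)
{
	pthread_mutex_lock(&p->lock);

	if (should_update(p, now)) {
		p->next_freq_khz = freq;
		p->last_freq_update_time = now;
	}

	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct policy_model p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.next_freq_khz = 500000,
	};

	update_shared(&p, 2000000, 1200000);
	printf("next_freq=%u kHz\n", p.next_freq_khz);
	return 0;
}
```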
777 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); in sugov_start() local
779 memset(sg_cpu, 0, sizeof(*sg_cpu)); in sugov_start()
780 sg_cpu->cpu = cpu; in sugov_start()
781 sg_cpu->sg_policy = sg_policy; in sugov_start()
792 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); in sugov_start() local
794 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu); in sugov_start()
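The two sugov_start() matches reflect a two-pass start-up: first every CPU's per-CPU state is zeroed and re-pointed at the policy, and only afterwards is the update callback installed, so the hook never runs against stale state. A self-contained sketch of that sequence; the per-CPU array and hook type are simplified stand-ins:

```c
/* Model of governor start-up: reset per-CPU state in one pass, install the
 * update callback in a second pass. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4

struct policy_model { unsigned int id; };

struct cpu_model {
	int cpu;
	struct policy_model *policy;
	void (*update_hook)(struct cpu_model *);
};

static struct cpu_model per_cpu_state[NR_CPUS];

static void update_hook(struct cpu_model *c)
{
	printf("update on cpu%d (policy %u)\n", c->cpu, c->policy->id);
}

static void governor_start(struct policy_model *policy, const int *cpus, int n)
{
	/* First pass: reset every CPU's state. */
	for (int i = 0; i < n; i++) {
		struct cpu_model *c = &per_cpu_state[cpus[i]];

		memset(c, 0, sizeof(*c));
		c->cpu = cpus[i];
		c->policy = policy;
	}
	/* Second pass: only now expose the callback. */
	for (int i = 0; i < n; i++)
		per_cpu_state[cpus[i]].update_hook = update_hook;
}

int main(void)
{
	struct policy_model policy = { .id = 0 };
	int cpus[] = { 0, 1 };

	governor_start(&policy, cpus, 2);
	per_cpu_state[1].update_hook(&per_cpu_state[1]);
	return 0;
}
```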