Searched refs:util (Results 1 – 4 of 4) sorted by relevance
/kernel/sched/
cpufreq_schedutil.c
    171  unsigned long util, unsigned long max) in get_next_freq() argument
    178  trace_android_vh_map_util_freq(util, freq, max, &next_freq); in get_next_freq()
    182  freq = map_util_freq(util, freq, max); in get_next_freq()
    216  unsigned long dl_util, util, irq; in schedutil_cpu_util() local
    245  util = util_cfs + cpu_util_rt(rq); in schedutil_cpu_util()
    247  util = uclamp_rq_util_with(rq, util, p); in schedutil_cpu_util()
    260  if (util + dl_util >= max) in schedutil_cpu_util()
    268  util += dl_util; in schedutil_cpu_util()
    279  util = scale_irq_capacity(util, irq, max); in schedutil_cpu_util()
    280  util += irq; in schedutil_cpu_util()
    [all …]
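Note: the get_next_freq() hits are schedutil's util-to-frequency mapping, and trace_android_vh_map_util_freq() appears to be an Android vendor hook that can supply its own next_freq before the default mapping runs. In mainline, map_util_freq() computes freq * 1.25 * util / max, with the 25% headroom coming from `freq + (freq >> 2)`. A minimal standalone sketch of that formula, assuming the mainline definition (the surrounding sugov machinery is omitted and the example values are hypothetical):

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024UL

    /* Mirrors the mainline map_util_freq(): freq * 1.25 * util / max.
     * The 25% headroom keeps the CPU from running flat-out at the
     * exact measured utilization. */
    static unsigned long map_util_freq(unsigned long util,
                                       unsigned long freq, unsigned long max)
    {
            return (freq + (freq >> 2)) * util / max;
    }

    int main(void)
    {
            unsigned long max_freq = 2000000; /* kHz, hypothetical policy max */
            unsigned long util = 512;         /* ~50% of SCHED_CAPACITY_SCALE */

            /* 2 GHz * 1.25 * 512/1024 -> 1.25 GHz raw request */
            printf("raw next freq: %lu kHz\n",
                   map_util_freq(util, max_freq, SCHED_CAPACITY_SCALE));
            return 0;
    }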
sched.h
    2362  unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
    2369  return util; in uclamp_rq_util_with()
    2387  return clamp(util, min_util, max_util); in uclamp_rq_util_with()
    2409  unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
    2412  return util; in uclamp_rq_util_with()
    2492  unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); in cpu_util_cfs() local
    2495  util = max_t(unsigned long, util, in cpu_util_cfs()
    2499  return util; in cpu_util_cfs()
    2522  unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) in scale_irq_capacity() argument
    2524  util *= (max - irq); in scale_irq_capacity()
    [all …]
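Note: sched.h carries the clamping and scaling helpers used above. uclamp_rq_util_with() clamps a utilization value into the runqueue's [min_util, max_util] window (the second definition at 2409 that returns util unchanged is presumably the !CONFIG_UCLAMP_TASK stub), cpu_util_cfs() takes the max of the PELT average and the utilization estimate, and scale_irq_capacity() discounts utilization by the fraction of capacity consumed in IRQ context. A self-contained sketch of the IRQ scaling, assuming the truncated hit at 2524 continues with `util /= max` as in mainline (values are illustrative):

    #include <stdio.h>

    /* Lower-class util only gets the capacity IRQ time left over:
     * util * (max - irq) / max. */
    static unsigned long scale_irq_capacity(unsigned long util,
                                            unsigned long irq,
                                            unsigned long max)
    {
            util *= (max - irq);
            util /= max;
            return util;
    }

    int main(void)
    {
            unsigned long max = 1024, irq = 128, util = 600;

            /* 600 * (1024 - 128) / 1024 = 525 */
            printf("scaled util: %lu\n", scale_irq_capacity(util, irq, max));
            return 0;
    }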
fair.c
    6295  unsigned int util; in cpu_util() local
    6298  util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
    6301  util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); in cpu_util()
    6303  return min_t(unsigned long, util, capacity_orig_of(cpu)); in cpu_util()
    6322  unsigned int util; in cpu_util_without() local
    6329  util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util_without()
    6332  lsub_positive(&util, task_util(p)); in cpu_util_without()
    6384  util = max(util, estimated); in cpu_util_without()
    6392  return min_t(unsigned long, util, capacity_orig_of(cpu)); in cpu_util_without()
    6402  unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util_next() local
    [all …]
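Note: the fair.c hits show the common read-side pattern for a CPU's CFS utilization: take the max of the decaying PELT average (util_avg) and the faster-reacting estimate (util_est.enqueued), then cap the result at the CPU's original capacity; cpu_util_without() additionally subtracts the waking task's own contribution with lsub_positive(). A compact sketch of the max-then-cap pattern, using a stand-in struct rather than the kernel's cfs_rq (names follow the hits above, but the code is illustrative):

    #include <stdio.h>

    struct cfs_avg {
            unsigned long util_avg;          /* decaying PELT average */
            unsigned long util_est_enqueued; /* sum of enqueued estimates */
    };

    static unsigned long cpu_util(const struct cfs_avg *avg,
                                  unsigned long capacity_orig)
    {
            unsigned long util = avg->util_avg;

            /* util_est reacts faster than the decaying average */
            if (avg->util_est_enqueued > util)
                    util = avg->util_est_enqueued;

            /* never report more than the CPU can actually supply */
            return util < capacity_orig ? util : capacity_orig;
    }

    int main(void)
    {
            struct cfs_avg avg = { .util_avg = 300, .util_est_enqueued = 450 };

            printf("cpu util: %lu\n", cpu_util(&avg, 1024)); /* prints 450 */
            return 0;
    }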
core.c
    7367  u64 util; member
    7376  .util = SCHED_CAPACITY_SCALE, in capacity_from_percent()
    7391  req.util = req.percent << SCHED_CAPACITY_SHIFT; in capacity_from_percent()
    7392  req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); in capacity_from_percent()
    7415  if (tg->uclamp_req[clamp_id].value != req.util) in cpu_uclamp_write()
    7416  uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); in cpu_uclamp_write()
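Note: the core.c hits are the cgroup cpu.uclamp.{min,max} write path. The written percentage is parsed with two decimal places (so UCLAMP_PERCENT_SCALE is 10000 in mainline), capacity_from_percent() rescales it to the 0–1024 capacity range, and cpu_uclamp_write() stores the new clamp only if it actually changed. A sketch of the percentage-to-capacity conversion, assuming the mainline constants, with DIV_ROUND_CLOSEST_ULL reimplemented as a local helper:

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define UCLAMP_PERCENT_SCALE 10000UL /* 100.00% with 2 decimals */

    /* Rounded division, standing in for DIV_ROUND_CLOSEST_ULL(). */
    static unsigned long div_round_closest(unsigned long n, unsigned long d)
    {
            return (n + d / 2) / d;
    }

    /* "50.00" is parsed to 5000; (5000 << 10) / 10000 = 512. */
    static unsigned long capacity_from_percent(unsigned long percent)
    {
            return div_round_closest(percent << SCHED_CAPACITY_SHIFT,
                                     UCLAMP_PERCENT_SCALE);
    }

    int main(void)
    {
            printf("50.00%%  -> %lu\n", capacity_from_percent(5000));  /* 512 */
            printf("100.00%% -> %lu\n", capacity_from_percent(10000)); /* 1024 */
            return 0;
    }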