Searched refs: SCHED_CAPACITY_SCALE (results 1 – 15 of 15), sorted by relevance

/kernel/linux/linux-5.10/kernel/sched/rtg/

  frame_rtg.h
    41    #define FRAME_MAX_VLOAD SCHED_CAPACITY_SCALE
    42    #define FRAME_MAX_LOAD SCHED_CAPACITY_SCALE
    45    #define FRAME_DEFAULT_MAX_UTIL SCHED_CAPACITY_SCALE
    47    #define FRAME_DEFAULT_MAX_PREV_UTIL SCHED_CAPACITY_SCALE

  frame_rtg.c
    1111  if (unlikely((min_util < 0) || (min_util > SCHED_CAPACITY_SCALE))) {   (in set_frame_min_util())
    1140  if ((max_util < 0) || (max_util > SCHED_CAPACITY_SCALE)) {   (in set_frame_max_util())
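
Note: the frame_rtg.c hits bound user-supplied frame utilization to [0, SCHED_CAPACITY_SCALE]. A minimal userspace sketch of that range check (the helper name here is illustrative, not the kernel's):

    #include <stdbool.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)   /* 1024 */

    /* Reject a requested frame utilization outside [0, SCHED_CAPACITY_SCALE]. */
    static bool frame_util_is_valid(long util)
    {
        return util >= 0 && util <= SCHED_CAPACITY_SCALE;
    }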

/kernel/linux/linux-5.10/arch/x86/kernel/

  smpboot.c
    1823  static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE;
    1824  static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE;
    1828  arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :   (in arch_set_max_freq_ratio())
    2018  turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq);   (in intel_set_max_freq_ratio())
    2074  DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
    2078  u64 freq_scale = SCHED_CAPACITY_SCALE;   (in arch_scale_freq_tick())
    2104  if (freq_scale > SCHED_CAPACITY_SCALE)   (in arch_scale_freq_tick())
    2105  freq_scale = SCHED_CAPACITY_SCALE;   (in arch_scale_freq_tick())
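
Note: these smpboot.c hits build the x86 frequency-invariance factor, a per-CPU ratio expressed in SCHED_CAPACITY_SCALE units and clamped so it never exceeds the scale. A simplified sketch of the idea, assuming a plain current/maximum frequency ratio rather than the APERF/MPERF counters the kernel actually uses:

    #include <stdint.h>

    #define SCHED_CAPACITY_SCALE 1024ULL

    /*
     * Simplified frequency-invariance factor: current frequency as a fraction
     * of the maximum, in SCHED_CAPACITY_SCALE units, clamped so a transient
     * measurement above the maximum cannot exceed the scale.
     */
    static unsigned long freq_scale_of(uint64_t cur_khz, uint64_t max_khz)
    {
        uint64_t scale = (cur_khz * SCHED_CAPACITY_SCALE) / max_khz;

        return scale > SCHED_CAPACITY_SCALE ? SCHED_CAPACITY_SCALE : scale;
    }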

/kernel/linux/linux-5.10/include/linux/sched/

  topology.h
    242   return SCHED_CAPACITY_SCALE;   (in arch_scale_cpu_capacity())

/kernel/linux/linux-5.10/kernel/sched/

  topology.c
    102   if (group->sgc->capacity != SCHED_CAPACITY_SCALE)   (in sched_domain_debug_one())
    941   sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);   (in init_overlap_sched_group())
    942   sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;   (in init_overlap_sched_group())
    943   sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;   (in init_overlap_sched_group())
    1167  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));   (in get_group())
    1168  sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;   (in get_group())
    1169  sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;   (in get_group())
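
Note: the topology.c hits initialize a scheduling group's capacity to SCHED_CAPACITY_SCALE per CPU, i.e. every CPU is assumed full capacity until real capacities are computed. A rough sketch, with a plain CPU count standing in for cpumask_weight():

    #define SCHED_CAPACITY_SCALE 1024UL

    /* Initial group capacity: assume every CPU contributes full capacity. */
    static unsigned long initial_group_capacity(unsigned int nr_cpus)
    {
        return SCHED_CAPACITY_SCALE * nr_cpus;   /* e.g. 4 CPUs -> 4096 */
    }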

  sched.h
    958   unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
    959   unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
    2229  return SCHED_CAPACITY_SCALE;   (in arch_scale_freq_capacity())
    2669  return SCHED_CAPACITY_SCALE;   (in uclamp_eff_value())
    2695  return SCHED_CAPACITY_SCALE;   (in uclamp_rq_get())
    2770  return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;   (in cpu_bw_dl())
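
Note: the cpu_bw_dl() hit converts deadline running bandwidth (fixed point with BW_SHIFT fractional bits, 20 in mainline 5.10) into capacity units. A small sketch of that conversion; the helper name is illustrative:

    #include <stdint.h>

    #define SCHED_CAPACITY_SCALE 1024ULL
    #define BW_SHIFT 20   /* deadline bandwidth fixed point: 1 << 20 == 100% of one CPU */

    /*
     * Convert a deadline running bandwidth (BW_SHIFT fixed point) into
     * capacity units: e.g. running_bw == 1 << 19 (50% of a CPU) yields 512.
     */
    static unsigned long dl_bw_to_capacity(uint64_t running_bw)
    {
        return (running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
    }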

  cpufreq_schedutil.c
    17    #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
    364   min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);   (in sugov_iowait_boost())
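
Note: the schedutil hits show the iowait boost starting at SCHED_CAPACITY_SCALE / 8 (128) and doubling on consecutive iowait wakeups until it saturates at full scale. A sketch of just that doubling/saturation step, with the reset and decay handling of sugov_iowait_boost() omitted:

    #define SCHED_CAPACITY_SCALE 1024U
    #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)   /* 128 */

    /* Double the boost on each consecutive iowait wakeup, saturating at full scale. */
    static unsigned int iowait_boost_step(unsigned int boost)
    {
        if (boost == 0)
            return IOWAIT_BOOST_MIN;

        boost <<= 1;
        return boost > SCHED_CAPACITY_SCALE ? SCHED_CAPACITY_SCALE : boost;
    }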

  core.c
    939   unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
    942   unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
    959   unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
    985   #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
    999   return SCHED_CAPACITY_SCALE;   (in uclamp_none())
    1431  sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||   (in sysctl_sched_uclamp_handler())
    1432  sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {   (in sysctl_sched_uclamp_handler())
    1490  if (upper_bound > SCHED_CAPACITY_SCALE)   (in uclamp_validate())
    7773  rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;   (in sched_init())
    8398  .util = SCHED_CAPACITY_SCALE,   (in capacity_from_percent())
    [all …]
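
Note: the core.c hits split the [0, SCHED_CAPACITY_SCALE] clamp range into UCLAMP_BUCKETS buckets of UCLAMP_BUCKET_DELTA each. A sketch of the bucket mapping, assuming the Kconfig default of 5 buckets; the kernel's own helper for this is uclamp_bucket_id():

    #define SCHED_CAPACITY_SCALE 1024U
    #define UCLAMP_BUCKETS 5U   /* UCLAMP_BUCKETS_COUNT default in init/Kconfig */
    #define UCLAMP_BUCKET_DELTA \
        ((SCHED_CAPACITY_SCALE + UCLAMP_BUCKETS / 2) / UCLAMP_BUCKETS)   /* DIV_ROUND_CLOSEST -> 205 */

    /* Map a clamp value in [0, SCHED_CAPACITY_SCALE] to its uclamp bucket index. */
    static unsigned int uclamp_bucket_of(unsigned int clamp_value)
    {
        unsigned int id = clamp_value / UCLAMP_BUCKET_DELTA;

        return id < UCLAMP_BUCKETS - 1 ? id : UCLAMP_BUCKETS - 1;
    }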

  fair.c
    4025   #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
    4211   uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE);   (in util_fits_cpu())
    4247   if (util < uclamp_min && capacity_orig != SCHED_CAPACITY_SCALE)   (in util_fits_cpu())
    9046   sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /   (in update_sg_lb_stats())
    9307   sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /   (in update_sg_wakeup_stats())
    9733   local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /   (in calculate_imbalance())
    9745   sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /   (in calculate_imbalance())
    9771   ) / SCHED_CAPACITY_SCALE;   (in calculate_imbalance())
    9870   sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /   (in find_busiest_group())
    12268  SCHED_CAPACITY_SCALE   (in sched_trace_rq_cpu_capacity())
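
Note: the avg_load hits in fair.c normalize group load by group capacity, scaled by SCHED_CAPACITY_SCALE so groups of different capacities compare directly. A sketch of that formula:

    #include <stdint.h>

    #define SCHED_CAPACITY_SCALE 1024ULL

    /*
     * Capacity-normalized average load: load per unit of capacity, in
     * SCHED_CAPACITY_SCALE units, so big and little groups are comparable.
     * E.g. load 2048 on capacity 2048 and load 1024 on capacity 1024 both
     * yield 1024 ("fully busy").
     */
    static unsigned long group_avg_load(uint64_t group_load, uint64_t group_capacity)
    {
        return (group_load * SCHED_CAPACITY_SCALE) / group_capacity;
    }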

  deadline.c
    117   capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {   (in dl_bw_capacity())
    136   return SCHED_CAPACITY_SCALE;   (in dl_bw_capacity())
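
Note: dl_bw_capacity() reports the capacity available to deadline tasks: a fast path when every CPU's original capacity equals SCHED_CAPACITY_SCALE, otherwise a sum of per-CPU capacities. A rough sketch of that idea, not the kernel's exact control flow; the signature is hypothetical:

    #define SCHED_CAPACITY_SCALE 1024UL

    /*
     * Total capacity usable by deadline tasks: fast path when every CPU is
     * full capacity, otherwise sum the per-CPU original capacities.
     */
    static unsigned long total_dl_capacity(const unsigned long *capacity_orig,
                                           unsigned int nr_cpus, int all_full_capacity)
    {
        unsigned long sum = 0;
        unsigned int i;

        if (all_full_capacity)
            return nr_cpus * SCHED_CAPACITY_SCALE;

        for (i = 0; i < nr_cpus; i++)
            sum += capacity_orig[i];

        return sum;
    }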

/kernel/linux/linux-5.10/drivers/base/

  arch_topology.c
    34    DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
    59    DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
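
Note: freq_scale and cpu_scale both default to SCHED_CAPACITY_SCALE here; together they make the scheduler's time accounting frequency- and capacity-invariant. A sketch of the general shape of that scaling (not the exact PELT code):

    #include <stdint.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT)

    /*
     * Raw running time is scaled by the CPU's relative compute capacity
     * (cpu_scale) and its current frequency (freq_scale), both fractions of
     * SCHED_CAPACITY_SCALE. On a full-speed, full-size CPU both factors are
     * 1024 and delta is unchanged.
     */
    static uint64_t invariant_delta(uint64_t delta, unsigned long freq_scale,
                                    unsigned long cpu_scale)
    {
        delta = (delta * freq_scale) >> SCHED_CAPACITY_SHIFT;
        delta = (delta * cpu_scale) >> SCHED_CAPACITY_SHIFT;
        return delta;
    }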

/kernel/linux/linux-5.10/arch/arm64/kernel/

  topology.c
    265   scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);   (in topology_scale_freq_tick())

/kernel/linux/common_modules/qos_auth/auth_ctl/

  qos_ctrl.c
    502   if (uclamp_max > SCHED_CAPACITY_SCALE)   (in valid_uclamp())

/kernel/linux/linux-5.10/include/linux/

  sched.h
    378   # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)   (macro definition)
    722   unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
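
Note: this is the definition site. SCHED_CAPACITY_SHIFT is 10 in linux-5.10, so SCHED_CAPACITY_SCALE is 1024, and every capacity or utilization value in the hits above is a fixed-point fraction of that. A trivial standalone check:

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10   /* SCHED_FIXEDPOINT_SHIFT in linux-5.10 */
    #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        /* 1024 represents one fully busy, full-capacity CPU. */
        printf("SCHED_CAPACITY_SCALE = %ld\n", SCHED_CAPACITY_SCALE);      /* 1024 */
        printf("half a CPU           = %ld\n", SCHED_CAPACITY_SCALE / 2);  /* 512  */
        return 0;
    }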

/kernel/linux/linux-5.10/init/

  Kconfig
    854   will be SCHED_CAPACITY_SCALE/UCLAMP_BUCKETS_COUNT. The higher the
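
Note: with the mainline default of UCLAMP_BUCKETS_COUNT = 5, that bucket size works out to DIV_ROUND_CLOSEST(1024, 5) = 205 utilization units per bucket (see the UCLAMP_BUCKET_DELTA sketch under core.c above); a higher count tracks clamp values at finer granularity in exchange for more buckets per runqueue.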