1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * intel_pstate.c: Native P state management for Intel processors
4  *
5  * (C) Copyright 2012 Intel Corporation
6  * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/kernel.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/module.h>
14 #include <linux/ktime.h>
15 #include <linux/hrtimer.h>
16 #include <linux/tick.h>
17 #include <linux/slab.h>
18 #include <linux/sched/cpufreq.h>
19 #include <linux/sched/smt.h>
20 #include <linux/list.h>
21 #include <linux/cpu.h>
22 #include <linux/cpufreq.h>
23 #include <linux/sysfs.h>
24 #include <linux/types.h>
25 #include <linux/fs.h>
26 #include <linux/acpi.h>
27 #include <linux/vmalloc.h>
28 #include <linux/pm_qos.h>
29 #include <linux/bitfield.h>
30 #include <trace/events/power.h>
31 
32 #include <asm/cpu.h>
33 #include <asm/div64.h>
34 #include <asm/msr.h>
35 #include <asm/cpu_device_id.h>
36 #include <asm/cpufeature.h>
37 #include <asm/intel-family.h>
38 #include "../drivers/thermal/intel/thermal_interrupt.h"
39 
40 #define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)
41 
42 #define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
43 #define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
44 #define INTEL_CPUFREQ_TRANSITION_DELAY		500
45 
46 #ifdef CONFIG_ACPI
47 #include <acpi/processor.h>
48 #include <acpi/cppc_acpi.h>
49 #endif
50 
51 #define FRAC_BITS 8
52 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
53 #define fp_toint(X) ((X) >> FRAC_BITS)
54 
55 #define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
56 
57 #define EXT_BITS 6
58 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
59 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
60 #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
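/*
 * Worked example of the fixed-point formats above (illustrative values
 * only): with FRAC_BITS == 8, int_tofp(3) == 768 and fp_toint(768) == 3,
 * so mul_fp(int_tofp(3), ONE_EIGHTH_FP) == 96, which is 3/8 in Q24.8.
 * The "ext" helpers below use EXT_FRAC_BITS == 14 fractional bits for
 * extra precision in the same way.
 */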
61 
62 static inline int32_t mul_fp(int32_t x, int32_t y)
63 {
64 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
65 }
66 
67 static inline int32_t div_fp(s64 x, s64 y)
68 {
69 	return div64_s64((int64_t)x << FRAC_BITS, y);
70 }
71 
72 static inline int ceiling_fp(int32_t x)
73 {
74 	int mask, ret;
75 
76 	ret = fp_toint(x);
77 	mask = (1 << FRAC_BITS) - 1;
78 	if (x & mask)
79 		ret += 1;
80 	return ret;
81 }
82 
83 static inline u64 mul_ext_fp(u64 x, u64 y)
84 {
85 	return (x * y) >> EXT_FRAC_BITS;
86 }
87 
88 static inline u64 div_ext_fp(u64 x, u64 y)
89 {
90 	return div64_u64(x << EXT_FRAC_BITS, y);
91 }
92 
93 /**
94  * struct sample -	Store performance sample
95  * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
96  *			performance during last sample period
97  * @busy_scaled:	Scaled busy value which is used to calculate next
98  *			P state. This can be different from core_avg_perf
99  *			to account for cpu idle periods
100  * @aperf:		Difference of actual performance frequency clock count
101  *			read from APERF MSR between last and current sample
102  * @mperf:		Difference of maximum performance frequency clock count
103  *			read from MPERF MSR between last and current sample
104  * @tsc:		Difference of time stamp counter between last and
105  *			current sample
106  * @time:		Current time from scheduler
107  *
108  * This structure is used in the cpudata structure to store performance sample
109  * data for choosing next P State.
110  */
111 struct sample {
112 	int32_t core_avg_perf;
113 	int32_t busy_scaled;
114 	u64 aperf;
115 	u64 mperf;
116 	u64 tsc;
117 	u64 time;
118 };
119 
120 /**
121  * struct pstate_data - Store P state data
122  * @current_pstate:	Current requested P state
123  * @min_pstate:		Min P state possible for this platform
124  * @max_pstate:		Max P state possible for this platform
125  * @max_pstate_physical: This is the physical Max P state for a processor.
126  *			It can be higher than max_pstate, which can be
127  *			limited by platform thermal design power limits
128  * @perf_ctl_scaling:	PERF_CTL P-state to frequency scaling factor
129  * @scaling:		Scaling factor between performance and frequency
130  * @turbo_pstate:	Max Turbo P state possible for this platform
131  * @min_freq:		@min_pstate frequency in cpufreq units
132  * @max_freq:		@max_pstate frequency in cpufreq units
133  * @turbo_freq:		@turbo_pstate frequency in cpufreq units
134  *
135  * Stores the per cpu model P state limits and current P state.
136  */
137 struct pstate_data {
138 	int	current_pstate;
139 	int	min_pstate;
140 	int	max_pstate;
141 	int	max_pstate_physical;
142 	int	perf_ctl_scaling;
143 	int	scaling;
144 	int	turbo_pstate;
145 	unsigned int min_freq;
146 	unsigned int max_freq;
147 	unsigned int turbo_freq;
148 };
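/*
 * Note on units (illustrative): the "cpufreq units" above are kHz and
 * frequency == P-state * scaling, so with scaling == 100000 (100 MHz per
 * P-state step) a max_pstate of 36 corresponds to max_freq == 3600000 kHz,
 * i.e. 3.6 GHz.
 */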
149 
150 /**
151  * struct vid_data -	Stores voltage information data
152  * @min:		VID data for this platform corresponding to
153  *			the lowest P state
154  * @max:		VID data corresponding to the highest P State.
155  * @turbo:		VID data for turbo P state
156  * @ratio:		Ratio of (vid max - vid min) /
157  *			(max P state - Min P State)
158  *
159  * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
160  * This data is used on Atom platforms, where in addition to the target P state,
161  * the voltage data needs to be specified to select the next P State.
162  */
163 struct vid_data {
164 	int min;
165 	int max;
166 	int turbo;
167 	int32_t ratio;
168 };
169 
170 /**
171  * struct global_params - Global parameters, mostly tunable via sysfs.
172  * @no_turbo:		Whether or not to use turbo P-states.
173  * @turbo_disabled:	Whether or not turbo P-states are available at all,
174  *			based on the MSR_IA32_MISC_ENABLE value and whether or
175  *			not the maximum reported turbo P-state is different from
176  *			the maximum reported non-turbo one.
177  * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
178  *			P-state capacity.
179  * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
180  *			P-state capacity.
181  */
182 struct global_params {
183 	bool no_turbo;
184 	bool turbo_disabled;
185 	int max_perf_pct;
186 	int min_perf_pct;
187 };
188 
189 /**
190  * struct cpudata -	Per CPU instance data storage
191  * @cpu:		CPU number for this instance data
192  * @policy:		CPUFreq policy value
193  * @update_util:	CPUFreq utility callback information
194  * @update_util_set:	CPUFreq utility callback is set
195  * @iowait_boost:	iowait-related boost fraction
196  * @last_update:	Time of the last update.
197  * @pstate:		Stores P state limits for this CPU
198  * @vid:		Stores VID limits for this CPU
199  * @last_sample_time:	Last Sample time
200  * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference
201  * @prev_aperf:		Last APERF value read from APERF MSR
202  * @prev_mperf:		Last MPERF value read from MPERF MSR
203  * @prev_tsc:		Last timestamp counter (TSC) value
204  * @sample:		Storage for storing last Sample data
205  * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
206  * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
207  * @acpi_perf_data:	Stores ACPI perf information read from _PSS
208  * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
209  * @epp_powersave:	Last saved HWP energy performance preference
210  *			(EPP) or energy performance bias (EPB),
211  *			when policy switched to performance
212  * @epp_policy:		Last saved policy used to set EPP/EPB
213  * @epp_default:	Power on default HWP energy performance
214  *			preference/bias
215  * @epp_cached:		Cached HWP energy-performance preference value
216  * @hwp_req_cached:	Cached value of the last HWP Request MSR
217  * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
218  * @last_io_update:	Last time when IO wake flag was set
219  * @capacity_perf:	Highest perf used for scale invariance
220  * @sched_flags:	Store scheduler flags for possible cross CPU update
221  * @hwp_boost_min:	Last HWP boosted min performance
222  * @suspended:		Whether or not the driver has been suspended.
223  * @hwp_notify_work:	workqueue for HWP notifications.
224  *
225  * This structure stores per CPU instance data for all CPUs.
226  */
227 struct cpudata {
228 	int cpu;
229 
230 	unsigned int policy;
231 	struct update_util_data update_util;
232 	bool   update_util_set;
233 
234 	struct pstate_data pstate;
235 	struct vid_data vid;
236 
237 	u64	last_update;
238 	u64	last_sample_time;
239 	u64	aperf_mperf_shift;
240 	u64	prev_aperf;
241 	u64	prev_mperf;
242 	u64	prev_tsc;
243 	struct sample sample;
244 	int32_t	min_perf_ratio;
245 	int32_t	max_perf_ratio;
246 #ifdef CONFIG_ACPI
247 	struct acpi_processor_performance acpi_perf_data;
248 	bool valid_pss_table;
249 #endif
250 	unsigned int iowait_boost;
251 	s16 epp_powersave;
252 	s16 epp_policy;
253 	s16 epp_default;
254 	s16 epp_cached;
255 	u64 hwp_req_cached;
256 	u64 hwp_cap_cached;
257 	u64 last_io_update;
258 	unsigned int capacity_perf;
259 	unsigned int sched_flags;
260 	u32 hwp_boost_min;
261 	bool suspended;
262 	struct delayed_work hwp_notify_work;
263 };
264 
265 static struct cpudata **all_cpu_data;
266 
267 /**
268  * struct pstate_funcs - Per CPU model specific callbacks
269  * @get_max:		Callback to get maximum non turbo effective P state
270  * @get_max_physical:	Callback to get maximum non turbo physical P state
271  * @get_min:		Callback to get minimum P state
272  * @get_turbo:		Callback to get turbo P state
273  * @get_scaling:	Callback to get frequency scaling factor
274  * @get_cpu_scaling:	Get frequency scaling factor for a given cpu
275  * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
276  * @get_val:		Callback to convert P state to actual MSR write value
277  * @get_vid:		Callback to get VID data for Atom platforms
278  *
279  * Core and Atom CPU models have different ways to get P State limits. This
280  * structure is used to store those callbacks.
281  */
282 struct pstate_funcs {
283 	int (*get_max)(int cpu);
284 	int (*get_max_physical)(int cpu);
285 	int (*get_min)(int cpu);
286 	int (*get_turbo)(int cpu);
287 	int (*get_scaling)(void);
288 	int (*get_cpu_scaling)(int cpu);
289 	int (*get_aperf_mperf_shift)(void);
290 	u64 (*get_val)(struct cpudata*, int pstate);
291 	void (*get_vid)(struct cpudata *);
292 };
293 
294 static struct pstate_funcs pstate_funcs __read_mostly;
295 
296 static bool hwp_active __ro_after_init;
297 static int hwp_mode_bdw __ro_after_init;
298 static bool per_cpu_limits __ro_after_init;
299 static bool hwp_forced __ro_after_init;
300 static bool hwp_boost __read_mostly;
301 static bool hwp_is_hybrid;
302 
303 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
304 
305 #define HYBRID_SCALING_FACTOR		78741
306 #define HYBRID_SCALING_FACTOR_MTL	80000
307 #define HYBRID_SCALING_FACTOR_LNL	86957
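/*
 * These factors are kHz per HWP performance level, so, for example,
 * HYBRID_SCALING_FACTOR == 78741 means that one performance level step
 * corresponds to roughly 78.7 MHz, compared with 100 MHz per P-state for
 * the classic PERF_CTL interface (see core_get_scaling() below).
 */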
308 
309 static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
310 
311 static inline int core_get_scaling(void)
312 {
313 	return 100000;
314 }
315 
316 #ifdef CONFIG_ACPI
317 static bool acpi_ppc;
318 #endif
319 
320 static struct global_params global;
321 
322 static DEFINE_MUTEX(intel_pstate_driver_lock);
323 static DEFINE_MUTEX(intel_pstate_limits_lock);
324 
325 #ifdef CONFIG_ACPI
326 
327 static bool intel_pstate_acpi_pm_profile_server(void)
328 {
329 	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
330 	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
331 		return true;
332 
333 	return false;
334 }
335 
336 static bool intel_pstate_get_ppc_enable_status(void)
337 {
338 	if (intel_pstate_acpi_pm_profile_server())
339 		return true;
340 
341 	return acpi_ppc;
342 }
343 
344 #ifdef CONFIG_ACPI_CPPC_LIB
345 
346 /* The work item is needed to avoid CPU hotplug locking issues */
347 static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
348 {
349 	sched_set_itmt_support();
350 }
351 
352 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
353 
354 #define CPPC_MAX_PERF	U8_MAX
355 
356 static void intel_pstate_set_itmt_prio(int cpu)
357 {
358 	struct cppc_perf_caps cppc_perf;
359 	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
360 	int ret;
361 
362 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
363 	/*
364 	 * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0].
365 	 *
366 	 * Also, on some systems with overclocking enabled, CPPC.highest_perf is
367 	 * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT.
368 	 * Fall back to MSR_HWP_CAPABILITIES then too.
369 	 */
370 	if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF)
371 		cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
372 
373 	/*
374 	 * The priorities can be set regardless of whether or not
375 	 * sched_set_itmt_support(true) has been called and it is valid to
376 	 * update them at any time after it has been called.
377 	 */
378 	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
379 
380 	if (max_highest_perf <= min_highest_perf) {
381 		if (cppc_perf.highest_perf > max_highest_perf)
382 			max_highest_perf = cppc_perf.highest_perf;
383 
384 		if (cppc_perf.highest_perf < min_highest_perf)
385 			min_highest_perf = cppc_perf.highest_perf;
386 
387 		if (max_highest_perf > min_highest_perf) {
388 			/*
389 			 * This code can be run during CPU online under the
390 			 * CPU hotplug locks, so sched_set_itmt_support()
391 			 * cannot be called from here.  Queue up a work item
392 			 * to invoke it.
393 			 */
394 			schedule_work(&sched_itmt_work);
395 		}
396 	}
397 }
398 
399 static int intel_pstate_get_cppc_guaranteed(int cpu)
400 {
401 	struct cppc_perf_caps cppc_perf;
402 	int ret;
403 
404 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
405 	if (ret)
406 		return ret;
407 
408 	if (cppc_perf.guaranteed_perf)
409 		return cppc_perf.guaranteed_perf;
410 
411 	return cppc_perf.nominal_perf;
412 }
413 
414 static int intel_pstate_cppc_get_scaling(int cpu)
415 {
416 	struct cppc_perf_caps cppc_perf;
417 	int ret;
418 
419 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
420 
421 	/*
422 	 * If the nominal frequency and the nominal performance are not
423 	 * zero and the ratio between them is not 100, return the hybrid
424 	 * scaling factor.
425 	 */
426 	if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
427 	    cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
428 		return hybrid_scaling_factor;
429 
430 	return core_get_scaling();
431 }
432 
433 #else /* CONFIG_ACPI_CPPC_LIB */
434 static inline void intel_pstate_set_itmt_prio(int cpu)
435 {
436 }
437 #endif /* CONFIG_ACPI_CPPC_LIB */
438 
439 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
440 {
441 	struct cpudata *cpu;
442 	int ret;
443 	int i;
444 
445 	if (hwp_active) {
446 		intel_pstate_set_itmt_prio(policy->cpu);
447 		return;
448 	}
449 
450 	if (!intel_pstate_get_ppc_enable_status())
451 		return;
452 
453 	cpu = all_cpu_data[policy->cpu];
454 
455 	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
456 						  policy->cpu);
457 	if (ret)
458 		return;
459 
460 	/*
461 	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
462 	 * guarantee that the states returned by it map to the states in our
463 	 * list directly.
464 	 */
465 	if (cpu->acpi_perf_data.control_register.space_id !=
466 						ACPI_ADR_SPACE_FIXED_HARDWARE)
467 		goto err;
468 
469 	/*
470 	 * If there is only one entry in _PSS, simply ignore _PSS and continue as
471 	 * usual without taking _PSS into account.
472 	 */
473 	if (cpu->acpi_perf_data.state_count < 2)
474 		goto err;
475 
476 	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
477 	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
478 		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
479 			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
480 			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
481 			 (u32) cpu->acpi_perf_data.states[i].power,
482 			 (u32) cpu->acpi_perf_data.states[i].control);
483 	}
484 
485 	cpu->valid_pss_table = true;
486 	pr_debug("_PPC limits will be enforced\n");
487 
488 	return;
489 
490  err:
491 	cpu->valid_pss_table = false;
492 	acpi_processor_unregister_performance(policy->cpu);
493 }
494 
495 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
496 {
497 	struct cpudata *cpu;
498 
499 	cpu = all_cpu_data[policy->cpu];
500 	if (!cpu->valid_pss_table)
501 		return;
502 
503 	acpi_processor_unregister_performance(policy->cpu);
504 }
505 #else /* CONFIG_ACPI */
506 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
507 {
508 }
509 
510 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
511 {
512 }
513 
514 static inline bool intel_pstate_acpi_pm_profile_server(void)
515 {
516 	return false;
517 }
518 #endif /* CONFIG_ACPI */
519 
520 #ifndef CONFIG_ACPI_CPPC_LIB
521 static inline int intel_pstate_get_cppc_guaranteed(int cpu)
522 {
523 	return -ENOTSUPP;
524 }
525 
526 static int intel_pstate_cppc_get_scaling(int cpu)
527 {
528 	return core_get_scaling();
529 }
530 #endif /* CONFIG_ACPI_CPPC_LIB */
531 
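/*
 * Illustration of the cpufreq relation handling in
 * intel_pstate_freq_to_hwp_rel() below (example numbers only): assuming
 * scaling == 100000, i.e. 100 MHz per step, a request for 2050000 kHz maps
 * to P-state 20 with CPUFREQ_RELATION_H (round down), and to 21 with both
 * CPUFREQ_RELATION_C (closest) and the DIV_ROUND_UP default.
 */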
532 static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
533 					unsigned int relation)
534 {
535 	if (freq == cpu->pstate.turbo_freq)
536 		return cpu->pstate.turbo_pstate;
537 
538 	if (freq == cpu->pstate.max_freq)
539 		return cpu->pstate.max_pstate;
540 
541 	switch (relation) {
542 	case CPUFREQ_RELATION_H:
543 		return freq / cpu->pstate.scaling;
544 	case CPUFREQ_RELATION_C:
545 		return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
546 	}
547 
548 	return DIV_ROUND_UP(freq, cpu->pstate.scaling);
549 }
550 
551 static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
552 {
553 	return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
554 }
555 
556 /**
557  * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
558  * @cpu: Target CPU.
559  *
560  * On hybrid processors, HWP may expose more performance levels than there are
561  * P-states accessible through the PERF_CTL interface.  If that happens, the
562  * scaling factor between HWP performance levels and CPU frequency will be less
563  * than the scaling factor between P-state values and CPU frequency.
564  *
565  * In that case, adjust the CPU parameters used in computations accordingly.
566  */
567 static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
568 {
569 	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
570 	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
571 	int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
572 	int scaling = cpu->pstate.scaling;
573 	int freq;
574 
575 	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
576 	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
577 	pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
578 	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
579 	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
580 	pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
581 
582 	cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
583 					   perf_ctl_scaling);
584 	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
585 					 perf_ctl_scaling);
586 
587 	freq = perf_ctl_max_phys * perf_ctl_scaling;
588 	cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
589 
590 	freq = cpu->pstate.min_pstate * perf_ctl_scaling;
591 	cpu->pstate.min_freq = freq;
592 	/*
593 	 * Cast the min P-state value retrieved via pstate_funcs.get_min() to
594 	 * the effective range of HWP performance levels.
595 	 */
596 	cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
597 }
598 
599 static bool turbo_is_disabled(void)
600 {
601 	u64 misc_en;
602 
603 	if (!cpu_feature_enabled(X86_FEATURE_IDA))
604 		return true;
605 
606 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
607 
608 	return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
609 }
610 
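/*
 * Illustration: min_perf_pct_min() below returns the lowest value that the
 * global min_perf_pct limit may be set to.  With made-up numbers, e.g.
 * min_pstate == 8 and turbo_pstate == 40, it returns 20 (percent).
 */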
611 static int min_perf_pct_min(void)
612 {
613 	struct cpudata *cpu = all_cpu_data[0];
614 	int turbo_pstate = cpu->pstate.turbo_pstate;
615 
616 	return turbo_pstate ?
617 		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
618 }
619 
620 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
621 {
622 	u64 epb;
623 	int ret;
624 
625 	if (!boot_cpu_has(X86_FEATURE_EPB))
626 		return -ENXIO;
627 
628 	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
629 	if (ret)
630 		return (s16)ret;
631 
632 	return (s16)(epb & 0x0f);
633 }
634 
635 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
636 {
637 	s16 epp;
638 
639 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
640 		/*
641 		 * When hwp_req_data is 0, the caller didn't read
642 		 * MSR_HWP_REQUEST, so read it here to get the EPP.
643 		 */
644 		if (!hwp_req_data) {
645 			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
646 					    &hwp_req_data);
647 			if (epp)
648 				return epp;
649 		}
650 		epp = (hwp_req_data >> 24) & 0xff;
651 	} else {
652 		/* When there is no EPP present, HWP uses EPB settings */
653 		epp = intel_pstate_get_epb(cpu_data);
654 	}
655 
656 	return epp;
657 }
658 
659 static int intel_pstate_set_epb(int cpu, s16 pref)
660 {
661 	u64 epb;
662 	int ret;
663 
664 	if (!boot_cpu_has(X86_FEATURE_EPB))
665 		return -ENXIO;
666 
667 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
668 	if (ret)
669 		return ret;
670 
671 	epb = (epb & ~0x0f) | pref;
672 	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
673 
674 	return 0;
675 }
676 
677 /*
678  * EPP/EPB display strings corresponding to EPP index in the
679  * energy_perf_strings[]
680  *	index		String
681  *-------------------------------------
682  *	0		default
683  *	1		performance
684  *	2		balance_performance
685  *	3		balance_power
686  *	4		power
687  */
688 
689 enum energy_perf_value_index {
690 	EPP_INDEX_DEFAULT = 0,
691 	EPP_INDEX_PERFORMANCE,
692 	EPP_INDEX_BALANCE_PERFORMANCE,
693 	EPP_INDEX_BALANCE_POWERSAVE,
694 	EPP_INDEX_POWERSAVE,
695 };
696 
697 static const char * const energy_perf_strings[] = {
698 	[EPP_INDEX_DEFAULT] = "default",
699 	[EPP_INDEX_PERFORMANCE] = "performance",
700 	[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
701 	[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
702 	[EPP_INDEX_POWERSAVE] = "power",
703 	NULL
704 };
705 static unsigned int epp_values[] = {
706 	[EPP_INDEX_DEFAULT] = 0, /* Unused index */
707 	[EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE,
708 	[EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE,
709 	[EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE,
710 	[EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE,
711 };
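/*
 * For reference, the HWP_EPP_* values above span the architectural 0-255
 * EPP range, where lower values favor performance (HWP_EPP_PERFORMANCE is
 * 0) and higher values favor energy savings (HWP_EPP_POWERSAVE is 255).
 */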
712 
713 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
714 {
715 	s16 epp;
716 	int index = -EINVAL;
717 
718 	*raw_epp = 0;
719 	epp = intel_pstate_get_epp(cpu_data, 0);
720 	if (epp < 0)
721 		return epp;
722 
723 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
724 		if (epp == epp_values[EPP_INDEX_PERFORMANCE])
725 			return EPP_INDEX_PERFORMANCE;
726 		if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE])
727 			return EPP_INDEX_BALANCE_PERFORMANCE;
728 		if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE])
729 			return EPP_INDEX_BALANCE_POWERSAVE;
730 		if (epp == epp_values[EPP_INDEX_POWERSAVE])
731 			return EPP_INDEX_POWERSAVE;
732 		*raw_epp = epp;
733 		return 0;
734 	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
735 		/*
736 		 * Range:
737 		 *	0x00-0x03	:	Performance
738 		 *	0x04-0x07	:	Balance performance
739 		 *	0x08-0x0B	:	Balance power
740 		 *	0x0C-0x0F	:	Power
741 		 * The EPB is a 4 bit value, but our ranges restrict the
742 		 * values which can be set. Effectively, only the top two
743 		 * bits are used here.
744 		 */
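		/*
		 * For example, an EPB of 0x06 (balance performance range)
		 * yields (0x06 >> 2) + 1 == 2 == EPP_INDEX_BALANCE_PERFORMANCE.
		 */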
745 		index = (epp >> 2) + 1;
746 	}
747 
748 	return index;
749 }
750 
751 static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
752 {
753 	int ret;
754 
755 	/*
756 	 * Use the cached HWP Request MSR value, because in the active mode the
757 	 * register itself may be updated by intel_pstate_hwp_boost_up() or
758 	 * intel_pstate_hwp_boost_down() at any time.
759 	 */
760 	u64 value = READ_ONCE(cpu->hwp_req_cached);
761 
762 	value &= ~GENMASK_ULL(31, 24);
763 	value |= (u64)epp << 24;
764 	/*
765 	 * The only other updater of hwp_req_cached in the active mode,
766 	 * intel_pstate_hwp_set(), is called under the same lock as this
767 	 * function, so it cannot run in parallel with the update below.
768 	 */
769 	WRITE_ONCE(cpu->hwp_req_cached, value);
770 	ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
771 	if (!ret)
772 		cpu->epp_cached = epp;
773 
774 	return ret;
775 }
776 
777 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
778 					      int pref_index, bool use_raw,
779 					      u32 raw_epp)
780 {
781 	int epp = -EINVAL;
782 	int ret;
783 
784 	if (!pref_index)
785 		epp = cpu_data->epp_default;
786 
787 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
788 		if (use_raw)
789 			epp = raw_epp;
790 		else if (epp == -EINVAL)
791 			epp = epp_values[pref_index];
792 
793 		/*
794 		 * To avoid confusion, refuse to set EPP to any values different
795 		 * from 0 (performance) if the current policy is "performance",
796 		 * because those values would be overridden.
797 		 */
798 		if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
799 			return -EBUSY;
800 
801 		ret = intel_pstate_set_epp(cpu_data, epp);
802 	} else {
803 		if (epp == -EINVAL)
804 			epp = (pref_index - 1) << 2;
805 		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
806 	}
807 
808 	return ret;
809 }
810 
811 static ssize_t show_energy_performance_available_preferences(
812 				struct cpufreq_policy *policy, char *buf)
813 {
814 	int i = 0;
815 	int ret = 0;
816 
817 	while (energy_perf_strings[i] != NULL)
818 		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
819 
820 	ret += sprintf(&buf[ret], "\n");
821 
822 	return ret;
823 }
824 
825 cpufreq_freq_attr_ro(energy_performance_available_preferences);
826 
827 static struct cpufreq_driver intel_pstate;
828 
829 static ssize_t store_energy_performance_preference(
830 		struct cpufreq_policy *policy, const char *buf, size_t count)
831 {
832 	struct cpudata *cpu = all_cpu_data[policy->cpu];
833 	char str_preference[21];
834 	bool raw = false;
835 	ssize_t ret;
836 	u32 epp = 0;
837 
838 	ret = sscanf(buf, "%20s", str_preference);
839 	if (ret != 1)
840 		return -EINVAL;
841 
842 	ret = match_string(energy_perf_strings, -1, str_preference);
843 	if (ret < 0) {
844 		if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
845 			return ret;
846 
847 		ret = kstrtouint(buf, 10, &epp);
848 		if (ret)
849 			return ret;
850 
851 		if (epp > 255)
852 			return -EINVAL;
853 
854 		raw = true;
855 	}
856 
857 	/*
858 	 * This function runs with the policy R/W semaphore held, which
859 	 * guarantees that the driver pointer will not change while it is
860 	 * running.
861 	 */
862 	if (!intel_pstate_driver)
863 		return -EAGAIN;
864 
865 	mutex_lock(&intel_pstate_limits_lock);
866 
867 	if (intel_pstate_driver == &intel_pstate) {
868 		ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
869 	} else {
870 		/*
871 		 * In the passive mode the governor needs to be stopped on the
872 		 * target CPU before the EPP update and restarted after it,
873 		 * which is super-heavy-weight, so make sure it is worth doing
874 		 * upfront.
875 		 */
876 		if (!raw)
877 			epp = ret ? epp_values[ret] : cpu->epp_default;
878 
879 		if (cpu->epp_cached != epp) {
880 			int err;
881 
882 			cpufreq_stop_governor(policy);
883 			ret = intel_pstate_set_epp(cpu, epp);
884 			err = cpufreq_start_governor(policy);
885 			if (!ret)
886 				ret = err;
887 		} else {
888 			ret = 0;
889 		}
890 	}
891 
892 	mutex_unlock(&intel_pstate_limits_lock);
893 
894 	return ret ?: count;
895 }
896 
897 static ssize_t show_energy_performance_preference(
898 				struct cpufreq_policy *policy, char *buf)
899 {
900 	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
901 	int preference, raw_epp;
902 
903 	preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
904 	if (preference < 0)
905 		return preference;
906 
907 	if (raw_epp)
908 		return  sprintf(buf, "%d\n", raw_epp);
909 	else
910 		return  sprintf(buf, "%s\n", energy_perf_strings[preference]);
911 }
912 
913 cpufreq_freq_attr_rw(energy_performance_preference);
914 
915 static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
916 {
917 	struct cpudata *cpu = all_cpu_data[policy->cpu];
918 	int ratio, freq;
919 
920 	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
921 	if (ratio <= 0) {
922 		u64 cap;
923 
924 		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
925 		ratio = HWP_GUARANTEED_PERF(cap);
926 	}
927 
928 	freq = ratio * cpu->pstate.scaling;
929 	if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
930 		freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);
931 
932 	return sprintf(buf, "%d\n", freq);
933 }
934 
935 cpufreq_freq_attr_ro(base_frequency);
936 
937 static struct freq_attr *hwp_cpufreq_attrs[] = {
938 	&energy_performance_preference,
939 	&energy_performance_available_preferences,
940 	&base_frequency,
941 	NULL,
942 };
943 
944 static struct cpudata *hybrid_max_perf_cpu __read_mostly;
945 /*
946  * Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata,
947  * and the x86 arch scale-invariance information from concurrent updates.
948  */
949 static DEFINE_MUTEX(hybrid_capacity_lock);
950 
951 static void hybrid_set_cpu_capacity(struct cpudata *cpu)
952 {
953 	arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
954 			      hybrid_max_perf_cpu->capacity_perf,
955 			      cpu->capacity_perf,
956 			      cpu->pstate.max_pstate_physical);
957 
958 	pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
959 		 cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
960 		 cpu->pstate.max_pstate_physical);
961 }
962 
963 static void hybrid_clear_cpu_capacity(unsigned int cpunum)
964 {
965 	arch_set_cpu_capacity(cpunum, 1, 1, 1, 1);
966 }
967 
968 static void hybrid_get_capacity_perf(struct cpudata *cpu)
969 {
970 	if (READ_ONCE(global.no_turbo)) {
971 		cpu->capacity_perf = cpu->pstate.max_pstate_physical;
972 		return;
973 	}
974 
975 	cpu->capacity_perf = HWP_HIGHEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
976 }
977 
978 static void hybrid_set_capacity_of_cpus(void)
979 {
980 	int cpunum;
981 
982 	for_each_online_cpu(cpunum) {
983 		struct cpudata *cpu = all_cpu_data[cpunum];
984 
985 		if (cpu)
986 			hybrid_set_cpu_capacity(cpu);
987 	}
988 }
989 
990 static void hybrid_update_cpu_capacity_scaling(void)
991 {
992 	struct cpudata *max_perf_cpu = NULL;
993 	unsigned int max_cap_perf = 0;
994 	int cpunum;
995 
996 	for_each_online_cpu(cpunum) {
997 		struct cpudata *cpu = all_cpu_data[cpunum];
998 
999 		if (!cpu)
1000 			continue;
1001 
1002 		/*
1003 		 * During initialization, CPU performance at full capacity needs
1004 		 * to be determined.
1005 		 */
1006 		if (!hybrid_max_perf_cpu)
1007 			hybrid_get_capacity_perf(cpu);
1008 
1009 		/*
1010 		 * If hybrid_max_perf_cpu is not NULL at this point, it is
1011 		 * being replaced, so don't take it into account when looking
1012 		 * for the new one.
1013 		 */
1014 		if (cpu == hybrid_max_perf_cpu)
1015 			continue;
1016 
1017 		if (cpu->capacity_perf > max_cap_perf) {
1018 			max_cap_perf = cpu->capacity_perf;
1019 			max_perf_cpu = cpu;
1020 		}
1021 	}
1022 
1023 	if (max_perf_cpu) {
1024 		hybrid_max_perf_cpu = max_perf_cpu;
1025 		hybrid_set_capacity_of_cpus();
1026 	} else {
1027 		pr_info("Found no CPUs with nonzero maximum performance\n");
1028 		/* Revert to the flat CPU capacity structure. */
1029 		for_each_online_cpu(cpunum)
1030 			hybrid_clear_cpu_capacity(cpunum);
1031 	}
1032 }
1033 
1034 static void __hybrid_refresh_cpu_capacity_scaling(void)
1035 {
1036 	hybrid_max_perf_cpu = NULL;
1037 	hybrid_update_cpu_capacity_scaling();
1038 }
1039 
1040 static void hybrid_refresh_cpu_capacity_scaling(void)
1041 {
1042 	guard(mutex)(&hybrid_capacity_lock);
1043 
1044 	__hybrid_refresh_cpu_capacity_scaling();
1045 }
1046 
1047 static void hybrid_init_cpu_capacity_scaling(bool refresh)
1048 {
1049 	/*
1050 	 * If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
1051 	 * scaling has been enabled already and the driver is just changing the
1052 	 * operation mode.
1053 	 */
1054 	if (refresh) {
1055 		hybrid_refresh_cpu_capacity_scaling();
1056 		return;
1057 	}
1058 
1059 	/*
1060 	 * On hybrid systems, use asym capacity instead of ITMT, but because
1061 	 * the capacity of SMT threads is not deterministic even approximately,
1062 	 * do not do that when SMT is in use.
1063 	 */
1064 	if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
1065 		hybrid_refresh_cpu_capacity_scaling();
1066 		/*
1067 		 * Disabling ITMT causes sched domains to be rebuilt to disable asym
1068 		 * packing and enable asym capacity.
1069 		 */
1070 		sched_clear_itmt_support();
1071 	}
1072 }
1073 
1074 static bool hybrid_clear_max_perf_cpu(void)
1075 {
1076 	bool ret;
1077 
1078 	guard(mutex)(&hybrid_capacity_lock);
1079 
1080 	ret = !!hybrid_max_perf_cpu;
1081 	hybrid_max_perf_cpu = NULL;
1082 
1083 	return ret;
1084 }
1085 
1086 static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
1087 {
1088 	u64 cap;
1089 
1090 	rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
1091 	WRITE_ONCE(cpu->hwp_cap_cached, cap);
1092 	cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
1093 	cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
1094 }
1095 
1096 static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
1097 {
1098 	int scaling = cpu->pstate.scaling;
1099 
1100 	__intel_pstate_get_hwp_cap(cpu);
1101 
1102 	cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
1103 	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
1104 	if (scaling != cpu->pstate.perf_ctl_scaling) {
1105 		int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
1106 
1107 		cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
1108 						 perf_ctl_scaling);
1109 		cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
1110 						   perf_ctl_scaling);
1111 	}
1112 }
1113 
1114 static void hybrid_update_capacity(struct cpudata *cpu)
1115 {
1116 	unsigned int max_cap_perf;
1117 
1118 	mutex_lock(&hybrid_capacity_lock);
1119 
1120 	if (!hybrid_max_perf_cpu)
1121 		goto unlock;
1122 
1123 	/*
1124 	 * The maximum performance of the CPU may have changed, but assume
1125 	 * that the performance of the other CPUs has not changed.
1126 	 */
1127 	max_cap_perf = hybrid_max_perf_cpu->capacity_perf;
1128 
1129 	intel_pstate_get_hwp_cap(cpu);
1130 
1131 	hybrid_get_capacity_perf(cpu);
1132 	/* Should hybrid_max_perf_cpu be replaced by this CPU? */
1133 	if (cpu->capacity_perf > max_cap_perf) {
1134 		hybrid_max_perf_cpu = cpu;
1135 		hybrid_set_capacity_of_cpus();
1136 		goto unlock;
1137 	}
1138 
1139 	/* If this CPU is hybrid_max_perf_cpu, should it be replaced? */
1140 	if (cpu == hybrid_max_perf_cpu && cpu->capacity_perf < max_cap_perf) {
1141 		hybrid_update_cpu_capacity_scaling();
1142 		goto unlock;
1143 	}
1144 
1145 	hybrid_set_cpu_capacity(cpu);
1146 
1147 unlock:
1148 	mutex_unlock(&hybrid_capacity_lock);
1149 }
1150 
1151 static void intel_pstate_hwp_set(unsigned int cpu)
1152 {
1153 	struct cpudata *cpu_data = all_cpu_data[cpu];
1154 	int max, min;
1155 	u64 value;
1156 	s16 epp;
1157 
1158 	max = cpu_data->max_perf_ratio;
1159 	min = cpu_data->min_perf_ratio;
1160 
1161 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
1162 		min = max;
1163 
1164 	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
1165 
1166 	value &= ~HWP_MIN_PERF(~0L);
1167 	value |= HWP_MIN_PERF(min);
1168 
1169 	value &= ~HWP_MAX_PERF(~0L);
1170 	value |= HWP_MAX_PERF(max);
1171 
1172 	if (cpu_data->epp_policy == cpu_data->policy)
1173 		goto skip_epp;
1174 
1175 	cpu_data->epp_policy = cpu_data->policy;
1176 
1177 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
1178 		epp = intel_pstate_get_epp(cpu_data, value);
1179 		cpu_data->epp_powersave = epp;
1180 		/* If the EPP read failed, don't try to write it */
1181 		if (epp < 0)
1182 			goto skip_epp;
1183 
1184 		epp = 0;
1185 	} else {
1186 		/* Skip setting EPP when the saved value is invalid */
1187 		if (cpu_data->epp_powersave < 0)
1188 			goto skip_epp;
1189 
1190 		/*
1191 		 * No need to restore EPP when it is not zero. This
1192 		 * means:
1193 		 *  - The policy has not changed
1194 		 *  - The user has changed it manually
1195 		 *  - There was an error reading the EPB
1196 		 */
1197 		epp = intel_pstate_get_epp(cpu_data, value);
1198 		if (epp)
1199 			goto skip_epp;
1200 
1201 		epp = cpu_data->epp_powersave;
1202 	}
1203 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
1204 		value &= ~GENMASK_ULL(31, 24);
1205 		value |= (u64)epp << 24;
1206 	} else {
1207 		intel_pstate_set_epb(cpu, epp);
1208 	}
1209 skip_epp:
1210 	WRITE_ONCE(cpu_data->hwp_req_cached, value);
1211 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
1212 }
1213 
1214 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
1215 
1216 static void intel_pstate_hwp_offline(struct cpudata *cpu)
1217 {
1218 	u64 value = READ_ONCE(cpu->hwp_req_cached);
1219 	int min_perf;
1220 
1221 	intel_pstate_disable_hwp_interrupt(cpu);
1222 
1223 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
1224 		/*
1225 		 * In case the EPP has been set to "performance" by the
1226 		 * active mode "performance" scaling algorithm, replace that
1227 		 * temporary value with the cached EPP one.
1228 		 */
1229 		value &= ~GENMASK_ULL(31, 24);
1230 		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
1231 		/*
1232 		 * However, make sure that EPP will be set to "performance" when
1233 		 * the CPU is brought back online again and the "performance"
1234 		 * scaling algorithm is still in effect.
1235 		 */
1236 		cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
1237 	}
1238 
1239 	/*
1240 	 * Clear the desired perf field in the cached HWP request value to
1241 	 * prevent nonzero desired values from being leaked into the active
1242 	 * mode.
1243 	 */
1244 	value &= ~HWP_DESIRED_PERF(~0L);
1245 	WRITE_ONCE(cpu->hwp_req_cached, value);
1246 
1247 	value &= ~GENMASK_ULL(31, 0);
1248 	min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
1249 
1250 	/* Set hwp_max = hwp_min */
1251 	value |= HWP_MAX_PERF(min_perf);
1252 	value |= HWP_MIN_PERF(min_perf);
1253 
1254 	/* Set EPP to min */
1255 	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
1256 		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
1257 
1258 	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
1259 
1260 	mutex_lock(&hybrid_capacity_lock);
1261 
1262 	if (!hybrid_max_perf_cpu) {
1263 		mutex_unlock(&hybrid_capacity_lock);
1264 
1265 		return;
1266 	}
1267 
1268 	if (hybrid_max_perf_cpu == cpu)
1269 		hybrid_update_cpu_capacity_scaling();
1270 
1271 	mutex_unlock(&hybrid_capacity_lock);
1272 
1273 	/* Reset the capacity of the CPU going offline to the initial value. */
1274 	hybrid_clear_cpu_capacity(cpu->cpu);
1275 }
1276 
1277 #define POWER_CTL_EE_ENABLE	1
1278 #define POWER_CTL_EE_DISABLE	2
1279 
1280 static int power_ctl_ee_state;
1281 
1282 static void set_power_ctl_ee_state(bool input)
1283 {
1284 	u64 power_ctl;
1285 
1286 	mutex_lock(&intel_pstate_driver_lock);
1287 	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
1288 	if (input) {
1289 		power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
1290 		power_ctl_ee_state = POWER_CTL_EE_ENABLE;
1291 	} else {
1292 		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
1293 		power_ctl_ee_state = POWER_CTL_EE_DISABLE;
1294 	}
1295 	wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
1296 	mutex_unlock(&intel_pstate_driver_lock);
1297 }
1298 
1299 static void intel_pstate_hwp_enable(struct cpudata *cpudata);
1300 
1301 static void intel_pstate_hwp_reenable(struct cpudata *cpu)
1302 {
1303 	intel_pstate_hwp_enable(cpu);
1304 	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
1305 }
1306 
1307 static int intel_pstate_suspend(struct cpufreq_policy *policy)
1308 {
1309 	struct cpudata *cpu = all_cpu_data[policy->cpu];
1310 
1311 	pr_debug("CPU %d suspending\n", cpu->cpu);
1312 
1313 	cpu->suspended = true;
1314 
1315 	/* disable HWP interrupt and cancel any pending work */
1316 	intel_pstate_disable_hwp_interrupt(cpu);
1317 
1318 	return 0;
1319 }
1320 
1321 static int intel_pstate_resume(struct cpufreq_policy *policy)
1322 {
1323 	struct cpudata *cpu = all_cpu_data[policy->cpu];
1324 
1325 	pr_debug("CPU %d resuming\n", cpu->cpu);
1326 
1327 	/* Only restore if the system default is changed */
1328 	if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
1329 		set_power_ctl_ee_state(true);
1330 	else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
1331 		set_power_ctl_ee_state(false);
1332 
1333 	if (cpu->suspended && hwp_active) {
1334 		mutex_lock(&intel_pstate_limits_lock);
1335 
1336 		/* Re-enable HWP, because "online" has not done that. */
1337 		intel_pstate_hwp_reenable(cpu);
1338 
1339 		mutex_unlock(&intel_pstate_limits_lock);
1340 	}
1341 
1342 	cpu->suspended = false;
1343 
1344 	return 0;
1345 }
1346 
1347 static void intel_pstate_update_policies(void)
1348 {
1349 	int cpu;
1350 
1351 	for_each_possible_cpu(cpu)
1352 		cpufreq_update_policy(cpu);
1353 }
1354 
1355 static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
1356 					   struct cpufreq_policy *policy)
1357 {
1358 	if (hwp_active)
1359 		intel_pstate_get_hwp_cap(cpudata);
1360 
1361 	policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
1362 			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
1363 
1364 	refresh_frequency_limits(policy);
1365 }
1366 
1367 static void intel_pstate_update_limits(unsigned int cpu)
1368 {
1369 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
1370 	struct cpudata *cpudata;
1371 
1372 	if (!policy)
1373 		return;
1374 
1375 	cpudata = all_cpu_data[cpu];
1376 
1377 	__intel_pstate_update_max_freq(cpudata, policy);
1378 
1379 	/* Prevent the driver from being unregistered now. */
1380 	mutex_lock(&intel_pstate_driver_lock);
1381 
1382 	cpufreq_cpu_release(policy);
1383 
1384 	hybrid_update_capacity(cpudata);
1385 
1386 	mutex_unlock(&intel_pstate_driver_lock);
1387 }
1388 
1389 static void intel_pstate_update_limits_for_all(void)
1390 {
1391 	int cpu;
1392 
1393 	for_each_possible_cpu(cpu) {
1394 		struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
1395 
1396 		if (!policy)
1397 			continue;
1398 
1399 		__intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
1400 
1401 		cpufreq_cpu_release(policy);
1402 	}
1403 
1404 	mutex_lock(&hybrid_capacity_lock);
1405 
1406 	if (hybrid_max_perf_cpu)
1407 		__hybrid_refresh_cpu_capacity_scaling();
1408 
1409 	mutex_unlock(&hybrid_capacity_lock);
1410 }
1411 
1412 /************************** sysfs begin ************************/
1413 #define show_one(file_name, object)					\
1414 	static ssize_t show_##file_name					\
1415 	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
1416 	{								\
1417 		return sprintf(buf, "%u\n", global.object);		\
1418 	}
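/*
 * For example, show_one(max_perf_pct, max_perf_pct) further down expands to
 * a show_max_perf_pct() handler that prints global.max_perf_pct.
 */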
1419 
1420 static ssize_t intel_pstate_show_status(char *buf);
1421 static int intel_pstate_update_status(const char *buf, size_t size);
1422 
1423 static ssize_t show_status(struct kobject *kobj,
1424 			   struct kobj_attribute *attr, char *buf)
1425 {
1426 	ssize_t ret;
1427 
1428 	mutex_lock(&intel_pstate_driver_lock);
1429 	ret = intel_pstate_show_status(buf);
1430 	mutex_unlock(&intel_pstate_driver_lock);
1431 
1432 	return ret;
1433 }
1434 
1435 static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
1436 			    const char *buf, size_t count)
1437 {
1438 	char *p = memchr(buf, '\n', count);
1439 	int ret;
1440 
1441 	mutex_lock(&intel_pstate_driver_lock);
1442 	ret = intel_pstate_update_status(buf, p ? p - buf : count);
1443 	mutex_unlock(&intel_pstate_driver_lock);
1444 
1445 	return ret < 0 ? ret : count;
1446 }
1447 
1448 static ssize_t show_turbo_pct(struct kobject *kobj,
1449 				struct kobj_attribute *attr, char *buf)
1450 {
1451 	struct cpudata *cpu;
1452 	int total, no_turbo, turbo_pct;
1453 	uint32_t turbo_fp;
1454 
1455 	mutex_lock(&intel_pstate_driver_lock);
1456 
1457 	if (!intel_pstate_driver) {
1458 		mutex_unlock(&intel_pstate_driver_lock);
1459 		return -EAGAIN;
1460 	}
1461 
1462 	cpu = all_cpu_data[0];
1463 
1464 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
1465 	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
1466 	turbo_fp = div_fp(no_turbo, total);
1467 	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
1468 
1469 	mutex_unlock(&intel_pstate_driver_lock);
1470 
1471 	return sprintf(buf, "%u\n", turbo_pct);
1472 }
1473 
1474 static ssize_t show_num_pstates(struct kobject *kobj,
1475 				struct kobj_attribute *attr, char *buf)
1476 {
1477 	struct cpudata *cpu;
1478 	int total;
1479 
1480 	mutex_lock(&intel_pstate_driver_lock);
1481 
1482 	if (!intel_pstate_driver) {
1483 		mutex_unlock(&intel_pstate_driver_lock);
1484 		return -EAGAIN;
1485 	}
1486 
1487 	cpu = all_cpu_data[0];
1488 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
1489 
1490 	mutex_unlock(&intel_pstate_driver_lock);
1491 
1492 	return sprintf(buf, "%u\n", total);
1493 }
1494 
1495 static ssize_t show_no_turbo(struct kobject *kobj,
1496 			     struct kobj_attribute *attr, char *buf)
1497 {
1498 	ssize_t ret;
1499 
1500 	mutex_lock(&intel_pstate_driver_lock);
1501 
1502 	if (!intel_pstate_driver) {
1503 		mutex_unlock(&intel_pstate_driver_lock);
1504 		return -EAGAIN;
1505 	}
1506 
1507 	ret = sprintf(buf, "%u\n", global.no_turbo);
1508 
1509 	mutex_unlock(&intel_pstate_driver_lock);
1510 
1511 	return ret;
1512 }
1513 
1514 static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
1515 			      const char *buf, size_t count)
1516 {
1517 	unsigned int input;
1518 	bool no_turbo;
1519 
1520 	if (sscanf(buf, "%u", &input) != 1)
1521 		return -EINVAL;
1522 
1523 	mutex_lock(&intel_pstate_driver_lock);
1524 
1525 	if (!intel_pstate_driver) {
1526 		count = -EAGAIN;
1527 		goto unlock_driver;
1528 	}
1529 
1530 	no_turbo = !!clamp_t(int, input, 0, 1);
1531 
1532 	WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
1533 	if (global.turbo_disabled && !no_turbo) {
1534 		pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
1535 		count = -EPERM;
1536 		if (global.no_turbo)
1537 			goto unlock_driver;
1538 		else
1539 			no_turbo = 1;
1540 	}
1541 
1542 	if (no_turbo == global.no_turbo) {
1543 		goto unlock_driver;
1544 	}
1545 
1546 	WRITE_ONCE(global.no_turbo, no_turbo);
1547 
1548 	mutex_lock(&intel_pstate_limits_lock);
1549 
1550 	if (no_turbo) {
1551 		struct cpudata *cpu = all_cpu_data[0];
1552 		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
1553 
1554 		/* Squash the global minimum into the permitted range. */
1555 		if (global.min_perf_pct > pct)
1556 			global.min_perf_pct = pct;
1557 	}
1558 
1559 	mutex_unlock(&intel_pstate_limits_lock);
1560 
1561 	intel_pstate_update_limits_for_all();
1562 	arch_set_max_freq_ratio(no_turbo);
1563 
1564 unlock_driver:
1565 	mutex_unlock(&intel_pstate_driver_lock);
1566 
1567 	return count;
1568 }
1569 
1570 static void update_qos_request(enum freq_qos_req_type type)
1571 {
1572 	struct freq_qos_request *req;
1573 	struct cpufreq_policy *policy;
1574 	int i;
1575 
1576 	for_each_possible_cpu(i) {
1577 		struct cpudata *cpu = all_cpu_data[i];
1578 		unsigned int freq, perf_pct;
1579 
1580 		policy = cpufreq_cpu_get(i);
1581 		if (!policy)
1582 			continue;
1583 
1584 		req = policy->driver_data;
1585 		cpufreq_cpu_put(policy);
1586 
1587 		if (!req)
1588 			continue;
1589 
1590 		if (hwp_active)
1591 			intel_pstate_get_hwp_cap(cpu);
1592 
1593 		if (type == FREQ_QOS_MIN) {
1594 			perf_pct = global.min_perf_pct;
1595 		} else {
1596 			req++;
1597 			perf_pct = global.max_perf_pct;
1598 		}
1599 
1600 		freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
1601 
1602 		if (freq_qos_update_request(req, freq) < 0)
1603 			pr_warn("Failed to update freq constraint: CPU%d\n", i);
1604 	}
1605 }
1606 
1607 static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
1608 				  const char *buf, size_t count)
1609 {
1610 	unsigned int input;
1611 	int ret;
1612 
1613 	ret = sscanf(buf, "%u", &input);
1614 	if (ret != 1)
1615 		return -EINVAL;
1616 
1617 	mutex_lock(&intel_pstate_driver_lock);
1618 
1619 	if (!intel_pstate_driver) {
1620 		mutex_unlock(&intel_pstate_driver_lock);
1621 		return -EAGAIN;
1622 	}
1623 
1624 	mutex_lock(&intel_pstate_limits_lock);
1625 
1626 	global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
1627 
1628 	mutex_unlock(&intel_pstate_limits_lock);
1629 
1630 	if (intel_pstate_driver == &intel_pstate)
1631 		intel_pstate_update_policies();
1632 	else
1633 		update_qos_request(FREQ_QOS_MAX);
1634 
1635 	mutex_unlock(&intel_pstate_driver_lock);
1636 
1637 	return count;
1638 }
1639 
1640 static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
1641 				  const char *buf, size_t count)
1642 {
1643 	unsigned int input;
1644 	int ret;
1645 
1646 	ret = sscanf(buf, "%u", &input);
1647 	if (ret != 1)
1648 		return -EINVAL;
1649 
1650 	mutex_lock(&intel_pstate_driver_lock);
1651 
1652 	if (!intel_pstate_driver) {
1653 		mutex_unlock(&intel_pstate_driver_lock);
1654 		return -EAGAIN;
1655 	}
1656 
1657 	mutex_lock(&intel_pstate_limits_lock);
1658 
1659 	global.min_perf_pct = clamp_t(int, input,
1660 				      min_perf_pct_min(), global.max_perf_pct);
1661 
1662 	mutex_unlock(&intel_pstate_limits_lock);
1663 
1664 	if (intel_pstate_driver == &intel_pstate)
1665 		intel_pstate_update_policies();
1666 	else
1667 		update_qos_request(FREQ_QOS_MIN);
1668 
1669 	mutex_unlock(&intel_pstate_driver_lock);
1670 
1671 	return count;
1672 }
1673 
1674 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
1675 				struct kobj_attribute *attr, char *buf)
1676 {
1677 	return sprintf(buf, "%u\n", hwp_boost);
1678 }
1679 
1680 static ssize_t store_hwp_dynamic_boost(struct kobject *a,
1681 				       struct kobj_attribute *b,
1682 				       const char *buf, size_t count)
1683 {
1684 	unsigned int input;
1685 	int ret;
1686 
1687 	ret = kstrtouint(buf, 10, &input);
1688 	if (ret)
1689 		return ret;
1690 
1691 	mutex_lock(&intel_pstate_driver_lock);
1692 	hwp_boost = !!input;
1693 	intel_pstate_update_policies();
1694 	mutex_unlock(&intel_pstate_driver_lock);
1695 
1696 	return count;
1697 }
1698 
1699 static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
1700 				      char *buf)
1701 {
1702 	u64 power_ctl;
1703 	int enable;
1704 
1705 	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
1706 	enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
1707 	return sprintf(buf, "%d\n", !enable);
1708 }
1709 
1710 static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
1711 				       const char *buf, size_t count)
1712 {
1713 	bool input;
1714 	int ret;
1715 
1716 	ret = kstrtobool(buf, &input);
1717 	if (ret)
1718 		return ret;
1719 
1720 	set_power_ctl_ee_state(input);
1721 
1722 	return count;
1723 }
1724 
1725 show_one(max_perf_pct, max_perf_pct);
1726 show_one(min_perf_pct, min_perf_pct);
1727 
1728 define_one_global_rw(status);
1729 define_one_global_rw(no_turbo);
1730 define_one_global_rw(max_perf_pct);
1731 define_one_global_rw(min_perf_pct);
1732 define_one_global_ro(turbo_pct);
1733 define_one_global_ro(num_pstates);
1734 define_one_global_rw(hwp_dynamic_boost);
1735 define_one_global_rw(energy_efficiency);
1736 
1737 static struct attribute *intel_pstate_attributes[] = {
1738 	&status.attr,
1739 	&no_turbo.attr,
1740 	NULL
1741 };
1742 
1743 static const struct attribute_group intel_pstate_attr_group = {
1744 	.attrs = intel_pstate_attributes,
1745 };
1746 
1747 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
1748 
1749 static struct kobject *intel_pstate_kobject;
1750 
1751 static void __init intel_pstate_sysfs_expose_params(void)
1752 {
1753 	struct device *dev_root = bus_get_dev_root(&cpu_subsys);
1754 	int rc;
1755 
1756 	if (dev_root) {
1757 		intel_pstate_kobject = kobject_create_and_add("intel_pstate", &dev_root->kobj);
1758 		put_device(dev_root);
1759 	}
1760 	if (WARN_ON(!intel_pstate_kobject))
1761 		return;
1762 
1763 	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
1764 	if (WARN_ON(rc))
1765 		return;
1766 
1767 	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
1768 		rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
1769 		WARN_ON(rc);
1770 
1771 		rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
1772 		WARN_ON(rc);
1773 	}
1774 
1775 	/*
1776 	 * If per cpu limits are enforced, there are no global limits, so
1777 	 * return without creating max/min_perf_pct attributes
1778 	 */
1779 	if (per_cpu_limits)
1780 		return;
1781 
1782 	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
1783 	WARN_ON(rc);
1784 
1785 	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
1786 	WARN_ON(rc);
1787 
1788 	if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
1789 		rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
1790 		WARN_ON(rc);
1791 	}
1792 }
1793 
1794 static void __init intel_pstate_sysfs_remove(void)
1795 {
1796 	if (!intel_pstate_kobject)
1797 		return;
1798 
1799 	sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);
1800 
1801 	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
1802 		sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
1803 		sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
1804 	}
1805 
1806 	if (!per_cpu_limits) {
1807 		sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
1808 		sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);
1809 
1810 		if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
1811 			sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
1812 	}
1813 
1814 	kobject_put(intel_pstate_kobject);
1815 }
1816 
1817 static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
1818 {
1819 	int rc;
1820 
1821 	if (!hwp_active)
1822 		return;
1823 
1824 	rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
1825 	WARN_ON_ONCE(rc);
1826 }
1827 
1828 static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
1829 {
1830 	if (!hwp_active)
1831 		return;
1832 
1833 	sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
1834 }
1835 
1836 /************************** sysfs end ************************/
1837 
1838 static void intel_pstate_notify_work(struct work_struct *work)
1839 {
1840 	struct cpudata *cpudata =
1841 		container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
1842 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
1843 
1844 	if (policy) {
1845 		__intel_pstate_update_max_freq(cpudata, policy);
1846 
1847 		cpufreq_cpu_release(policy);
1848 
1849 		/*
1850 		 * The driver will not be unregistered while this function is
1851 		 * running, so update the capacity without acquiring the driver
1852 		 * lock.
1853 		 */
1854 		hybrid_update_capacity(cpudata);
1855 	}
1856 
1857 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
1858 }
1859 
1860 static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
1861 static cpumask_t hwp_intr_enable_mask;
1862 
1863 #define HWP_GUARANTEED_PERF_CHANGE_STATUS      BIT(0)
1864 #define HWP_HIGHEST_PERF_CHANGE_STATUS         BIT(3)
1865 
1866 void notify_hwp_interrupt(void)
1867 {
1868 	unsigned int this_cpu = smp_processor_id();
1869 	u64 value, status_mask;
1870 	unsigned long flags;
1871 
1872 	if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
1873 		return;
1874 
1875 	status_mask = HWP_GUARANTEED_PERF_CHANGE_STATUS;
1876 	if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
1877 		status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
1878 
1879 	rdmsrl_safe(MSR_HWP_STATUS, &value);
1880 	if (!(value & status_mask))
1881 		return;
1882 
1883 	raw_spin_lock_irqsave(&hwp_notify_lock, flags);
1884 
1885 	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
1886 		goto ack_intr;
1887 
1888 	schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
1889 			      msecs_to_jiffies(10));
1890 
1891 	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
1892 
1893 	return;
1894 
1895 ack_intr:
1896 	wrmsrl_safe(MSR_HWP_STATUS, 0);
1897 	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
1898 }
1899 
1900 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
1901 {
1902 	bool cancel_work;
1903 
1904 	if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
1905 		return;
1906 
1907 	/* wrmsrl_on_cpu has to be outside the spinlock as this can result in an IPI */
1908 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
1909 
1910 	raw_spin_lock_irq(&hwp_notify_lock);
1911 	cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
1912 	raw_spin_unlock_irq(&hwp_notify_lock);
1913 
1914 	if (cancel_work)
1915 		cancel_delayed_work_sync(&cpudata->hwp_notify_work);
1916 }
1917 
1918 #define HWP_GUARANTEED_PERF_CHANGE_REQ BIT(0)
1919 #define HWP_HIGHEST_PERF_CHANGE_REQ    BIT(2)
1920 
1921 static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
1922 {
1923 	/* Enable HWP notification interrupt for performance change */
1924 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
1925 		u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
1926 
1927 		raw_spin_lock_irq(&hwp_notify_lock);
1928 		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
1929 		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
1930 		raw_spin_unlock_irq(&hwp_notify_lock);
1931 
1932 		if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
1933 			interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
1934 
1935 		/* wrmsrl_on_cpu has to be outside the spinlock as this can result in an IPI */
1936 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
1937 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
1938 	}
1939 }
1940 
1941 static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
1942 {
1943 	cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
1944 
1945 	/*
1946 	 * If the EPP was set by firmware (which means that firmware enabled HWP)
1947 	 * and it is less than or equal to 0x80 (the default balance_perf EPP)
1948 	 * but less performance-oriented than the performance EPP, then use it
1949 	 * as the new balance_perf EPP.
1950 	 */
1951 	if (hwp_forced && cpudata->epp_default <= HWP_EPP_BALANCE_PERFORMANCE &&
1952 	    cpudata->epp_default > HWP_EPP_PERFORMANCE) {
1953 		epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
1954 		return;
1955 	}
1956 
1957 	/*
1958 	 * If this CPU generation doesn't call for a change in the
1959 	 * balance_perf EPP, return.
1960 	 */
1961 	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
1962 		return;
1963 
1964 	/*
1965 	 * Use hard coded value per gen to update the balance_perf
1966 	 * and default EPP.
1967 	 */
1968 	cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
1969 	intel_pstate_set_epp(cpudata, cpudata->epp_default);
1970 }
1971 
1972 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
1973 {
1974 	/* First disable HWP notification interrupt till we activate again */
1975 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
1976 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
1977 
1978 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
1979 
1980 	intel_pstate_enable_hwp_interrupt(cpudata);
1981 
1982 	if (cpudata->epp_default >= 0)
1983 		return;
1984 
1985 	intel_pstate_update_epp_defaults(cpudata);
1986 }
1987 
1988 static int atom_get_min_pstate(int not_used)
1989 {
1990 	u64 value;
1991 
1992 	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1993 	return (value >> 8) & 0x7F;
1994 }
1995 
1996 static int atom_get_max_pstate(int not_used)
1997 {
1998 	u64 value;
1999 
2000 	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
2001 	return (value >> 16) & 0x7F;
2002 }
2003 
2004 static int atom_get_turbo_pstate(int not_used)
2005 {
2006 	u64 value;
2007 
2008 	rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
2009 	return value & 0x7F;
2010 }
2011 
2012 static u64 atom_get_val(struct cpudata *cpudata, int pstate)
2013 {
2014 	u64 val;
2015 	int32_t vid_fp;
2016 	u32 vid;
2017 
2018 	val = (u64)pstate << 8;
2019 	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
2020 		val |= (u64)1 << 32;
2021 
2022 	vid_fp = cpudata->vid.min + mul_fp(
2023 		int_tofp(pstate - cpudata->pstate.min_pstate),
2024 		cpudata->vid.ratio);
2025 
2026 	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
2027 	vid = ceiling_fp(vid_fp);
2028 
2029 	if (pstate > cpudata->pstate.max_pstate)
2030 		vid = cpudata->vid.turbo;
2031 
2032 	return val | vid;
2033 }
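/*
 * Illustrative note (added, not from the original source): the PERF_CTL
 * value assembled above packs the target ratio into bits 15:8, sets bit 32
 * (turbo disengage) when "no_turbo" is requested and turbo is not already
 * disabled, and places the VID in the low byte.  For example, a pstate of
 * 20 with a VID of 0x30 yields 0x1430 (plus bit 32 when turbo is being
 * disengaged).
 */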
2034 
2035 static int silvermont_get_scaling(void)
2036 {
2037 	u64 value;
2038 	int i;
2039 	/* Defined in Table 35-6 from SDM (Sept 2015) */
2040 	static int silvermont_freq_table[] = {
2041 		83300, 100000, 133300, 116700, 80000};
2042 
2043 	rdmsrl(MSR_FSB_FREQ, value);
2044 	i = value & 0x7;
2045 	WARN_ON(i > 4);
2046 
2047 	return silvermont_freq_table[i];
2048 }
2049 
2050 static int airmont_get_scaling(void)
2051 {
2052 	u64 value;
2053 	int i;
2054 	/* Defined in Table 35-10 from SDM (Sept 2015) */
2055 	static int airmont_freq_table[] = {
2056 		83300, 100000, 133300, 116700, 80000,
2057 		93300, 90000, 88900, 87500};
2058 
2059 	rdmsrl(MSR_FSB_FREQ, value);
2060 	i = value & 0xF;
2061 	WARN_ON(i > 8);
2062 
2063 	return airmont_freq_table[i];
2064 }
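/*
 * Illustrative note (added): both scaling helpers above return the bus
 * clock in kHz as selected by the low bits of MSR_FSB_FREQ, and the
 * resulting CPU frequency is pstate * scaling.  For example, a P-state
 * ratio of 20 with an 83300 kHz bus clock corresponds to roughly
 * 20 * 83300 = 1666000 kHz (about 1.67 GHz).
 */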
2065 
2066 static void atom_get_vid(struct cpudata *cpudata)
2067 {
2068 	u64 value;
2069 
2070 	rdmsrl(MSR_ATOM_CORE_VIDS, value);
2071 	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
2072 	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
2073 	cpudata->vid.ratio = div_fp(
2074 		cpudata->vid.max - cpudata->vid.min,
2075 		int_tofp(cpudata->pstate.max_pstate -
2076 			cpudata->pstate.min_pstate));
2077 
2078 	rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
2079 	cpudata->vid.turbo = value & 0x7f;
2080 }
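/*
 * Illustrative note (added, hypothetical values): atom_get_val() above
 * interpolates the VID linearly between vid.min and vid.max using the
 * vid.ratio computed here.  E.g. with min_pstate 6, max_pstate 26,
 * vid.min 0x20 and vid.max 0x34, a request for pstate 16 lands halfway
 * between the endpoints, at VID 0x2a.
 */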
2081 
2082 static int core_get_min_pstate(int cpu)
2083 {
2084 	u64 value;
2085 
2086 	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
2087 	return (value >> 40) & 0xFF;
2088 }
2089 
2090 static int core_get_max_pstate_physical(int cpu)
2091 {
2092 	u64 value;
2093 
2094 	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
2095 	return (value >> 8) & 0xFF;
2096 }
2097 
2098 static int core_get_tdp_ratio(int cpu, u64 plat_info)
2099 {
2100 	/* Check how many TDP levels present */
2101 	if (plat_info & 0x600000000) {
2102 		u64 tdp_ctrl;
2103 		u64 tdp_ratio;
2104 		int tdp_msr;
2105 		int err;
2106 
2107 		/* Get the TDP level (0, 1, 2) to get ratios */
2108 		err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
2109 		if (err)
2110 			return err;
2111 
2112 		/* TDP MSRs are contiguous starting at 0x648 */
2113 		tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
2114 		err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
2115 		if (err)
2116 			return err;
2117 
2118 		/* For level 1 and 2, bits[23:16] contain the ratio */
2119 		if (tdp_ctrl & 0x03)
2120 			tdp_ratio >>= 16;
2121 
2122 		tdp_ratio &= 0xff; /* ratios are only 8 bits long */
2123 		pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
2124 
2125 		return (int)tdp_ratio;
2126 	}
2127 
2128 	return -ENXIO;
2129 }
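/*
 * Illustrative note (added): the 0x600000000 mask above selects bits 34:33
 * of MSR_PLATFORM_INFO (the number of configurable TDP levels field); when
 * it is non-zero, MSR_CONFIG_TDP_CONTROL picks level 0, 1 or 2 and the
 * matching MSR_CONFIG_TDP_* register supplies the ratio read here.
 */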
2130 
2131 static int core_get_max_pstate(int cpu)
2132 {
2133 	u64 tar;
2134 	u64 plat_info;
2135 	int max_pstate;
2136 	int tdp_ratio;
2137 	int err;
2138 
2139 	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
2140 	max_pstate = (plat_info >> 8) & 0xFF;
2141 
2142 	tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
2143 	if (tdp_ratio <= 0)
2144 		return max_pstate;
2145 
2146 	if (hwp_active) {
2147 		/* Turbo activation ratio is not used on HWP platforms */
2148 		return tdp_ratio;
2149 	}
2150 
2151 	err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
2152 	if (!err) {
2153 		int tar_levels;
2154 
2155 		/* Do some sanity checking for safety */
2156 		tar_levels = tar & 0xff;
2157 		if (tdp_ratio - 1 == tar_levels) {
2158 			max_pstate = tar_levels;
2159 			pr_debug("max_pstate=TAC %x\n", max_pstate);
2160 		}
2161 	}
2162 
2163 	return max_pstate;
2164 }
2165 
2166 static int core_get_turbo_pstate(int cpu)
2167 {
2168 	u64 value;
2169 	int nont, ret;
2170 
2171 	rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
2172 	nont = core_get_max_pstate(cpu);
2173 	ret = (value) & 255;
2174 	if (ret <= nont)
2175 		ret = nont;
2176 	return ret;
2177 }
2178 
2179 static u64 core_get_val(struct cpudata *cpudata, int pstate)
2180 {
2181 	u64 val;
2182 
2183 	val = (u64)pstate << 8;
2184 	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
2185 		val |= (u64)1 << 32;
2186 
2187 	return val;
2188 }
2189 
2190 static int knl_get_aperf_mperf_shift(void)
2191 {
2192 	return 10;
2193 }
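/*
 * Illustrative note (added): the shift returned above is applied to the
 * MPERF delta when the busy fraction is computed in get_target_pstate()
 * (mperf << aperf_mperf_shift before dividing by the TSC delta),
 * compensating for the rate at which the feedback counters tick on
 * Knights Landing relative to the TSC.
 */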
2194 
2195 static int knl_get_turbo_pstate(int cpu)
2196 {
2197 	u64 value;
2198 	int nont, ret;
2199 
2200 	rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
2201 	nont = core_get_max_pstate(cpu);
2202 	ret = (((value) >> 8) & 0xFF);
2203 	if (ret <= nont)
2204 		ret = nont;
2205 	return ret;
2206 }
2207 
2208 static void hybrid_get_type(void *data)
2209 {
2210 	u8 *cpu_type = data;
2211 
2212 	*cpu_type = get_this_hybrid_cpu_type();
2213 }
2214 
2215 static int hwp_get_cpu_scaling(int cpu)
2216 {
2217 	u8 cpu_type = 0;
2218 
2219 	smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
2220 	/* P-cores have a smaller perf level-to-frequency scaling factor. */
2221 	if (cpu_type == 0x40)
2222 		return hybrid_scaling_factor;
2223 
2224 	/* Use default core scaling for E-cores */
2225 	if (cpu_type == 0x20)
2226 		return core_get_scaling();
2227 
2228 	/*
2229 	 * If reached here, this system is either non-hybrid (like Tiger
2230 	 * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with
2231 	 * no E cores (in which case CPUID for hybrid support is 0).
2232 	 *
2233 	 * The CPPC nominal_frequency field is 0 for non-hybrid systems,
2234 	 * so the default core scaling will be used for them.
2235 	 */
2236 	return intel_pstate_cppc_get_scaling(cpu);
2237 }
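/*
 * Illustrative note (added): the 0x40 and 0x20 values above are the CPUID
 * core-type identifiers for Intel Core (P-core) and Intel Atom (E-core)
 * respectively, as returned by get_this_hybrid_cpu_type().
 */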
2238 
2239 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
2240 {
2241 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
2242 	cpu->pstate.current_pstate = pstate;
2243 	/*
2244 	 * Generally, there is no guarantee that this code will always run on
2245 	 * the CPU being updated, so force the register update to run on the
2246 	 * right CPU.
2247 	 */
2248 	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
2249 		      pstate_funcs.get_val(cpu, pstate));
2250 }
2251 
2252 static void intel_pstate_set_min_pstate(struct cpudata *cpu)
2253 {
2254 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
2255 }
2256 
2257 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
2258 {
2259 	int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
2260 	int perf_ctl_scaling = pstate_funcs.get_scaling();
2261 
2262 	cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
2263 	cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
2264 	cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
2265 
2266 	if (hwp_active && !hwp_mode_bdw) {
2267 		__intel_pstate_get_hwp_cap(cpu);
2268 
2269 		if (pstate_funcs.get_cpu_scaling) {
2270 			cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
2271 			if (cpu->pstate.scaling != perf_ctl_scaling) {
2272 				intel_pstate_hybrid_hwp_adjust(cpu);
2273 				hwp_is_hybrid = true;
2274 			}
2275 		} else {
2276 			cpu->pstate.scaling = perf_ctl_scaling;
2277 		}
2278 		/*
2279 		 * If the CPU is going online for the first time and it was
2280 		 * offline initially, asym capacity scaling needs to be updated.
2281 		 */
2282 		hybrid_update_capacity(cpu);
2283 	} else {
2284 		cpu->pstate.scaling = perf_ctl_scaling;
2285 		cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
2286 		cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
2287 	}
2288 
2289 	if (cpu->pstate.scaling == perf_ctl_scaling) {
2290 		cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
2291 		cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
2292 		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
2293 	}
2294 
2295 	if (pstate_funcs.get_aperf_mperf_shift)
2296 		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
2297 
2298 	if (pstate_funcs.get_vid)
2299 		pstate_funcs.get_vid(cpu);
2300 
2301 	intel_pstate_set_min_pstate(cpu);
2302 }
2303 
2304 /*
2305  * A long hold time keeps the high performance limits in place for a long
2306  * time, which negatively impacts perf/watt for some workloads, like
2307  * specpower. 3 ms is based on experiments with some
2308  * workloads.
2309  */
2310 static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
2311 
2312 static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
2313 {
2314 	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
2315 	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
2316 	u32 max_limit = (hwp_req & 0xff00) >> 8;
2317 	u32 min_limit = (hwp_req & 0xff);
2318 	u32 boost_level1;
2319 
2320 	/*
2321 	 * Cases to consider (User changes via sysfs or boot time):
2322 	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
2323 	 *	No boost, return.
2324 	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
2325 	 *     Should result in one level boost only for P0.
2326 	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
2327 	 *     Should result in two level boost:
2328 	 *         (min + p1)/2 and P1.
2329 	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
2330 	 *     Should result in three level boost:
2331 	 *        (min + p1)/2, P1 and P0.
2332 	 */
2333 
2334 	/* If max and min are equal or already at max, nothing to boost */
2335 	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
2336 		return;
2337 
2338 	if (!cpu->hwp_boost_min)
2339 		cpu->hwp_boost_min = min_limit;
2340 
2341 	/* level at the halfway mark between min and guaranteed */
2342 	boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;
2343 
2344 	if (cpu->hwp_boost_min < boost_level1)
2345 		cpu->hwp_boost_min = boost_level1;
2346 	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
2347 		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
2348 	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
2349 		 max_limit != HWP_GUARANTEED_PERF(hwp_cap))
2350 		cpu->hwp_boost_min = max_limit;
2351 	else
2352 		return;
2353 
2354 	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
2355 	wrmsrl(MSR_HWP_REQUEST, hwp_req);
2356 	cpu->last_update = cpu->sample.time;
2357 }
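/*
 * Worked example (added for illustration, hypothetical values): with
 * min_limit 10, guaranteed perf (P1) 20 and max_limit (P0) 30, successive
 * boost-up calls request 15 ((min + P1) / 2), then 20 (P1), then 30 (P0),
 * matching the three-level boost described in the comment above.
 */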
2358 
2359 static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
2360 {
2361 	if (cpu->hwp_boost_min) {
2362 		bool expired;
2363 
2364 		/* Check if we are idle for hold time to boost down */
2365 		expired = time_after64(cpu->sample.time, cpu->last_update +
2366 				       hwp_boost_hold_time_ns);
2367 		if (expired) {
2368 			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
2369 			cpu->hwp_boost_min = 0;
2370 		}
2371 	}
2372 	cpu->last_update = cpu->sample.time;
2373 }
2374 
2375 static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
2376 						      u64 time)
2377 {
2378 	cpu->sample.time = time;
2379 
2380 	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
2381 		bool do_io = false;
2382 
2383 		cpu->sched_flags = 0;
2384 		/*
2385 		 * Set the iowait_boost flag and update the time. Since the IOWAIT
2386 		 * flag is set all the time, we can't conclude from just one
2387 		 * occurrence that some IO-bound activity is scheduled on this
2388 		 * CPU. If we receive at least two in two consecutive ticks,
2389 		 * then treat it as a boost candidate.
2390 		 */
2391 		if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
2392 			do_io = true;
2393 
2394 		cpu->last_io_update = time;
2395 
2396 		if (do_io)
2397 			intel_pstate_hwp_boost_up(cpu);
2398 
2399 	} else {
2400 		intel_pstate_hwp_boost_down(cpu);
2401 	}
2402 }
2403 
2404 static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
2405 						u64 time, unsigned int flags)
2406 {
2407 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
2408 
2409 	cpu->sched_flags |= flags;
2410 
2411 	if (smp_processor_id() == cpu->cpu)
2412 		intel_pstate_update_util_hwp_local(cpu, time);
2413 }
2414 
2415 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
2416 {
2417 	struct sample *sample = &cpu->sample;
2418 
2419 	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
2420 }
2421 
2422 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
2423 {
2424 	u64 aperf, mperf;
2425 	unsigned long flags;
2426 	u64 tsc;
2427 
2428 	local_irq_save(flags);
2429 	rdmsrl(MSR_IA32_APERF, aperf);
2430 	rdmsrl(MSR_IA32_MPERF, mperf);
2431 	tsc = rdtsc();
2432 	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
2433 		local_irq_restore(flags);
2434 		return false;
2435 	}
2436 	local_irq_restore(flags);
2437 
2438 	cpu->last_sample_time = cpu->sample.time;
2439 	cpu->sample.time = time;
2440 	cpu->sample.aperf = aperf;
2441 	cpu->sample.mperf = mperf;
2442 	cpu->sample.tsc =  tsc;
2443 	cpu->sample.aperf -= cpu->prev_aperf;
2444 	cpu->sample.mperf -= cpu->prev_mperf;
2445 	cpu->sample.tsc -= cpu->prev_tsc;
2446 
2447 	cpu->prev_aperf = aperf;
2448 	cpu->prev_mperf = mperf;
2449 	cpu->prev_tsc = tsc;
2450 	/*
2451 	 * First time this function is invoked in a given cycle, all of the
2452 	 * previous sample data fields are equal to zero or stale and they must
2453 	 * be populated with meaningful numbers for things to work, so assume
2454 	 * that sample.time will always be reset before setting the utilization
2455 	 * update hook and make the caller skip the sample then.
2456 	 */
2457 	if (cpu->last_sample_time) {
2458 		intel_pstate_calc_avg_perf(cpu);
2459 		return true;
2460 	}
2461 	return false;
2462 }
2463 
2464 static inline int32_t get_avg_frequency(struct cpudata *cpu)
2465 {
2466 	return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
2467 }
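/*
 * Illustrative note (added): core_avg_perf holds the APERF/MPERF ratio of
 * the last sample period in extended fixed point, so e.g. a ratio of 1.5
 * makes get_avg_frequency() above report roughly 1.5 * cpu_khz.
 */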
2468 
2469 static inline int32_t get_avg_pstate(struct cpudata *cpu)
2470 {
2471 	return mul_ext_fp(cpu->pstate.max_pstate_physical,
2472 			  cpu->sample.core_avg_perf);
2473 }
2474 
2475 static inline int32_t get_target_pstate(struct cpudata *cpu)
2476 {
2477 	struct sample *sample = &cpu->sample;
2478 	int32_t busy_frac;
2479 	int target, avg_pstate;
2480 
2481 	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
2482 			   sample->tsc);
2483 
2484 	if (busy_frac < cpu->iowait_boost)
2485 		busy_frac = cpu->iowait_boost;
2486 
2487 	sample->busy_scaled = busy_frac * 100;
2488 
2489 	target = READ_ONCE(global.no_turbo) ?
2490 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
2491 	target += target >> 2;
2492 	target = mul_fp(target, busy_frac);
2493 	if (target < cpu->pstate.min_pstate)
2494 		target = cpu->pstate.min_pstate;
2495 
2496 	/*
2497 	 * If the average P-state during the previous cycle was higher than the
2498 	 * current target, add 50% of the difference to the target to reduce
2499 	 * possible performance oscillations and offset possible performance
2500 	 * loss related to moving the workload from one CPU to another within
2501 	 * a package/module.
2502 	 */
2503 	avg_pstate = get_avg_pstate(cpu);
2504 	if (avg_pstate > target)
2505 		target += (avg_pstate - target) >> 1;
2506 
2507 	return target;
2508 }
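/*
 * Worked example (added for illustration, hypothetical values): with a
 * turbo_pstate of 40 and a measured busy fraction of 0.5, the code above
 * computes target = (40 + 40/4) * 0.5 = 25; the extra 25% of headroom
 * means a fully busy CPU is driven all the way to the top P-state rather
 * than merely matching its current utilization.
 */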
2509 
2510 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
2511 {
2512 	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
2513 	int max_pstate = max(min_pstate, cpu->max_perf_ratio);
2514 
2515 	return clamp_t(int, pstate, min_pstate, max_pstate);
2516 }
2517 
2518 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
2519 {
2520 	if (pstate == cpu->pstate.current_pstate)
2521 		return;
2522 
2523 	cpu->pstate.current_pstate = pstate;
2524 	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
2525 }
2526 
2527 static void intel_pstate_adjust_pstate(struct cpudata *cpu)
2528 {
2529 	int from = cpu->pstate.current_pstate;
2530 	struct sample *sample;
2531 	int target_pstate;
2532 
2533 	target_pstate = get_target_pstate(cpu);
2534 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2535 	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
2536 	intel_pstate_update_pstate(cpu, target_pstate);
2537 
2538 	sample = &cpu->sample;
2539 	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
2540 		fp_toint(sample->busy_scaled),
2541 		from,
2542 		cpu->pstate.current_pstate,
2543 		sample->mperf,
2544 		sample->aperf,
2545 		sample->tsc,
2546 		get_avg_frequency(cpu),
2547 		fp_toint(cpu->iowait_boost * 100));
2548 }
2549 
2550 static void intel_pstate_update_util(struct update_util_data *data, u64 time,
2551 				     unsigned int flags)
2552 {
2553 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
2554 	u64 delta_ns;
2555 
2556 	/* Don't allow remote callbacks */
2557 	if (smp_processor_id() != cpu->cpu)
2558 		return;
2559 
2560 	delta_ns = time - cpu->last_update;
2561 	if (flags & SCHED_CPUFREQ_IOWAIT) {
2562 		/* Start over if the CPU may have been idle. */
2563 		if (delta_ns > TICK_NSEC) {
2564 			cpu->iowait_boost = ONE_EIGHTH_FP;
2565 		} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
2566 			cpu->iowait_boost <<= 1;
2567 			if (cpu->iowait_boost > int_tofp(1))
2568 				cpu->iowait_boost = int_tofp(1);
2569 		} else {
2570 			cpu->iowait_boost = ONE_EIGHTH_FP;
2571 		}
2572 	} else if (cpu->iowait_boost) {
2573 		/* Clear iowait_boost if the CPU may have been idle. */
2574 		if (delta_ns > TICK_NSEC)
2575 			cpu->iowait_boost = 0;
2576 		else
2577 			cpu->iowait_boost >>= 1;
2578 	}
2579 	cpu->last_update = time;
2580 	delta_ns = time - cpu->sample.time;
2581 	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
2582 		return;
2583 
2584 	if (intel_pstate_sample(cpu, time))
2585 		intel_pstate_adjust_pstate(cpu);
2586 }
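/*
 * Illustrative note (added): the iowait boost above is a fixed-point
 * fraction of full busy.  It starts at 1/8 on an IOWAIT wakeup, doubles on
 * each further IOWAIT wakeup arriving within a tick (capped at 1), and is
 * halved on every non-IOWAIT update, so e.g. 1/8 -> 1/4 -> 1/2 -> 1 under
 * sustained IO and back down once the IO stops.  get_target_pstate() uses
 * it as a floor on the measured busy fraction.
 */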
2587 
2588 static struct pstate_funcs core_funcs = {
2589 	.get_max = core_get_max_pstate,
2590 	.get_max_physical = core_get_max_pstate_physical,
2591 	.get_min = core_get_min_pstate,
2592 	.get_turbo = core_get_turbo_pstate,
2593 	.get_scaling = core_get_scaling,
2594 	.get_val = core_get_val,
2595 };
2596 
2597 static const struct pstate_funcs silvermont_funcs = {
2598 	.get_max = atom_get_max_pstate,
2599 	.get_max_physical = atom_get_max_pstate,
2600 	.get_min = atom_get_min_pstate,
2601 	.get_turbo = atom_get_turbo_pstate,
2602 	.get_val = atom_get_val,
2603 	.get_scaling = silvermont_get_scaling,
2604 	.get_vid = atom_get_vid,
2605 };
2606 
2607 static const struct pstate_funcs airmont_funcs = {
2608 	.get_max = atom_get_max_pstate,
2609 	.get_max_physical = atom_get_max_pstate,
2610 	.get_min = atom_get_min_pstate,
2611 	.get_turbo = atom_get_turbo_pstate,
2612 	.get_val = atom_get_val,
2613 	.get_scaling = airmont_get_scaling,
2614 	.get_vid = atom_get_vid,
2615 };
2616 
2617 static const struct pstate_funcs knl_funcs = {
2618 	.get_max = core_get_max_pstate,
2619 	.get_max_physical = core_get_max_pstate_physical,
2620 	.get_min = core_get_min_pstate,
2621 	.get_turbo = knl_get_turbo_pstate,
2622 	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
2623 	.get_scaling = core_get_scaling,
2624 	.get_val = core_get_val,
2625 };
2626 
2627 #define X86_MATCH(vfm, policy)					 \
2628 	X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, &policy)
2629 
2630 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
2631 	X86_MATCH(INTEL_SANDYBRIDGE,		core_funcs),
2632 	X86_MATCH(INTEL_SANDYBRIDGE_X,		core_funcs),
2633 	X86_MATCH(INTEL_ATOM_SILVERMONT,	silvermont_funcs),
2634 	X86_MATCH(INTEL_IVYBRIDGE,		core_funcs),
2635 	X86_MATCH(INTEL_HASWELL,		core_funcs),
2636 	X86_MATCH(INTEL_BROADWELL,		core_funcs),
2637 	X86_MATCH(INTEL_IVYBRIDGE_X,		core_funcs),
2638 	X86_MATCH(INTEL_HASWELL_X,		core_funcs),
2639 	X86_MATCH(INTEL_HASWELL_L,		core_funcs),
2640 	X86_MATCH(INTEL_HASWELL_G,		core_funcs),
2641 	X86_MATCH(INTEL_BROADWELL_G,		core_funcs),
2642 	X86_MATCH(INTEL_ATOM_AIRMONT,		airmont_funcs),
2643 	X86_MATCH(INTEL_SKYLAKE_L,		core_funcs),
2644 	X86_MATCH(INTEL_BROADWELL_X,		core_funcs),
2645 	X86_MATCH(INTEL_SKYLAKE,		core_funcs),
2646 	X86_MATCH(INTEL_BROADWELL_D,		core_funcs),
2647 	X86_MATCH(INTEL_XEON_PHI_KNL,		knl_funcs),
2648 	X86_MATCH(INTEL_XEON_PHI_KNM,		knl_funcs),
2649 	X86_MATCH(INTEL_ATOM_GOLDMONT,		core_funcs),
2650 	X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS,	core_funcs),
2651 	X86_MATCH(INTEL_SKYLAKE_X,		core_funcs),
2652 	X86_MATCH(INTEL_COMETLAKE,		core_funcs),
2653 	X86_MATCH(INTEL_ICELAKE_X,		core_funcs),
2654 	X86_MATCH(INTEL_TIGERLAKE,		core_funcs),
2655 	X86_MATCH(INTEL_SAPPHIRERAPIDS_X,	core_funcs),
2656 	X86_MATCH(INTEL_EMERALDRAPIDS_X,	core_funcs),
2657 	X86_MATCH(INTEL_GRANITERAPIDS_D,	core_funcs),
2658 	X86_MATCH(INTEL_GRANITERAPIDS_X,	core_funcs),
2659 	{}
2660 };
2661 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
2662 
2663 #ifdef CONFIG_ACPI
2664 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
2665 	X86_MATCH(INTEL_BROADWELL_D,		core_funcs),
2666 	X86_MATCH(INTEL_BROADWELL_X,		core_funcs),
2667 	X86_MATCH(INTEL_SKYLAKE_X,		core_funcs),
2668 	X86_MATCH(INTEL_ICELAKE_X,		core_funcs),
2669 	X86_MATCH(INTEL_SAPPHIRERAPIDS_X,	core_funcs),
2670 	X86_MATCH(INTEL_EMERALDRAPIDS_X,	core_funcs),
2671 	X86_MATCH(INTEL_GRANITERAPIDS_D,	core_funcs),
2672 	X86_MATCH(INTEL_GRANITERAPIDS_X,	core_funcs),
2673 	X86_MATCH(INTEL_ATOM_CRESTMONT,		core_funcs),
2674 	X86_MATCH(INTEL_ATOM_CRESTMONT_X,	core_funcs),
2675 	{}
2676 };
2677 #endif
2678 
2679 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
2680 	X86_MATCH(INTEL_KABYLAKE,		core_funcs),
2681 	{}
2682 };
2683 
2684 static int intel_pstate_init_cpu(unsigned int cpunum)
2685 {
2686 	struct cpudata *cpu;
2687 
2688 	cpu = all_cpu_data[cpunum];
2689 
2690 	if (!cpu) {
2691 		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
2692 		if (!cpu)
2693 			return -ENOMEM;
2694 
2695 		WRITE_ONCE(all_cpu_data[cpunum], cpu);
2696 
2697 		cpu->cpu = cpunum;
2698 
2699 		cpu->epp_default = -EINVAL;
2700 
2701 		if (hwp_active) {
2702 			intel_pstate_hwp_enable(cpu);
2703 
2704 			if (intel_pstate_acpi_pm_profile_server())
2705 				hwp_boost = true;
2706 		}
2707 	} else if (hwp_active) {
2708 		/*
2709 		 * Re-enable HWP in case this happens after a resume from ACPI
2710 		 * S3 if the CPU was offline during the whole suspend/resume
2711 		 * cycle.
2712 		 */
2713 		intel_pstate_hwp_reenable(cpu);
2714 	}
2715 
2716 	cpu->epp_powersave = -EINVAL;
2717 	cpu->epp_policy = 0;
2718 
2719 	intel_pstate_get_cpu_pstates(cpu);
2720 
2721 	pr_debug("controlling: cpu %d\n", cpunum);
2722 
2723 	return 0;
2724 }
2725 
2726 static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
2727 {
2728 	struct cpudata *cpu = all_cpu_data[cpu_num];
2729 
2730 	if (hwp_active && !hwp_boost)
2731 		return;
2732 
2733 	if (cpu->update_util_set)
2734 		return;
2735 
2736 	/* Prevent intel_pstate_update_util() from using stale data. */
2737 	cpu->sample.time = 0;
2738 	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
2739 				     (hwp_active ?
2740 				      intel_pstate_update_util_hwp :
2741 				      intel_pstate_update_util));
2742 	cpu->update_util_set = true;
2743 }
2744 
2745 static void intel_pstate_clear_update_util_hook(unsigned int cpu)
2746 {
2747 	struct cpudata *cpu_data = all_cpu_data[cpu];
2748 
2749 	if (!cpu_data->update_util_set)
2750 		return;
2751 
2752 	cpufreq_remove_update_util_hook(cpu);
2753 	cpu_data->update_util_set = false;
2754 	synchronize_rcu();
2755 }
2756 
2757 static int intel_pstate_get_max_freq(struct cpudata *cpu)
2758 {
2759 	return READ_ONCE(global.no_turbo) ?
2760 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2761 }
2762 
2763 static void intel_pstate_update_perf_limits(struct cpudata *cpu,
2764 					    unsigned int policy_min,
2765 					    unsigned int policy_max)
2766 {
2767 	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
2768 	int32_t max_policy_perf, min_policy_perf;
2769 
2770 	max_policy_perf = policy_max / perf_ctl_scaling;
2771 	if (policy_max == policy_min) {
2772 		min_policy_perf = max_policy_perf;
2773 	} else {
2774 		min_policy_perf = policy_min / perf_ctl_scaling;
2775 		min_policy_perf = clamp_t(int32_t, min_policy_perf,
2776 					  0, max_policy_perf);
2777 	}
2778 
2779 	/*
2780 	 * HWP needs some special consideration, because HWP_REQUEST uses
2781 	 * abstract values to represent performance rather than pure ratios.
2782 	 */
2783 	if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
2784 		int freq;
2785 
2786 		freq = max_policy_perf * perf_ctl_scaling;
2787 		max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
2788 		freq = min_policy_perf * perf_ctl_scaling;
2789 		min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
2790 	}
2791 
2792 	pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
2793 		 cpu->cpu, min_policy_perf, max_policy_perf);
2794 
2795 	/* Normalize user input to [min_perf, max_perf] */
2796 	if (per_cpu_limits) {
2797 		cpu->min_perf_ratio = min_policy_perf;
2798 		cpu->max_perf_ratio = max_policy_perf;
2799 	} else {
2800 		int turbo_max = cpu->pstate.turbo_pstate;
2801 		int32_t global_min, global_max;
2802 
2803 		/* Global limits are in percent of the maximum turbo P-state. */
2804 		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
2805 		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
2806 		global_min = clamp_t(int32_t, global_min, 0, global_max);
2807 
2808 		pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
2809 			 global_min, global_max);
2810 
2811 		cpu->min_perf_ratio = max(min_policy_perf, global_min);
2812 		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
2813 		cpu->max_perf_ratio = min(max_policy_perf, global_max);
2814 		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
2815 
2816 		/* Make sure min_perf <= max_perf */
2817 		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
2818 					  cpu->max_perf_ratio);
2819 
2820 	}
2821 	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
2822 		 cpu->max_perf_ratio,
2823 		 cpu->min_perf_ratio);
2824 }
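/*
 * Worked example (added for illustration, hypothetical values): with a
 * turbo P-state of 40 and global limits min_perf_pct = 25, max_perf_pct =
 * 75, the global ratio window above becomes [10, 30]; the per-policy
 * min/max ratios are then clipped into that window before being applied.
 */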
2825 
2826 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2827 {
2828 	struct cpudata *cpu;
2829 
2830 	if (!policy->cpuinfo.max_freq)
2831 		return -ENODEV;
2832 
2833 	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
2834 		 policy->cpuinfo.max_freq, policy->max);
2835 
2836 	cpu = all_cpu_data[policy->cpu];
2837 	cpu->policy = policy->policy;
2838 
2839 	mutex_lock(&intel_pstate_limits_lock);
2840 
2841 	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
2842 
2843 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
2844 		int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
2845 
2846 		/*
2847 		 * NOHZ_FULL CPUs need this as the governor callback may not
2848 		 * be invoked on them.
2849 		 */
2850 		intel_pstate_clear_update_util_hook(policy->cpu);
2851 		intel_pstate_set_pstate(cpu, pstate);
2852 	} else {
2853 		intel_pstate_set_update_util_hook(policy->cpu);
2854 	}
2855 
2856 	if (hwp_active) {
2857 		/*
2858 		 * If hwp_boost was active before and has been turned off
2859 		 * dynamically, the utilization update hook needs to be
2860 		 * cleared.
2861 		 */
2862 		if (!hwp_boost)
2863 			intel_pstate_clear_update_util_hook(policy->cpu);
2864 		intel_pstate_hwp_set(policy->cpu);
2865 	}
2866 	/*
2867 	 * policy->cur is never updated with the intel_pstate driver, but it
2868 	 * is used as a stale frequency value. So, keep it within limits.
2869 	 */
2870 	policy->cur = policy->min;
2871 
2872 	mutex_unlock(&intel_pstate_limits_lock);
2873 
2874 	return 0;
2875 }
2876 
2877 static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
2878 					   struct cpufreq_policy_data *policy)
2879 {
2880 	if (!hwp_active &&
2881 	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
2882 	    policy->max < policy->cpuinfo.max_freq &&
2883 	    policy->max > cpu->pstate.max_freq) {
2884 		pr_debug("policy->max > max non turbo frequency\n");
2885 		policy->max = policy->cpuinfo.max_freq;
2886 	}
2887 }
2888 
2889 static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
2890 					   struct cpufreq_policy_data *policy)
2891 {
2892 	int max_freq;
2893 
2894 	if (hwp_active) {
2895 		intel_pstate_get_hwp_cap(cpu);
2896 		max_freq = READ_ONCE(global.no_turbo) ?
2897 				cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2898 	} else {
2899 		max_freq = intel_pstate_get_max_freq(cpu);
2900 	}
2901 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
2902 
2903 	intel_pstate_adjust_policy_max(cpu, policy);
2904 }
2905 
2906 static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
2907 {
2908 	intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);
2909 
2910 	return 0;
2911 }
2912 
2913 static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
2914 {
2915 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2916 
2917 	pr_debug("CPU %d going offline\n", cpu->cpu);
2918 
2919 	if (cpu->suspended)
2920 		return 0;
2921 
2922 	/*
2923 	 * If the CPU is an SMT thread and it goes offline with the performance
2924 	 * settings different from the minimum, it will prevent its sibling
2925 	 * from getting to lower performance levels, so force the minimum
2926 	 * performance on CPU offline to prevent that from happening.
2927 	 */
2928 	if (hwp_active)
2929 		intel_pstate_hwp_offline(cpu);
2930 	else
2931 		intel_pstate_set_min_pstate(cpu);
2932 
2933 	intel_pstate_exit_perf_limits(policy);
2934 
2935 	return 0;
2936 }
2937 
2938 static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
2939 {
2940 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2941 
2942 	pr_debug("CPU %d going online\n", cpu->cpu);
2943 
2944 	intel_pstate_init_acpi_perf_limits(policy);
2945 
2946 	if (hwp_active) {
2947 		/*
2948 		 * Re-enable HWP and clear the "suspended" flag to let "resume"
2949 		 * know that it need not do that.
2950 		 */
2951 		intel_pstate_hwp_reenable(cpu);
2952 		cpu->suspended = false;
2953 
2954 		hybrid_update_capacity(cpu);
2955 	}
2956 
2957 	return 0;
2958 }
2959 
2960 static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
2961 {
2962 	intel_pstate_clear_update_util_hook(policy->cpu);
2963 
2964 	return intel_cpufreq_cpu_offline(policy);
2965 }
2966 
2967 static void intel_pstate_cpu_exit(struct cpufreq_policy *policy)
2968 {
2969 	pr_debug("CPU %d exiting\n", policy->cpu);
2970 
2971 	policy->fast_switch_possible = false;
2972 }
2973 
2974 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
2975 {
2976 	struct cpudata *cpu;
2977 	int rc;
2978 
2979 	rc = intel_pstate_init_cpu(policy->cpu);
2980 	if (rc)
2981 		return rc;
2982 
2983 	cpu = all_cpu_data[policy->cpu];
2984 
2985 	cpu->max_perf_ratio = 0xFF;
2986 	cpu->min_perf_ratio = 0;
2987 
2988 	/* cpuinfo and default policy values */
2989 	policy->cpuinfo.min_freq = cpu->pstate.min_freq;
2990 	policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
2991 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2992 
2993 	policy->min = policy->cpuinfo.min_freq;
2994 	policy->max = policy->cpuinfo.max_freq;
2995 
2996 	intel_pstate_init_acpi_perf_limits(policy);
2997 
2998 	policy->fast_switch_possible = true;
2999 
3000 	return 0;
3001 }
3002 
3003 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
3004 {
3005 	int ret = __intel_pstate_cpu_init(policy);
3006 
3007 	if (ret)
3008 		return ret;
3009 
3010 	/*
3011 	 * Set the policy to powersave to provide a valid fallback value in case
3012 	 * the default cpufreq governor is neither powersave nor performance.
3013 	 */
3014 	policy->policy = CPUFREQ_POLICY_POWERSAVE;
3015 
3016 	if (hwp_active) {
3017 		struct cpudata *cpu = all_cpu_data[policy->cpu];
3018 
3019 		cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
3020 	}
3021 
3022 	return 0;
3023 }
3024 
3025 static struct cpufreq_driver intel_pstate = {
3026 	.flags		= CPUFREQ_CONST_LOOPS,
3027 	.verify		= intel_pstate_verify_policy,
3028 	.setpolicy	= intel_pstate_set_policy,
3029 	.suspend	= intel_pstate_suspend,
3030 	.resume		= intel_pstate_resume,
3031 	.init		= intel_pstate_cpu_init,
3032 	.exit		= intel_pstate_cpu_exit,
3033 	.offline	= intel_pstate_cpu_offline,
3034 	.online		= intel_pstate_cpu_online,
3035 	.update_limits	= intel_pstate_update_limits,
3036 	.name		= "intel_pstate",
3037 };
3038 
3039 static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
3040 {
3041 	struct cpudata *cpu = all_cpu_data[policy->cpu];
3042 
3043 	intel_pstate_verify_cpu_policy(cpu, policy);
3044 	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
3045 
3046 	return 0;
3047 }
3048 
3049 /* Use of trace in passive mode:
3050  *
3051  * In passive mode the trace core_busy field (also known as the
3052  * performance field, and labelled as such on the graphs; also known as
3053  * core_avg_perf) is not needed and so is re-assigned to indicate if the
3054  * driver call was via the normal or fast switch path. Various graphs
3055  * output from the intel_pstate_tracer.py utility that include core_busy
3056  * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
3057  * so we use 10 to indicate the normal path through the driver, and
3058  * 90 to indicate the fast switch path through the driver.
3059  * The scaled_busy field is not used, and is set to 0.
3060  */
3061 
3062 #define	INTEL_PSTATE_TRACE_TARGET 10
3063 #define	INTEL_PSTATE_TRACE_FAST_SWITCH 90
3064 
3065 static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
3066 {
3067 	struct sample *sample;
3068 
3069 	if (!trace_pstate_sample_enabled())
3070 		return;
3071 
3072 	if (!intel_pstate_sample(cpu, ktime_get()))
3073 		return;
3074 
3075 	sample = &cpu->sample;
3076 	trace_pstate_sample(trace_type,
3077 		0,
3078 		old_pstate,
3079 		cpu->pstate.current_pstate,
3080 		sample->mperf,
3081 		sample->aperf,
3082 		sample->tsc,
3083 		get_avg_frequency(cpu),
3084 		fp_toint(cpu->iowait_boost * 100));
3085 }
3086 
3087 static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
3088 				     u32 desired, bool fast_switch)
3089 {
3090 	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
3091 
3092 	value &= ~HWP_MIN_PERF(~0L);
3093 	value |= HWP_MIN_PERF(min);
3094 
3095 	value &= ~HWP_MAX_PERF(~0L);
3096 	value |= HWP_MAX_PERF(max);
3097 
3098 	value &= ~HWP_DESIRED_PERF(~0L);
3099 	value |= HWP_DESIRED_PERF(desired);
3100 
3101 	if (value == prev)
3102 		return;
3103 
3104 	WRITE_ONCE(cpu->hwp_req_cached, value);
3105 	if (fast_switch)
3106 		wrmsrl(MSR_HWP_REQUEST, value);
3107 	else
3108 		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
3109 }
3110 
3111 static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
3112 					  u32 target_pstate, bool fast_switch)
3113 {
3114 	if (fast_switch)
3115 		wrmsrl(MSR_IA32_PERF_CTL,
3116 		       pstate_funcs.get_val(cpu, target_pstate));
3117 	else
3118 		wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
3119 			      pstate_funcs.get_val(cpu, target_pstate));
3120 }
3121 
3122 static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
3123 				       int target_pstate, bool fast_switch)
3124 {
3125 	struct cpudata *cpu = all_cpu_data[policy->cpu];
3126 	int old_pstate = cpu->pstate.current_pstate;
3127 
3128 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
3129 	if (hwp_active) {
3130 		int max_pstate = policy->strict_target ?
3131 					target_pstate : cpu->max_perf_ratio;
3132 
3133 		intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate,
3134 					 target_pstate, fast_switch);
3135 	} else if (target_pstate != old_pstate) {
3136 		intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
3137 	}
3138 
3139 	cpu->pstate.current_pstate = target_pstate;
3140 
3141 	intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
3142 			    INTEL_PSTATE_TRACE_TARGET, old_pstate);
3143 
3144 	return target_pstate;
3145 }
3146 
3147 static int intel_cpufreq_target(struct cpufreq_policy *policy,
3148 				unsigned int target_freq,
3149 				unsigned int relation)
3150 {
3151 	struct cpudata *cpu = all_cpu_data[policy->cpu];
3152 	struct cpufreq_freqs freqs;
3153 	int target_pstate;
3154 
3155 	freqs.old = policy->cur;
3156 	freqs.new = target_freq;
3157 
3158 	cpufreq_freq_transition_begin(policy, &freqs);
3159 
3160 	target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
3161 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
3162 
3163 	freqs.new = target_pstate * cpu->pstate.scaling;
3164 
3165 	cpufreq_freq_transition_end(policy, &freqs, false);
3166 
3167 	return 0;
3168 }
3169 
3170 static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
3171 					      unsigned int target_freq)
3172 {
3173 	struct cpudata *cpu = all_cpu_data[policy->cpu];
3174 	int target_pstate;
3175 
3176 	target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
3177 
3178 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
3179 
3180 	return target_pstate * cpu->pstate.scaling;
3181 }
3182 
3183 static void intel_cpufreq_adjust_perf(unsigned int cpunum,
3184 				      unsigned long min_perf,
3185 				      unsigned long target_perf,
3186 				      unsigned long capacity)
3187 {
3188 	struct cpudata *cpu = all_cpu_data[cpunum];
3189 	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
3190 	int old_pstate = cpu->pstate.current_pstate;
3191 	int cap_pstate, min_pstate, max_pstate, target_pstate;
3192 
3193 	cap_pstate = READ_ONCE(global.no_turbo) ?
3194 					HWP_GUARANTEED_PERF(hwp_cap) :
3195 					HWP_HIGHEST_PERF(hwp_cap);
3196 
3197 	/* Optimization: Avoid unnecessary divisions. */
3198 
3199 	target_pstate = cap_pstate;
3200 	if (target_perf < capacity)
3201 		target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
3202 
3203 	min_pstate = cap_pstate;
3204 	if (min_perf < capacity)
3205 		min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
3206 
3207 	if (min_pstate < cpu->pstate.min_pstate)
3208 		min_pstate = cpu->pstate.min_pstate;
3209 
3210 	if (min_pstate < cpu->min_perf_ratio)
3211 		min_pstate = cpu->min_perf_ratio;
3212 
3213 	if (min_pstate > cpu->max_perf_ratio)
3214 		min_pstate = cpu->max_perf_ratio;
3215 
3216 	max_pstate = min(cap_pstate, cpu->max_perf_ratio);
3217 	if (max_pstate < min_pstate)
3218 		max_pstate = min_pstate;
3219 
3220 	target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
3221 
3222 	intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);
3223 
3224 	cpu->pstate.current_pstate = target_pstate;
3225 	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
3226 }
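/*
 * Worked example (added for illustration, hypothetical values): if the
 * capacity-defining HWP performance level above (cap_pstate) is 40 and the
 * scheduler asks for target_perf = 512 out of capacity = 1024, the scaling
 * requests DIV_ROUND_UP(40 * 512, 1024) = 20, which is then clamped to the
 * [min_pstate, max_pstate] window before being written via HWP_REQUEST.
 */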
3227 
3228 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
3229 {
3230 	struct freq_qos_request *req;
3231 	struct cpudata *cpu;
3232 	struct device *dev;
3233 	int ret, freq;
3234 
3235 	dev = get_cpu_device(policy->cpu);
3236 	if (!dev)
3237 		return -ENODEV;
3238 
3239 	ret = __intel_pstate_cpu_init(policy);
3240 	if (ret)
3241 		return ret;
3242 
3243 	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
3244 	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
3245 	policy->cur = policy->cpuinfo.min_freq;
3246 
3247 	req = kcalloc(2, sizeof(*req), GFP_KERNEL);
3248 	if (!req) {
3249 		ret = -ENOMEM;
3250 		goto pstate_exit;
3251 	}
3252 
3253 	cpu = all_cpu_data[policy->cpu];
3254 
3255 	if (hwp_active) {
3256 		u64 value;
3257 
3258 		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
3259 
3260 		intel_pstate_get_hwp_cap(cpu);
3261 
3262 		rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
3263 		WRITE_ONCE(cpu->hwp_req_cached, value);
3264 
3265 		cpu->epp_cached = intel_pstate_get_epp(cpu, value);
3266 	} else {
3267 		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
3268 	}
3269 
3270 	freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100);
3271 
3272 	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
3273 				   freq);
3274 	if (ret < 0) {
3275 		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
3276 		goto free_req;
3277 	}
3278 
3279 	freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100);
3280 
3281 	ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
3282 				   freq);
3283 	if (ret < 0) {
3284 		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
3285 		goto remove_min_req;
3286 	}
3287 
3288 	policy->driver_data = req;
3289 
3290 	return 0;
3291 
3292 remove_min_req:
3293 	freq_qos_remove_request(req);
3294 free_req:
3295 	kfree(req);
3296 pstate_exit:
3297 	intel_pstate_exit_perf_limits(policy);
3298 
3299 	return ret;
3300 }
3301 
3302 static void intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
3303 {
3304 	struct freq_qos_request *req;
3305 
3306 	req = policy->driver_data;
3307 
3308 	freq_qos_remove_request(req + 1);
3309 	freq_qos_remove_request(req);
3310 	kfree(req);
3311 
3312 	intel_pstate_cpu_exit(policy);
3313 }
3314 
3315 static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
3316 {
3317 	intel_pstate_suspend(policy);
3318 
3319 	if (hwp_active) {
3320 		struct cpudata *cpu = all_cpu_data[policy->cpu];
3321 		u64 value = READ_ONCE(cpu->hwp_req_cached);
3322 
3323 		/*
3324 		 * Clear the desired perf field in MSR_HWP_REQUEST in case
3325 		 * intel_cpufreq_adjust_perf() is in use and the last value
3326 		 * written by it may not be suitable.
3327 		 */
3328 		value &= ~HWP_DESIRED_PERF(~0L);
3329 		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
3330 		WRITE_ONCE(cpu->hwp_req_cached, value);
3331 	}
3332 
3333 	return 0;
3334 }
3335 
3336 static struct cpufreq_driver intel_cpufreq = {
3337 	.flags		= CPUFREQ_CONST_LOOPS,
3338 	.verify		= intel_cpufreq_verify_policy,
3339 	.target		= intel_cpufreq_target,
3340 	.fast_switch	= intel_cpufreq_fast_switch,
3341 	.init		= intel_cpufreq_cpu_init,
3342 	.exit		= intel_cpufreq_cpu_exit,
3343 	.offline	= intel_cpufreq_cpu_offline,
3344 	.online		= intel_pstate_cpu_online,
3345 	.suspend	= intel_cpufreq_suspend,
3346 	.resume		= intel_pstate_resume,
3347 	.update_limits	= intel_pstate_update_limits,
3348 	.name		= "intel_cpufreq",
3349 };
3350 
3351 static struct cpufreq_driver *default_driver;
3352 
3353 static void intel_pstate_driver_cleanup(void)
3354 {
3355 	unsigned int cpu;
3356 
3357 	cpus_read_lock();
3358 	for_each_online_cpu(cpu) {
3359 		if (all_cpu_data[cpu]) {
3360 			if (intel_pstate_driver == &intel_pstate)
3361 				intel_pstate_clear_update_util_hook(cpu);
3362 
3363 			kfree(all_cpu_data[cpu]);
3364 			WRITE_ONCE(all_cpu_data[cpu], NULL);
3365 		}
3366 	}
3367 	cpus_read_unlock();
3368 
3369 	intel_pstate_driver = NULL;
3370 }
3371 
3372 static int intel_pstate_register_driver(struct cpufreq_driver *driver)
3373 {
3374 	bool refresh_cpu_cap_scaling;
3375 	int ret;
3376 
3377 	if (driver == &intel_pstate)
3378 		intel_pstate_sysfs_expose_hwp_dynamic_boost();
3379 
3380 	memset(&global, 0, sizeof(global));
3381 	global.max_perf_pct = 100;
3382 	global.turbo_disabled = turbo_is_disabled();
3383 	global.no_turbo = global.turbo_disabled;
3384 
3385 	arch_set_max_freq_ratio(global.turbo_disabled);
3386 
3387 	refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu();
3388 
3389 	intel_pstate_driver = driver;
3390 	ret = cpufreq_register_driver(intel_pstate_driver);
3391 	if (ret) {
3392 		intel_pstate_driver_cleanup();
3393 		return ret;
3394 	}
3395 
3396 	global.min_perf_pct = min_perf_pct_min();
3397 
3398 	hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling);
3399 
3400 	return 0;
3401 }
3402 
3403 static ssize_t intel_pstate_show_status(char *buf)
3404 {
3405 	if (!intel_pstate_driver)
3406 		return sprintf(buf, "off\n");
3407 
3408 	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
3409 					"active" : "passive");
3410 }
3411 
3412 static int intel_pstate_update_status(const char *buf, size_t size)
3413 {
3414 	if (size == 3 && !strncmp(buf, "off", size)) {
3415 		if (!intel_pstate_driver)
3416 			return -EINVAL;
3417 
3418 		if (hwp_active)
3419 			return -EBUSY;
3420 
3421 		cpufreq_unregister_driver(intel_pstate_driver);
3422 		intel_pstate_driver_cleanup();
3423 		return 0;
3424 	}
3425 
3426 	if (size == 6 && !strncmp(buf, "active", size)) {
3427 		if (intel_pstate_driver) {
3428 			if (intel_pstate_driver == &intel_pstate)
3429 				return 0;
3430 
3431 			cpufreq_unregister_driver(intel_pstate_driver);
3432 		}
3433 
3434 		return intel_pstate_register_driver(&intel_pstate);
3435 	}
3436 
3437 	if (size == 7 && !strncmp(buf, "passive", size)) {
3438 		if (intel_pstate_driver) {
3439 			if (intel_pstate_driver == &intel_cpufreq)
3440 				return 0;
3441 
3442 			cpufreq_unregister_driver(intel_pstate_driver);
3443 			intel_pstate_sysfs_hide_hwp_dynamic_boost();
3444 		}
3445 
3446 		return intel_pstate_register_driver(&intel_cpufreq);
3447 	}
3448 
3449 	return -EINVAL;
3450 }
3451 
3452 static int no_load __initdata;
3453 static int no_hwp __initdata;
3454 static int hwp_only __initdata;
3455 static unsigned int force_load __initdata;
3456 
3457 static int __init intel_pstate_msrs_not_valid(void)
3458 {
3459 	if (!pstate_funcs.get_max(0) ||
3460 	    !pstate_funcs.get_min(0) ||
3461 	    !pstate_funcs.get_turbo(0))
3462 		return -ENODEV;
3463 
3464 	return 0;
3465 }
3466 
3467 static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
3468 {
3469 	pstate_funcs.get_max   = funcs->get_max;
3470 	pstate_funcs.get_max_physical = funcs->get_max_physical;
3471 	pstate_funcs.get_min   = funcs->get_min;
3472 	pstate_funcs.get_turbo = funcs->get_turbo;
3473 	pstate_funcs.get_scaling = funcs->get_scaling;
3474 	pstate_funcs.get_val   = funcs->get_val;
3475 	pstate_funcs.get_vid   = funcs->get_vid;
3476 	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
3477 }
3478 
3479 #ifdef CONFIG_ACPI
3480 
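/*
 * intel_pstate_no_acpi_pss - check whether ACPI _PSS tables are absent.
 *
 * Evaluates _PSS for every possible CPU and returns false as soon as one of
 * them yields a valid package, i.e. the firmware does provide P-state tables.
 */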
static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	pr_debug("ACPI _PSS not found\n");
	return true;
}

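/*
 * intel_pstate_no_acpi_pcch - check for the PCC (PCCH) firmware interface.
 *
 * Returns false if the \_SB scope exposes a PCCH method, which indicates that
 * the platform expects the pcc-cpufreq driver to manage performance states.
 */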
static bool __init intel_pstate_no_acpi_pcch(void)
{
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_handle(NULL, "\\_SB", &handle);
	if (ACPI_FAILURE(status))
		goto not_found;

	if (acpi_has_method(handle, "PCCH"))
		return false;

not_found:
	pr_debug("ACPI PCCH not found\n");
	return true;
}

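/*
 * intel_pstate_has_acpi_ppc - check whether any CPU object declares _PPC.
 *
 * The presence of _PPC means the platform firmware may dynamically limit the
 * available performance states.
 */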
static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	pr_debug("ACPI _PPC not found\n");
	return false;
}

enum {
	PSS,
	PPC,
};

/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
	{"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
	{"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{ } /* End */
};

#define BITMASK_OOB	(BIT(8) | BIT(18))

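/*
 * intel_pstate_platform_pwr_mgmt_exists - detect firmware-owned P-state control.
 *
 * Returns true when the OOB bits in MSR_MISC_PWR_MGMT show that P-states are
 * handled out of band, or when a platform from plat_info[] provides its own
 * _PSS/PCCH (PSS entries) or _PPC (PPC entries, unless "force" was passed)
 * interface, in which case intel_pstate backs off.
 */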
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	const struct x86_cpu_id *id;
	u64 misc_pwr;
	int idx;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & BITMASK_OOB) {
			pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
			pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
			return true;
		}
	}

	idx = acpi_match_platform_list(plat_info);
	if (idx < 0)
		return false;

	switch (plat_info[idx].data) {
	case PSS:
		if (!intel_pstate_no_acpi_pss())
			return false;

		return intel_pstate_no_acpi_pcch();
	case PPC:
		return intel_pstate_has_acpi_ppc() && !force_load;
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

#define INTEL_PSTATE_HWP_BROADWELL	0x01

#define X86_MATCH_HWP(vfm, hwp_mode)				\
	X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_HWP, hwp_mode)

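/*
 * CPUs that support HWP.  Broadwell Xeon/D parts get the
 * INTEL_PSTATE_HWP_BROADWELL quirk via driver_data (stored in hwp_mode_bdw);
 * any other Intel CPU advertising X86_FEATURE_HWP matches the wildcard entry.
 */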
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	X86_MATCH_HWP(INTEL_BROADWELL_X,	INTEL_PSTATE_HWP_BROADWELL),
	X86_MATCH_HWP(INTEL_BROADWELL_D,	INTEL_PSTATE_HWP_BROADWELL),
	X86_MATCH_HWP(INTEL_ANY,		0),
	{}
};

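/*
 * intel_pstate_hwp_is_enabled - check whether the firmware already enabled HWP.
 *
 * Reads MSR_PM_ENABLE; bit 0 set means HWP was turned on before the driver
 * loaded (typically by the BIOS) and cannot be turned off again at run time.
 */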
static bool intel_pstate_hwp_is_enabled(void)
{
	u64 value;

	rdmsrl(MSR_PM_ENABLE, value);
	return !!(value & 0x1);
}

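/*
 * Per-model EPP defaults are packed into the 32-bit driver_data of the
 * intel_epp_default[] entries below: one byte per predefined profile, in
 * powersave, balance_power, balance_performance, performance order, and are
 * extracted with FIELD_GET() during initialization.
 */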
#define POWERSAVE_MASK			GENMASK(7, 0)
#define BALANCE_POWER_MASK		GENMASK(15, 8)
#define BALANCE_PERFORMANCE_MASK	GENMASK(23, 16)
#define PERFORMANCE_MASK		GENMASK(31, 24)

#define HWP_SET_EPP_VALUES(powersave, balance_power, balance_perf, performance) \
	(FIELD_PREP_CONST(POWERSAVE_MASK, powersave) |\
	 FIELD_PREP_CONST(BALANCE_POWER_MASK, balance_power) |\
	 FIELD_PREP_CONST(BALANCE_PERFORMANCE_MASK, balance_perf) |\
	 FIELD_PREP_CONST(PERFORMANCE_MASK, performance))

#define HWP_SET_DEF_BALANCE_PERF_EPP(balance_perf) \
	(HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, HWP_EPP_BALANCE_POWERSAVE,\
	 balance_perf, HWP_EPP_PERFORMANCE))

static const struct x86_cpu_id intel_epp_default[] = {
	/*
	 * Set the EPP value to 102; this is the maximum suggested EPP that
	 * can still result in one-core turbo frequency for Alder Lake
	 * mobile CPUs.
	 */
	X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
	X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
		      179, 64, 16)),
	X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
		      179, 64, 16)),
	{}
};

static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
	X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
	X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL),
	X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
	{}
};

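/*
 * intel_pstate_init - probe and register the driver at boot.
 *
 * Checks for a supported Intel CPU (preferring HWP with EPP), copies the
 * matching P-state callbacks, bails out if the platform firmware owns P-state
 * control, allocates the per-CPU data array, applies per-model EPP and hybrid
 * scaling defaults, and finally registers the default cpufreq driver (active
 * mode with HWP, passive mode otherwise).
 */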
static int __init intel_pstate_init(void)
{
	static struct cpudata **_all_cpu_data;
	const struct x86_cpu_id *id;
	int rc;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	id = x86_match_cpu(hwp_support_ids);
	if (id) {
		hwp_forced = intel_pstate_hwp_is_enabled();

		if (hwp_forced)
			pr_info("HWP enabled by BIOS\n");
		else if (no_load)
			return -ENODEV;

		copy_cpu_funcs(&core_funcs);
		/*
		 * Avoid enabling HWP for processors without EPP support,
		 * because that means incomplete HWP implementation which is a
		 * corner case and supporting it is generally problematic.
		 *
		 * If HWP is enabled already, though, there is no choice but to
		 * deal with it.
		 */
		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
			hwp_active = true;
			hwp_mode_bdw = id->driver_data;
			intel_pstate.attr = hwp_cpufreq_attrs;
			intel_cpufreq.attr = hwp_cpufreq_attrs;
			intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
			intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
			if (!default_driver)
				default_driver = &intel_pstate;

			pstate_funcs.get_cpu_scaling = hwp_get_cpu_scaling;

			goto hwp_cpu_matched;
		}
		pr_info("HWP not enabled\n");
	} else {
		if (no_load)
			return -ENODEV;

		id = x86_match_cpu(intel_pstate_cpu_ids);
		if (!id) {
			pr_info("CPU model not supported\n");
			return -ENODEV;
		}

		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
	}

	if (intel_pstate_msrs_not_valid()) {
		pr_info("Invalid MSRs\n");
		return -ENODEV;
	}
	/* Without HWP start in the passive mode. */
	if (!default_driver)
		default_driver = &intel_cpufreq;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists()) {
		pr_info("P-states controlled by the platform\n");
		return -ENODEV;
	}

	if (!hwp_active && hwp_only)
		return -ENOTSUPP;

	pr_info("Intel P-state driver initializing\n");

	_all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
	if (!_all_cpu_data)
		return -ENOMEM;

	WRITE_ONCE(all_cpu_data, _all_cpu_data);

	intel_pstate_request_control_from_smm();

	intel_pstate_sysfs_expose_params();

	if (hwp_active) {
		const struct x86_cpu_id *id = x86_match_cpu(intel_epp_default);
		const struct x86_cpu_id *hybrid_id = x86_match_cpu(intel_hybrid_scaling_factor);

		if (id) {
			epp_values[EPP_INDEX_POWERSAVE] =
					FIELD_GET(POWERSAVE_MASK, id->driver_data);
			epp_values[EPP_INDEX_BALANCE_POWERSAVE] =
					FIELD_GET(BALANCE_POWER_MASK, id->driver_data);
			epp_values[EPP_INDEX_BALANCE_PERFORMANCE] =
					FIELD_GET(BALANCE_PERFORMANCE_MASK, id->driver_data);
			epp_values[EPP_INDEX_PERFORMANCE] =
					FIELD_GET(PERFORMANCE_MASK, id->driver_data);
			pr_debug("Updated EPPs powersave:%x balanced power:%x balanced perf:%x performance:%x\n",
				 epp_values[EPP_INDEX_POWERSAVE],
				 epp_values[EPP_INDEX_BALANCE_POWERSAVE],
				 epp_values[EPP_INDEX_BALANCE_PERFORMANCE],
				 epp_values[EPP_INDEX_PERFORMANCE]);
		}

		if (hybrid_id) {
			hybrid_scaling_factor = hybrid_id->driver_data;
			pr_debug("hybrid scaling factor: %d\n", hybrid_scaling_factor);
		}

	}

	mutex_lock(&intel_pstate_driver_lock);
	rc = intel_pstate_register_driver(default_driver);
	mutex_unlock(&intel_pstate_driver_lock);
	if (rc) {
		intel_pstate_sysfs_remove();
		return rc;
	}

	if (hwp_active) {
		const struct x86_cpu_id *id;

		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
		if (id) {
			set_power_ctl_ee_state(false);
			pr_info("Disabling energy efficiency optimization\n");
		}

		pr_info("HWP enabled\n");
	} else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		pr_warn("Problematic setup: Hybrid processor with disabled HWP\n");
	}

	return 0;
}
device_initcall(intel_pstate_init);

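/*
 * intel_pstate_setup - parse the "intel_pstate=" early kernel parameter.
 *
 * Recognized values: "disable", "active", "passive", "no_hwp", "force",
 * "hwp_only", "per_cpu_perf_limits" and (with ACPI) "support_acpi_ppc".
 * For example, booting with:
 *
 *	intel_pstate=passive
 *
 * makes &intel_cpufreq the default driver even on HWP-capable systems.
 */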
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	else if (!strcmp(str, "active"))
		default_driver = &intel_pstate;
	else if (!strcmp(str, "passive"))
		default_driver = &intel_cpufreq;

	if (!strcmp(str, "no_hwp"))
		no_hwp = 1;

	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");