/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
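
/*
 * Fixed point arithmetic: values carry FRAC_BITS (8) fraction bits,
 * so int_tofp(1) == 256.  For example,
 * mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6) == 1536.
 */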

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int max_pstate_physical;
	int scaling;
	int turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	u64 prev_tsc;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata *, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

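/*
 * One step of a textbook discrete PID controller.  The error term is
 * e = setpoint - busy (in fixed point); the output is
 * p_gain * e + i_gain * sum(e) + d_gain * (e - last_err), rounded to
 * the nearest integer.  A positive result means the core was less busy
 * than the setpoint, so the caller lowers the P state by that amount
 * (see intel_pstate_adjust_busy_pstate()).
 */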
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30. This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

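/*
 * Turbo is treated as unavailable if the BIOS set the turbo-disable
 * bit in MSR_IA32_MISC_ENABLE, or if the max non-turbo P state already
 * equals the turbo P state (i.e. the part has no turbo range at all).
 */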
static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

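/*
 * Push the current limits into the HWP request MSR on every CPU: a
 * percentage p maps to hw_min + p * (hw_max - hw_min) / 100 within the
 * range advertised by MSR_HWP_CAPABILITIES.  When turbo is disabled,
 * the maximum is additionally capped at the guaranteed performance
 * level.
 */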
static void intel_pstate_hwp_set(void)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
		hw_min = HWP_LOWEST_PERF(cap);
		hw_max = HWP_HIGHEST_PERF(cap);
		range = hw_max - hw_min;

		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

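/*
 * Atom P state requests carry a voltage ID along with the ratio.  The
 * VID is linearly interpolated between vid.min and vid.max (slope
 * precomputed in atom_get_vid()), rounded up and clamped; turbo
 * P states use the dedicated turbo VID instead.
 */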
static void atom_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

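/*
 * On Core parts the ratio limits come from MSR_PLATFORM_INFO:
 * bits 47:40 hold the maximum efficiency (minimum) ratio and
 * bits 15:8 the maximum non-turbo ratio, matching the shifts below.
 */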
static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

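/*
 * The max non-turbo ratio may be reduced at run time via configurable
 * TDP.  When MSR_PLATFORM_INFO advertises cTDP levels (bits 34:33),
 * read the ratio of the active level from MSR_CONFIG_TDP_* and trust
 * MSR_TURBO_ACTIVATION_RATIO only if the two agree; otherwise fall
 * back to the plain platform-info value.
 */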
static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			/* For level 1 and 2, bits[23:16] contain the ratio */
			if (tdp_ctrl)
				tdp_ratio >>= 16;

			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value >> 8) & 0xFF;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};

static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
	},
};

static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

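/*
 * core_pct_busy is 100 * delta_APERF / delta_MPERF in fixed point: the
 * average frequency over the sample interval as a percentage of the
 * MPERF reference frequency (e.g. aperf = 900, mperf = 1000 gives 90%).
 * sample->freq scales the maximum physical P state by that percentage.
 */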
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate_physical *
			cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}

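/*
 * Take one APERF/MPERF/TSC snapshot with interrupts disabled so the
 * three reads stay consistent, then store the deltas since the last
 * snapshot in cpu->sample.  If MPERF has not moved yet the sample is
 * skipped, which avoids a division by zero in intel_pstate_calc_busy().
 */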
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	if (cpu->prev_mperf == mperf) {
		local_irq_restore(flags);
		return;
	}

	tsc = rdtsc();
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}

static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	s64 duration_us;
	u32 sample_time;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max physical pstate available
	 * current_pstate was the pstate that was requested during
	 *	the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period. The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since we have a deferred timer, it will not fire unless
	 * we are in C0. So, determine if the actual elapsed time
	 * is significantly greater (3x) than our sample interval. If it
	 * is, then we were idle for a long enough period of time
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = ktime_us_delta(cpu->sample.time,
				     cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;
	int from;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			    fp_toint(busy_scaled),
			    from,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    sample->freq);
}

static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);

	intel_pstate_adjust_busy_pstate(cpu);

	intel_pstate_set_sample_time(cpu);
}

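/*
 * Supported CPUs, matched below by Intel Family 6 model number: for
 * example, 0x2a is Sandy Bridge, 0x37 Silvermont, 0x4c Airmont,
 * 0x4e/0x5e Skylake and 0x57 Knights Landing.
 */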
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, silvermont_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, airmont_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active)
		intel_pstate_hwp_enable(cpu);

	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		pr_debug("intel_pstate: set performance\n");
		limits = &performance_limits;
		if (hwp_active)
			intel_pstate_hwp_set();
		return 0;
	}

	pr_debug("intel_pstate: set powersave\n");
	limits = &powersave_limits;
	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16 valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("intel_pstate: HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

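/*
 * Early command line options, e.g. "intel_pstate=disable":
 *   disable  - do not register the driver
 *   no_hwp   - do not use hardware P states (HWP) even if available
 *   force    - register even if the firmware exposes ACPI _PPC
 *   hwp_only - only register on CPUs with HWP support
 */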
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("intel_pstate: HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");