// SPDX-License-Identifier: GPL-2.0
/*
 * From Coreboot file of same name
 *
 * Copyright (C) 2007-2009 coresystems GmbH
 * Copyright (C) 2011 The Chromium Authors
 */

#include <common.h>
#include <cpu.h>
#include <dm.h>
#include <fdtdec.h>
#include <malloc.h>
#include <asm/cpu.h>
#include <asm/cpu_x86.h>
#include <asm/msr.h>
#include <asm/msr-index.h>
#include <asm/mtrr.h>
#include <asm/processor.h>
#include <asm/speedstep.h>
#include <asm/turbo.h>
#include <asm/arch/model_206ax.h>

DECLARE_GLOBAL_DATA_PTR;

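/*
 * Enable or disable VMX according to CONFIG_ENABLE_VMX, unless the
 * IA32_FEATURE_CONTROL MSR is already locked
 */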
static void enable_vmx(void)
{
	struct cpuid_result regs;
#ifdef CONFIG_ENABLE_VMX
	int enable = true;
#else
	int enable = false;
#endif
	msr_t msr;

	regs = cpuid(1);
	/* Check that VMX is supported before reading or writing the MSR */
	if (!((regs.ecx & CPUID_VMX) || (regs.ecx & CPUID_SMX)))
		return;

	msr = msr_read(MSR_IA32_FEATURE_CONTROL);

	if (msr.lo & (1 << 0)) {
		debug("VMX is locked, so %s will do nothing\n", __func__);
		/*
		 * VMX is locked; if we set it again we get an illegal
		 * instruction
		 */
		return;
	}

	/*
	 * The IA32_FEATURE_CONTROL MSR may initialize with random values.
	 * It must be cleared regardless of the VMX config setting.
	 */
	msr.hi = 0;
	msr.lo = 0;

	debug("%s VMX\n", enable ? "Enabling" : "Disabling");

	/*
	 * Even though the Intel manual says the lock bit must be set in
	 * addition to the VMX bit for VMX to work, that is not actually the
	 * case, so we leave it unlocked for the OS to manage things itself.
	 * This is good for a few reasons:
	 * - No need to reflash the BIOS just to toggle the lock bit.
	 * - The VMX bits really should match each other across cores, so
	 *   hard-locking it on one core while another has the opposite
	 *   setting can easily lead to crashes as code using VMX migrates
	 *   between them.
	 * - Vendors that want to "upsell" from a BIOS that disables+locks
	 *   VMX to one that doesn't are being sleazy.
	 * By leaving this to the OS (e.g. Linux), people can do exactly what
	 * they want on the fly, and do it correctly (e.g. across multiple
	 * cores).
	 */
	if (enable) {
		msr.lo |= (1 << 2);
		if (regs.ecx & CPUID_SMX)
			msr.lo |= (1 << 1);
	}

	msr_write(MSR_IA32_FEATURE_CONTROL, msr);
}

/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
static const u8 power_limit_time_sec_to_msr[] = {
	[0]   = 0x00,
	[1]   = 0x0a,
	[2]   = 0x0b,
	[3]   = 0x4b,
	[4]   = 0x0c,
	[5]   = 0x2c,
	[6]   = 0x4c,
	[7]   = 0x6c,
	[8]   = 0x0d,
	[10]  = 0x2d,
	[12]  = 0x4d,
	[14]  = 0x6d,
	[16]  = 0x0e,
	[20]  = 0x2e,
	[24]  = 0x4e,
	[28]  = 0x6e,
	[32]  = 0x0f,
	[40]  = 0x2f,
	[48]  = 0x4f,
	[56]  = 0x6f,
	[64]  = 0x10,
	[80]  = 0x30,
	[96]  = 0x50,
	[112] = 0x70,
	[128] = 0x11,
};

/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
	[0x00] = 0,
	[0x0a] = 1,
	[0x0b] = 2,
	[0x4b] = 3,
	[0x0c] = 4,
	[0x2c] = 5,
	[0x4c] = 6,
	[0x6c] = 7,
	[0x0d] = 8,
	[0x2d] = 10,
	[0x4d] = 12,
	[0x6d] = 14,
	[0x0e] = 16,
	[0x2e] = 20,
	[0x4e] = 24,
	[0x6e] = 28,
	[0x0f] = 32,
	[0x2f] = 40,
	[0x4f] = 48,
	[0x6f] = 56,
	[0x10] = 64,
	[0x30] = 80,
	[0x50] = 96,
	[0x70] = 112,
	[0x11] = 128,
};

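/*
 * Return the number of configurable-TDP levels reported by the CPU
 * (MSR_PLATFORM_INFO bits 34:33), or 0 if the CPU is too old to support it
 */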
int cpu_config_tdp_levels(void)
{
	struct cpuid_result result;
	msr_t platform_info;

	/* Minimum CPU revision */
	result = cpuid(1);
	if (result.eax < IVB_CONFIG_TDP_MIN_CPUID)
		return 0;

	/* Bits 34:33 indicate how many levels supported */
	platform_info = msr_read(MSR_PLATFORM_INFO);
	return (platform_info.hi >> 1) & 3;
}

/*
 * Configure processor power limits if possible
 * This must be done AFTER BIOS_RESET_CPL is set
 */
void set_power_limits(u8 power_limit_1_time)
{
	msr_t msr = msr_read(MSR_PLATFORM_INFO);
	msr_t limit;
	unsigned power_unit;
	unsigned tdp, min_power, max_power, max_time;
	u8 power_limit_1_val;

	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
		return;

	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
		return;

	/* Get units */
	msr = msr_read(MSR_PKG_POWER_SKU_UNIT);
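	/*
	 * Bits 3:0 hold the power-unit exponent N (raw values are in units
	 * of 1/2^N W); 2 << (N - 1) == 2^N, so dividing by it gives watts
	 */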
	power_unit = 2 << ((msr.lo & 0xf) - 1);

	/* Get power defaults for this SKU */
	msr = msr_read(MSR_PKG_POWER_SKU);
	tdp = msr.lo & 0x7fff;
	min_power = (msr.lo >> 16) & 0x7fff;
	max_power = msr.hi & 0x7fff;
	max_time = (msr.hi >> 16) & 0x7f;

	debug("CPU TDP: %u Watts\n", tdp / power_unit);

	if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
		power_limit_1_time = power_limit_time_msr_to_sec[max_time];

	if (min_power > 0 && tdp < min_power)
		tdp = min_power;

	if (max_power > 0 && tdp > max_power)
		tdp = max_power;

	power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];

	/* Set long term power limit to TDP */
	limit.lo = 0;
	limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
	limit.lo |= PKG_POWER_LIMIT_EN;
	limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
		PKG_POWER_LIMIT_TIME_SHIFT;

	/* Set short term power limit to 1.25 * TDP */
	limit.hi = 0;
	limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
	limit.hi |= PKG_POWER_LIMIT_EN;
	/* Power limit 2 time is only programmable on SNB EP/EX */

	msr_write(MSR_PKG_POWER_LIMIT, limit);

	/* Use nominal TDP values for CPUs with configurable TDP */
	if (cpu_config_tdp_levels()) {
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		limit.hi = 0;
		limit.lo = msr.lo & 0xff;
		msr_write(MSR_TURBO_ACTIVATION_RATIO, limit);
	}
}

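/*
 * Set up C-state support: auto demotion/undemotion, the LVL_2 I/O capture
 * range, C1E, interrupt response time limits and per-plane current limits
 */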
static void configure_c_states(void)
{
	struct cpuid_result result;
	msr_t msr;

	msr = msr_read(MSR_PMG_CST_CONFIG_CTL);
	msr.lo |= (1 << 28);	/* C1 Auto Undemotion Enable */
	msr.lo |= (1 << 27);	/* C3 Auto Undemotion Enable */
	msr.lo |= (1 << 26);	/* C1 Auto Demotion Enable */
	msr.lo |= (1 << 25);	/* C3 Auto Demotion Enable */
	msr.lo &= ~(1 << 10);	/* Disable IO MWAIT redirection */
	msr.lo |= 7;		/* No package C-state limit */
	msr_write(MSR_PMG_CST_CONFIG_CTL, msr);

	msr = msr_read(MSR_PMG_IO_CAPTURE_ADR);
	msr.lo &= ~0x7ffff;
	msr.lo |= (PMB0_BASE + 4);	/* LVL_2 base address */
	msr.lo |= (2 << 16);		/* CST Range: C7 is max C-state */
	msr_write(MSR_PMG_IO_CAPTURE_ADR, msr);

	msr = msr_read(MSR_MISC_PWR_MGMT);
	msr.lo &= ~(1 << 0);	/* Enable P-state HW_ALL coordination */
	msr_write(MSR_MISC_PWR_MGMT, msr);

	msr = msr_read(MSR_POWER_CTL);
	msr.lo |= (1 << 18);	/* Enable Energy Perf Bias MSR 0x1b0 */
	msr.lo |= (1 << 1);	/* C1E Enable */
	msr.lo |= (1 << 0);	/* Bi-directional PROCHOT# */
	msr_write(MSR_POWER_CTL, msr);

	/* C3 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x50;
	msr_write(MSR_PKGC3_IRTL, msr);

	/* C6 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x68;
	msr_write(MSR_PKGC6_IRTL, msr);

	/* C7 Interrupt Response Time Limit */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | 0x6D;
	msr_write(MSR_PKGC7_IRTL, msr);

	/* Primary Plane Current Limit */
	msr = msr_read(MSR_PP0_CURRENT_CONFIG);
	msr.lo &= ~0x1fff;
	msr.lo |= PP0_CURRENT_LIMIT;
	msr_write(MSR_PP0_CURRENT_CONFIG, msr);

	/* Secondary Plane Current Limit */
	msr = msr_read(MSR_PP1_CURRENT_CONFIG);
	msr.lo &= ~0x1fff;
	result = cpuid(1);
	if (result.eax >= 0x30600)
		msr.lo |= PP1_CURRENT_LIMIT_IVB;
	else
		msr.lo |= PP1_CURRENT_LIMIT_SNB;
	msr_write(MSR_PP1_CURRENT_CONFIG, msr);
}

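/*
 * Program the TCC (thermal control circuit) activation offset from the
 * "tcc-offset" device tree property, if the CPU supports an offset
 */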
static int configure_thermal_target(struct udevice *dev)
{
	int tcc_offset;
	msr_t msr;

	tcc_offset = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
				    "tcc-offset", 0);

	/* Set TCC activation offset if supported */
	msr = msr_read(MSR_PLATFORM_INFO);
	if ((msr.lo & (1 << 30)) && tcc_offset) {
		msr = msr_read(MSR_TEMPERATURE_TARGET);
		msr.lo &= ~(0xf << 24); /* Bits 27:24 */
		msr.lo |= (tcc_offset & 0xf) << 24;
		msr_write(MSR_TEMPERATURE_TARGET, msr);
	}

	return 0;
}

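/*
 * Enable fast strings, thermal monitoring and Enhanced SpeedStep, and leave
 * only the package critical thermal interrupt enabled
 */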
static void configure_misc(void)
{
	msr_t msr;

	msr = msr_read(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	  /* Fast String enable */
	msr.lo |= (1 << 3);	  /* TM1/TM2/EMTTM enable */
	msr.lo |= (1 << 16);	  /* Enhanced SpeedStep Enable */
	msr_write(IA32_MISC_ENABLE, msr);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	msr_write(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	msr_write(IA32_PACKAGE_THERM_INTERRUPT, msr);
}

static void enable_lapic_tpr(void)
{
	msr_t msr;

	msr = msr_read(MSR_PIC_MSG_CONTROL);
	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
	msr_write(MSR_PIC_MSG_CONTROL, msr);
}

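/* Enable Direct Cache Access if CPUID reports the DCA feature (ECX bit 18) */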
static void configure_dca_cap(void)
{
	struct cpuid_result cpuid_regs;
	msr_t msr;

	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
	cpuid_regs = cpuid(1);
	if (cpuid_regs.ecx & (1 << 18)) {
		msr = msr_read(IA32_PLATFORM_DCA_CAP);
		msr.lo |= 1;
		msr_write(IA32_PLATFORM_DCA_CAP, msr);
	}
}

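/*
 * Set the CPU to its maximum non-turbo ratio, or to the nominal config-TDP
 * ratio when configurable TDP is supported
 */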
static void set_max_ratio(void)
{
	msr_t msr, perf_ctl;

	perf_ctl.hi = 0;

	/* Check for configurable TDP option */
	if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = msr_read(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	msr_write(MSR_IA32_PERF_CTL, perf_ctl);

	debug("model_x06ax: frequency set to %d\n",
	      ((perf_ctl.lo >> 8) & 0xff) * SANDYBRIDGE_BCLK);
}

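/*
 * Program the energy/performance bias hint in bits 3:0 (0 favours
 * performance, 15 favours power saving)
 */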
static void set_energy_perf_bias(u8 policy)
{
	msr_t msr;

	/* Energy Policy is bits 3:0 */
	msr = msr_read(IA32_ENERGY_PERFORMANCE_BIAS);
	msr.lo &= ~0xf;
	msr.lo |= policy & 0xf;
	msr_write(IA32_ENERGY_PERFORMANCE_BIAS, msr);

	debug("model_x06ax: energy policy set to %u\n", policy);
}

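/*
 * Zero the IA32_MCi_STATUS machine-check banks (bank registers are spaced
 * four MSRs apart) so that stale errors are not reported
 */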
static void configure_mca(void)
{
	msr_t msr;
	int i;

	msr.lo = 0;
	msr.hi = 0;
	/* This should only be done on a cold boot */
	for (i = 0; i < 7; i++)
		msr_write(IA32_MC0_STATUS + (i * 4), msr);
}

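/* One-time CPU setup, run from the probe method on the boot CPU only */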
static int model_206ax_init(struct udevice *dev)
{
	int ret;

	/* Clear out pending MCEs */
	configure_mca();

	/* Enable the local CPU APICs */
	enable_lapic_tpr();

	/* Enable virtualization if enabled via CONFIG_ENABLE_VMX */
	enable_vmx();

	/* Configure C States */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Thermal throttle activation offset */
	ret = configure_thermal_target(dev);
	if (ret) {
		debug("Cannot set thermal target\n");
		return ret;
	}

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Set Max Ratio */
	set_max_ratio();

	/* Enable Turbo */
	turbo_enable();

	return 0;
}

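/* Report the core clock (PERF_CTL ratio times the base clock) and feature flags */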
static int model_206ax_get_info(struct udevice *dev, struct cpu_info *info)
{
	msr_t msr;

	msr = msr_read(MSR_IA32_PERF_CTL);
	info->cpu_freq = ((msr.lo >> 8) & 0xff) * SANDYBRIDGE_BCLK * 1000000;
	info->features = 1 << CPU_FEAT_L1_CACHE | 1 << CPU_FEAT_MMU |
		1 << CPU_FEAT_UCODE;

	return 0;
}

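/* The CPU count is hard-coded to 4 for this driver */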
static int model_206ax_get_count(struct udevice *dev)
{
	return 4;
}

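/* Run the full init sequence only on the boot CPU (sequence number 0) */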
static int cpu_x86_model_206ax_probe(struct udevice *dev)
{
	if (dev->seq == 0)
		model_206ax_init(dev);

	return 0;
}

static const struct cpu_ops cpu_x86_model_206ax_ops = {
	.get_desc	= cpu_x86_get_desc,
	.get_info	= model_206ax_get_info,
	.get_count	= model_206ax_get_count,
	.get_vendor	= cpu_x86_get_vendor,
};

static const struct udevice_id cpu_x86_model_206ax_ids[] = {
	{ .compatible = "intel,core-gen3" },
	{ }
};

U_BOOT_DRIVER(cpu_x86_model_206ax_drv) = {
	.name		= "cpu_x86_model_206ax",
	.id		= UCLASS_CPU,
	.of_match	= cpu_x86_model_206ax_ids,
	.bind		= cpu_x86_bind,
	.probe		= cpu_x86_model_206ax_probe,
	.ops		= &cpu_x86_model_206ax_ops,
};