#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

/*
 * Just in case our CPU detection goes bad, or you have a weird system,
 * allow a way to override the automatic disabling of MPX.
 */
static int forcempx;

static int __init forcempx_setup(char *__unused)
{
	forcempx = 1;

	return 1;
}
__setup("intel-skd-046-workaround=disable", forcempx_setup);

void check_mpx_erratum(struct cpuinfo_x86 *c)
{
	if (forcempx)
		return;
	/*
	 * Turn off the MPX feature on CPUs where SMEP is not
	 * available or disabled.
	 *
	 * Works around Intel Erratum SKD046: "Branch Instructions
	 * May Initialize MPX Bound Registers Incorrectly".
	 *
	 * This might falsely disable MPX on systems without SMEP, such as
	 * Atom processors that lack SMEP. But no such hardware is known
	 * at the moment.
	 */
	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
		setup_clear_cpu_cap(X86_FEATURE_MPX);
		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
	}
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from the 20180108 microcode release
 */
struct sku_microcode {
	u8 model;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0B,	0x80 },
	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x09,	0x80 },
	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },
	{ INTEL_FAM6_BROADWELL_XEON_D,	0x03,	0x07000011 },
	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_FAM6_HASWELL_ULT,	0x01,	0x21 },
	{ INTEL_FAM6_HASWELL_GT3E,	0x01,	0x18 },
	{ INTEL_FAM6_HASWELL_CORE,	0x03,	0x23 },
	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
};
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

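	/*
	 * A microcode revision at or below the one listed for a matching
	 * model/stepping is considered affected.
	 */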
	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_model == spectre_bad_microcodes[i].model &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

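		/*
		 * Read the running microcode revision: write 0 to the MSR,
		 * serialize with CPUID, then read the revision back from
		 * the high half of MSR_IA32_UCODE_REV.
		 */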
		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 means the TSC runs at a
	 * constant rate across P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets (but not across
	 * cabinets - we turn it off explicitly in that case).
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

	/* Penwell and Cloverview have a TSC which doesn't stop across S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
		case 0x4a:	/* Merrifield */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15)
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
			pr_info("kmemcheck: Disabling fast string operations\n");
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set, EBX[23:16] contains the number of
		 * APIC IDs reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}

	check_mpx_erratum(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/*
	 * Nothing to check on the boot CPU; this only matters when we are
	 * called from identify_secondary_cpu().
	 */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
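	/*
	 * 0x633 packs family 6, model 3, stepping 3 in the same format as
	 * the expression below.
	 */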
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
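	/* 0x520 below is family 5, model 2 (P54C) in packed form. */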
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * Find out the number of processor cores on the die.
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
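	/*
	 * EAX[4:0] is the cache type of the first entry; zero means leaf 4
	 * reports nothing useful here. EAX[31:26] is the maximum number of
	 * addressable core IDs in the physical package, minus one.
	 */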
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}

static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
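	/*
	 * For the VMX capability MSRs the low dword reports controls that
	 * must be 1 and the high dword controls that may be 1, so ORing the
	 * halves tells us which controls the CPU can run with.
	 */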
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}

static void init_intel_energy_perf(struct cpuinfo_x86 *c)
{
	u64 epb;

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
	 * (x86_energy_perf_policy(8) is available to change it at run-time.)
	 */
	if (!cpu_has(c, X86_FEATURE_EPB))
		return;

	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
	if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
		return;

	pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
	pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
	epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}

static void intel_bsp_resume(struct cpuinfo_x86 *c)
{
	/*
	 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
	 * so reinitialize it properly like during bootup:
	 */
	init_intel_energy_perf(c);
}

static void init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * Let's use the legacy CPUID leaves 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	l2 = init_intel_cacheinfo(c);

	/* Detect legacy cache sizes if init_intel_cacheinfo did not */
	if (l2 == 0) {
		cpu_detect_cache_sizes(c);
		l2 = c->x86_cache_size;
	}

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
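		/*
		 * Bit 11 set means BTS is unavailable; bit 12 set means
		 * PEBS is unavailable.
		 */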
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

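	/*
	 * These models need the monitored cache line to be flushed with
	 * CLFLUSH before MONITOR for MWAIT wakeups to be reliable (see the
	 * X86_BUG_CLFLUSH_MONITOR users).
	 */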
	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
	    ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
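	/*
	 * Netburst effectively works on pairs of cache lines (see the
	 * clflush-size note in early_init_intel()), so align to twice the
	 * CLFLUSH line size.
	 */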
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	init_intel_energy_perf(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
		    intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_bsp_resume	= intel_bsp_resume,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);