#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

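	/*
	 * gprs[] maps to registers as consumed by rdmsr_safe_regs():
	 * gprs[0] = %eax, gprs[1] = %ecx (MSR number), gprs[2] = %edx,
	 * gprs[7] = %edi. The 0x9c5a203a value in %edi is the passcode
	 * K8 requires before it allows access to these protected MSRs.
	 */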
	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is, erm, interesting. AMD neglected to bump
 * the chip stepping when fixing the bug, but also tweaked the
 * performance at the same time.
 */

extern __visible void vide(void);
__asm__(".globl vide\n\t.align 4\nvide: ret");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	/*
	 * General Systems BIOSen alias the cpu frequency registers
	 * of the Elan at 0x000df000. Unfortunately, one of the Linux
	 * drivers subsequently pokes it, and changes the CPU speed.
	 * Workaround: remove the unneeded alias.
	 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
			       "system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
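			/*
			 * Old-style WHCR, as programmed below: bit 0
			 * enables write allocation, and the field starting
			 * at bit 1 holds the cacheable limit in 4 MB units
			 * (mbytes/4).
			 */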
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
			       mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
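			/*
			 * New-style WHCR: the limit moves to bits 31:22,
			 * still in 4 MB units (mbytes>>2), with the enable
			 * bit at bit 16.
			 */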
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
			       mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon specific MSR 15 (MSR_K7_HWCR) needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			       "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
			       l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* Are we being called from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlons above model 7 have the MP
	 * capability bit. It's worth noting that the A5 stepping (662) of
	 * some Athlon XPs has the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		  " processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
#ifdef CONFIG_X86_HT
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has_topoext) {
		u32 eax, ebx, ecx, edx;

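		/*
		 * CPUID 0x8000001e, as decoded here: ECX[10:8] holds
		 * NodesPerProcessor - 1 and ECX[7:0] the NodeId;
		 * EBX[9:8] holds CoresPerComputeUnit - 1 and EBX[7:0]
		 * the ComputeUnitId.
		 */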
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC ID distinguish the
 * cores. Assumes the number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
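	/*
	 * Worked example: with x86_coreid_bits == 2, an initial APIC ID of
	 * 0b0110 gives cpu_core_id = 2 (the low two bits) and
	 * phys_proc_id = 1 (the remaining high bits).
	 */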
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
				       "with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

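		/*
		 * CPUID 0x80000005 EDX describes the L1D cache: bits 31:24
		 * give its size in KB, bits 23:16 its associativity.
		 * upperbit is thus the size of one cache way in bytes;
		 * aligning mmap bases below that bit avoids L1D aliasing
		 * conflicts between processes on family 0x15.
		 */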
		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask = (upperbit - 1) & PAGE_MASK;
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
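	/*
	 * Bus 0, device 0x18, function 0 is the on-die northbridge on K8
	 * and later; bits 17 and 18 of its config register 0x68 report
	 * whether the BIOS enabled extended (8-bit) APIC IDs.
	 */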
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);
}

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The way-access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(0xc0011021, value);
		}
	}
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf)
		msr_set_bit(MSR_K7_HWCR, 6);
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	}

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

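	/*
	 * CPUID 0x80000006 describes the L2 TLB: EBX[27:16] holds the 4K
	 * DTLB entry count and EBX[11:0] the 4K ITLB count; EAX carries
	 * the 2M/4M-page counts in the same bit positions.
	 */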
	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
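
/*
 * Worked example: AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf) packs to
 * 0x0f412fff - family 0xf in bits 31:24, the start model/stepping pair
 * 0x412 in bits 23:12, and the end pair 0xfff in bits 11:0, matching
 * the (model << 4) | stepping key computed in cpu_has_amd_erratum().
 */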

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

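			/*
			 * OSVW status bits occupy consecutive 64-bit MSRs:
			 * osvw_id >> 6 selects the MSR, osvw_id & 0x3f the
			 * bit within it.
			 */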
			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}