/*
 *	Routines to identify additional cpu features that are scattered in
 *	cpuid space.
 */
#include <linux/cpu.h>

#include <asm/pat.h>
#include <asm/processor.h>

#include <mach_apic.h>

struct cpuid_bit {
	u16 feature;
	u8 reg;
	u8 bit;
	u32 level;
};

enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};

void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{
	u32 max_level;
	u32 regs[4];
	const struct cpuid_bit *cb;

	static const struct cpuid_bit cpuid_bits[] = {
		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
		{ 0, 0, 0, 0 }
	};

	for (cb = cpuid_bits; cb->feature; cb++) {

		/*
		 * Verify that the level is valid: the high word of the
		 * level selects the cpuid range (standard 0x0000xxxx or
		 * extended 0x8000xxxx), and leaf 0 of that range reports
		 * in EAX the maximum leaf implemented within it.
		 */
		max_level = cpuid_eax(cb->level & 0xffff0000);
		if (max_level < cb->level ||
		    max_level > (cb->level | 0xffff))
			continue;

		cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
			&regs[CR_ECX], &regs[CR_EDX]);

		if (regs[cb->reg] & (1 << cb->bit))
			set_cpu_cap(c, cb->feature);
	}
}
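
/*
 * Illustrative sketch, not part of the original file: the same range
 * validation and bit test replayed from user space with GCC's <cpuid.h>
 * helpers. The leaf and bit mirror the X86_FEATURE_IDA table entry
 * above; everything else is an assumption made for demonstration.
 */
#if 0
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int level = 0x00000006;	/* leaf carrying the IDA bit */

	/* Max implemented leaf of the selected range (standard here). */
	if (__get_cpuid_max(level & 0xffff0000, NULL) < level)
		return 1;	/* leaf not implemented */

	__cpuid_count(level, 0, eax, ebx, ecx, edx);
	printf("IDA: %ssupported\n", (eax & (1 << 1)) ? "" : "not ");
	return 0;
}
#endif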

/* leaf 0xb SMT level */
#define SMT_LEVEL	0

/* leaf 0xb sub-leaf types */
#define INVALID_TYPE	0
#define SMT_TYPE	1
#define CORE_TYPE	2

#define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx)		((ebx) & 0xffff)
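
/*
 * Worked example (added for illustration): with ecx = 0x0201 and
 * eax = 0x00000004, LEAFB_SUBTYPE(ecx) is 2 (CORE_TYPE) and
 * BITS_SHIFT_NEXT_LEVEL(eax) is 4, i.e. shift the x2APIC ID right
 * by 4 bits to reach the next (package) topology level.
 */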

/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for populating initial_apicid and cpu topology
 * detection.
 */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
	unsigned int eax, ebx, ecx, edx, sub_index;
	unsigned int ht_mask_width, core_plus_mask_width;
	unsigned int core_select_mask, core_level_siblings;

	if (c->cpuid_level < 0xb)
		return;

	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

	/*
	 * check if the cpuid leaf 0xb is actually implemented.
	 */
	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
		return;

	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);

	/*
	 * initial apic id, which also represents 32-bit extended x2apic id.
	 */
	c->initial_apicid = edx;

	/*
	 * Populate HT related information from sub-leaf level 0.
	 */
	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);

	sub_index = 1;
	do {
		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);

		/*
		 * Check for the Core type in the implemented sub leaves.
		 */
		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
			core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
			core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
			break;
		}

		sub_index++;
	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);

	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
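
	/*
	 * Worked example (added for illustration): with ht_mask_width = 1
	 * and core_plus_mask_width = 4, ~(-1 << 4) = 0xf and 0xf >> 1 = 0x7,
	 * so the core id is bits [3:1] of the x2APIC ID once the SMT bit
	 * has been shifted out below.
	 */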

#ifdef CONFIG_X86_32
	c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
						 & core_select_mask;
	c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
	/*
	 * Reinit the apicid, now that we have extended initial_apicid.
	 */
	c->apicid = phys_pkg_id(c->initial_apicid, 0);
#else
	c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
	c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
	/*
	 * Reinit the apicid, now that we have extended initial_apicid.
	 */
	c->apicid = phys_pkg_id(0);
#endif
	c->x86_max_cores = (core_level_siblings / smp_num_siblings);

	printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
	       c->phys_proc_id);
	if (c->x86_max_cores > 1)
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	return;
#endif
}
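
/*
 * Illustrative sketch, not part of the original file: walking the 0xb
 * sub-leaves from user space the same way detect_extended_topology()
 * does, printing each level. Uses GCC's <cpuid.h>; the field math is
 * inlined so the example stands alone.
 */
#if 0
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, sub = 0;

	if (__get_cpuid_max(0, NULL) < 0xb)
		return 1;	/* leaf 0xb not implemented */

	do {
		__cpuid_count(0xb, sub, eax, ebx, ecx, edx);
		printf("sub-leaf %u: type %u shift %2u siblings %u\n",
		       sub, (ecx >> 8) & 0xff,	/* LEAFB_SUBTYPE */
		       eax & 0x1f,		/* BITS_SHIFT_NEXT_LEVEL */
		       ebx & 0xffff);		/* LEVEL_MAX_SIBLINGS */
		sub++;
	} while (((ecx >> 8) & 0xff) != 0);	/* until INVALID_TYPE */
	return 0;
}
#endif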

#ifdef CONFIG_X86_PAT
void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
{
	if (!cpu_has_pat)
		pat_disable("PAT not supported by CPU.");

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * There is a known erratum on Pentium III and Core Solo
		 * and Core Duo CPUs.
		 * " Page with PAT set to WC while associated MTRR is UC
		 *   may consolidate to UC "
		 * Because of this erratum, it is better to stick with
		 * setting WC in MTRR rather than using PAT on these CPUs.
		 *
		 * Enable PAT WC only on P4, Core 2 or later CPUs.
		 */
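		/*
		 * Worked example (added for illustration): family 6
		 * model 14 (Core Duo) fails the check below and keeps
		 * PAT WC disabled; family 6 model 15 (Core 2) and
		 * family 0xf (Pentium 4) pass and keep PAT enabled.
		 */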
		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
			return;

		pat_disable("PAT WC disabled due to known CPU erratum.");
		return;

	case X86_VENDOR_AMD:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_TRANSMETA:
		return;
	}

	pat_disable("PAT disabled. Not yet verified on this CPU type.");
}
#endif