/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * Check for obligatory CPU features and abort if the features are not
 * present.  This code should be compilable as 16-, 32- or 64-bit
 * code, so be very careful with types and inline assembly.
 *
 * This code should not contain any messages; that requires an
 * additional wrapper.
 *
 * As written, this code is not safe for inclusion into the kernel
 * proper (after FPU initialization, in particular).
 */

#ifdef _SETUP
# include "boot.h"
#endif
#include <linux/types.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>

struct cpu_features cpu;
static u32 cpu_vendor[3];
static u32 err_flags[NCAPINTS];

static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;

static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	0, /* REQUIRED_MASK2 not implemented in this file */
	0, /* REQUIRED_MASK3 not implemented in this file */
	REQUIRED_MASK4,
	0, /* REQUIRED_MASK5 not implemented in this file */
	REQUIRED_MASK6,
	0, /* REQUIRED_MASK7 not implemented in this file */
};

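/*
 * Pack four ASCII characters into a little-endian 32-bit word, matching
 * the byte order in which CPUID returns the vendor string in
 * EBX:EDX:ECX.
 */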
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))

static int is_amd(void)
{
	return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
	       cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
	       cpu_vendor[2] == A32('c', 'A', 'M', 'D');
}

static int is_centaur(void)
{
	return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
	       cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
	       cpu_vendor[2] == A32('a', 'u', 'l', 's');
}

static int is_transmeta(void)
{
	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
	       cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
	       cpu_vendor[2] == A32('M', 'x', '8', '6');
}

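/*
 * Probe for an FPU: clear the EM and TS bits in CR0 so x87 instructions
 * are not trapped or emulated, then execute fninit and read back the
 * status and control words.  With a working FPU the status word is 0 and
 * the defined control-word bits read back as 0x003f; if no FPU responds,
 * the preset -1 values are left in place and the test fails.
 */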
static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	u32 cr0;

	asm("movl %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("movl %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}

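/*
 * Test whether a given EFLAGS bit can be toggled.  The boot code uses
 * this with X86_EFLAGS_AC to tell a 486 from a 386, and with
 * X86_EFLAGS_ID to find out whether CPUID is available.
 */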
static int has_eflag(u32 mask)
{
	u32 f0, f1;

	asm("pushfl ; "
	    "pushfl ; "
	    "popl %0 ; "
	    "movl %0,%1 ; "
	    "xorl %2,%1 ; "
	    "pushl %1 ; "
	    "popfl ; "
	    "pushfl ; "
	    "popl %1 ; "
	    "popfl"
	    : "=&r" (f0), "=&r" (f1)
	    : "ri" (mask));

	return !!((f0^f1) & mask);
}

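/*
 * Fill in cpu.level, cpu.model and cpu.flags from CPUID.  Leaf 0 supplies
 * the vendor string and the maximum standard leaf, leaf 1 supplies
 * family/model plus feature words 0 (EDX) and 4 (ECX), and extended leaf
 * 0x80000001 supplies feature words 1 (EDX) and 6 (ECX).
 */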
static void get_flags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		asm("cpuid"
		    : "=a" (max_intel_level),
		      "=b" (cpu_vendor[0]),
		      "=d" (cpu_vendor[1]),
		      "=c" (cpu_vendor[2])
		    : "a" (0));

		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			asm("cpuid"
			    : "=a" (tfms),
			      "=c" (cpu.flags[4]),
			      "=d" (cpu.flags[0])
			    : "a" (0x00000001)
			    : "ebx");
			cpu.level = (tfms >> 8) & 15;
			cpu.model = (tfms >> 4) & 15;
			if (cpu.level >= 6)
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

		asm("cpuid"
		    : "=a" (max_amd_level)
		    : "a" (0x80000000)
		    : "ebx", "ecx", "edx");

		if (max_amd_level >= 0x80000001 &&
		    max_amd_level <= 0x8000ffff) {
			u32 eax = 0x80000001;
			asm("cpuid"
			    : "+a" (eax),
			      "=c" (cpu.flags[6]),
			      "=d" (cpu.flags[1])
			    : : "ebx");
		}
	}
}

/* Returns a bitmask of which words we have error bits in */
static int check_flags(void)
{
	u32 err;
	int i;

	err = 0;
	for (i = 0; i < NCAPINTS; i++) {
		err_flags[i] = req_flags[i] & ~cpu.flags[i];
		if (err_flags[i])
			err |= 1 << i;
	}

	return err;
}

/*
 * Returns -1 on error.
 *
 * *cpu_level is set to the current CPU level; *req_level to the required
 * level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags missing.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof cpu.flags);
	cpu.level = 3;

	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_flags();
	err = check_flags();

	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

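	/* err == 0x01 means the only missing bits are in feature word 0;
	   the vendor-specific cases below can sometimes fix those up by
	   poking MSRs */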
	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */

		u32 ecx = MSR_K7_HWCR;
		u32 eax, edx;

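		/* Bit 15 of MSR_K7_HWCR is the SSE disable bit; clearing
		   it should make SSE/SSE2 reappear in CPUID */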
		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		eax &= ~(1 << 15);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		get_flags();	/* Make sure it really did something */
		err = check_flags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */

		u32 ecx = MSR_VIA_FCR;
		u32 eax, edx;

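		/* Bits 1 and 7 of the VIA Feature Control Register are the
		   same bits the kernel's Centaur setup code sets to expose
		   CX8 (and PGE) on the C3 family */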
		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		eax |= (1<<1)|(1<<7);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_flags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */

		u32 ecx = 0x80860004;
		u32 eax, edx;
		u32 level = 1;

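		/* MSR 0x80860004 is the Transmeta CPUID feature mask: save
		   it, write all ones to unmask everything, re-read CPUID
		   leaf 1 into feature word 0, then restore the old mask */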
		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		err = check_flags();
	}

	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}