• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * @file architecture specific interfaces
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 * @author Andi Kleen
 */
7 
#if defined(__i386__) || defined(__x86_64__)

/* Assume we run on the same host as the profilee */

/* Mask with the low x bits set, e.g. num_to_mask(6) == 0x3f.
   x must be less than 32: a shift by >= the type width is undefined. */
#define num_to_mask(x) ((1U << (x)) - 1)
/* Return nonzero iff this CPU's CPUID vendor string equals the
   12-character string vnd (e.g. "GenuineIntel"). */
static inline int cpuid_vendor(char *vnd)
{
	union {
		struct {
			/* CPUID leaf 0 returns the vendor string in the
			   register order EBX, EDX, ECX — hence b,d,c. */
			unsigned b,d,c;
		};
		char v[12];	/* same 12 bytes viewed as the vendor string */
	} v;
	unsigned eax;
	asm("cpuid" : "=a" (eax), "=b" (v.b), "=c" (v.c), "=d" (v.d) : "0" (0));
	/* v.v is not NUL-terminated, so compare exactly 12 bytes. */
	return !strncmp(v.v, vnd, 12);
}
26 
/* Work around Nehalem spec update AAJ79: CPUID incorrectly indicates
   unhalted reference cycle architectural event is supported. We assume
   steppings after C0 report correct data in CPUID.
   ebx points at the leaf-0xa event-availability mask; a set bit marks
   an event as unavailable (see arch_get_filter). */
static inline void workaround_nehalem_aaj79(unsigned *ebx)
{
	union {
		unsigned eax;
		/* Layout of CPUID leaf 1 EAX (stepping/model/family fields).
		   NOTE(review): relies on LSB-first bitfield allocation as on
		   the x86 psABI — fine here since this is x86-only code. */
		struct {
			unsigned stepping : 4;
			unsigned model : 4;
			unsigned family : 4;
			unsigned type : 2;
			unsigned res : 2;
			unsigned ext_model : 4;
			unsigned ext_family : 8;
			unsigned res2 : 4;
		};
	} v;
	unsigned model;

	/* The erratum is Intel-specific. */
	if (!cpuid_vendor("GenuineIntel"))
		return;
	asm("cpuid" : "=a" (v.eax) : "0" (1) : "ecx","ebx","edx");
	/* Effective model combines the extended and base model fields. */
	model = (v.ext_model << 4) + v.model;
	/* Only Nehalem (family 6, model 26) up to stepping 4 is affected. */
	if (v.family != 6 || model != 26 || v.stepping > 4)
		return;
	*ebx |= (1 << 2);	/* disable unsupported event */
}
55 
/* Return the architectural perfmon event filter mask from CPUID leaf
   0xa: EBX holds one bit per predefined event (a set bit marks the
   event unavailable — cf. workaround_nehalem_aaj79), truncated to the
   EAX[31:24] count of valid bits. Returns -1U for any other cpu_type. */
static inline unsigned arch_get_filter(op_cpu cpu_type)
{
	if (cpu_type == CPU_ARCH_PERFMON) {
		unsigned ebx, eax;
		asm("cpuid" : "=a" (eax), "=b" (ebx) : "0" (0xa) : "ecx","edx");
		workaround_nehalem_aaj79(&ebx);
		/* EAX[31:24] = number of meaningful event bits in EBX. */
		return ebx & num_to_mask(eax >> 24);
	}
	return -1U;
}
66 
/* Number of general-purpose performance counters per logical CPU,
   read from CPUID leaf 0xa EAX[15:8]. Returns -1 (unknown) for any
   cpu_type other than CPU_ARCH_PERFMON. */
static inline int arch_num_counters(op_cpu cpu_type)
{
	if (cpu_type == CPU_ARCH_PERFMON) {
		unsigned v;
		asm("cpuid" : "=a" (v) : "0" (0xa) : "ebx","ecx","edx");
		return (v >> 8) & 0xff;
	}
	return -1;
}
76 
/* Bitmask with one bit set per available performance counter, built
   from the CPUID leaf 0xa EAX[15:8] counter count (bits 0..n-1). */
static inline unsigned arch_get_counter_mask(void)
{
	unsigned v;
	asm("cpuid" : "=a" (v) : "0" (0xa) : "ebx","ecx","edx");
	return num_to_mask((v >> 8) & 0xff);
}
83 
84 #else
85 
/* Non-x86 fallback: CPUID is unavailable, so no events can be
   filtered; always return an empty filter mask. */
static inline unsigned arch_get_filter(op_cpu cpu_type)
{
	(void)cpu_type;		/* unused on this architecture */
	return 0;
}
93 
/* Non-x86 fallback: the counter count cannot be probed here; always
   return -1 (unknown). */
static inline int arch_num_counters(op_cpu cpu_type)
{
	(void)cpu_type;		/* unused on this architecture */
	return -1;
}
101 
/* Non-x86 fallback: no counters can be enumerated, so the available
   counter mask is always empty. */
static inline unsigned arch_get_counter_mask(void)
{
	unsigned mask = 0;

	return mask;
}
106 
107 #endif
108