// SPDX-License-Identifier: GPL-2.0
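/*
 * perf events driver exposing a small set of free-running x86 MSR counters
 * (TSC, APERF/MPERF, PPERF, SMI count, PTSC, IRPERF, thermal margin) as a
 * software-context "msr" PMU. These counters cannot generate interrupts,
 * so only counting (not sampling) is supported.
 */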
#include <linux/perf_event.h>
#include <linux/sysfs.h>
#include <linux/nospec.h>
#include <asm/intel-family.h>
#include "probe.h"

enum perf_msr_id {
	PERF_MSR_TSC = 0,
	PERF_MSR_APERF = 1,
	PERF_MSR_MPERF = 2,
	PERF_MSR_PPERF = 3,
	PERF_MSR_SMI = 4,
	PERF_MSR_PTSC = 5,
	PERF_MSR_IRPERF = 6,
	PERF_MSR_THERM = 7,
	PERF_MSR_EVENT_MAX,
};

static bool test_aperfmperf(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}

static bool test_therm_status(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_DTHERM);
}

static bool test_intel(int idx, void *data)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

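	/* Models below are known to count SMIs in MSR_SMI_COUNT: */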
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:

	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:

	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_D:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_BROADWELL_X:
	case INTEL_FAM6_SAPPHIRERAPIDS_X:

	case INTEL_FAM6_ATOM_SILVERMONT:
	case INTEL_FAM6_ATOM_SILVERMONT_D:
	case INTEL_FAM6_ATOM_AIRMONT:

	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_D:
	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
	case INTEL_FAM6_ATOM_TREMONT_D:
	case INTEL_FAM6_ATOM_TREMONT:
	case INTEL_FAM6_ATOM_TREMONT_L:

	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		if (idx == PERF_MSR_SMI)
			return true;
		break;

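	/* These models also implement MSR_PPERF: */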
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
	case INTEL_FAM6_COMETLAKE_L:
	case INTEL_FAM6_COMETLAKE:
	case INTEL_FAM6_ICELAKE_L:
	case INTEL_FAM6_ICELAKE:
	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_ICELAKE_D:
	case INTEL_FAM6_TIGERLAKE_L:
	case INTEL_FAM6_TIGERLAKE:
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}

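/*
 * sysfs event strings: each event is identified to userspace by a synthetic
 * "event=0x.." config value matching enum perf_msr_id. cpu_thermal_margin
 * is a snapshot-style reading in degrees Celsius, hence the extra
 * .snapshot and .unit attributes.
 */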
PMU_EVENT_ATTR_STRING(tsc, attr_tsc, "event=0x00");
PMU_EVENT_ATTR_STRING(aperf, attr_aperf, "event=0x01");
PMU_EVENT_ATTR_STRING(mperf, attr_mperf, "event=0x02");
PMU_EVENT_ATTR_STRING(pperf, attr_pperf, "event=0x03");
PMU_EVENT_ATTR_STRING(smi, attr_smi, "event=0x04");
PMU_EVENT_ATTR_STRING(ptsc, attr_ptsc, "event=0x05");
PMU_EVENT_ATTR_STRING(irperf, attr_irperf, "event=0x06");
PMU_EVENT_ATTR_STRING(cpu_thermal_margin, attr_therm, "event=0x07");
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.snapshot, attr_therm_snap, "1");
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.unit, attr_therm_unit, "C");

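/* Bitmask of events that probed as available on this CPU; see msr_init(). */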
static unsigned long msr_mask;

PMU_EVENT_GROUP(events, aperf);
PMU_EVENT_GROUP(events, mperf);
PMU_EVENT_GROUP(events, pperf);
PMU_EVENT_GROUP(events, smi);
PMU_EVENT_GROUP(events, ptsc);
PMU_EVENT_GROUP(events, irperf);

static struct attribute *attrs_therm[] = {
	&attr_therm.attr.attr,
	&attr_therm_snap.attr.attr,
	&attr_therm_unit.attr.attr,
	NULL,
};

static struct attribute_group group_therm = {
	.name = "events",
	.attrs = attrs_therm,
};

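/*
 * Probe table, indexed by enum perf_msr_id. The TSC entry has no MSR and is
 * read via RDTSC instead of RDMSR; .no_check tells the probe code to skip
 * the read-access test for it.
 */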
static struct perf_msr msr[] = {
	[PERF_MSR_TSC]    = { .no_check = true, },
	[PERF_MSR_APERF]  = { MSR_IA32_APERF, &group_aperf, test_aperfmperf, },
	[PERF_MSR_MPERF]  = { MSR_IA32_MPERF, &group_mperf, test_aperfmperf, },
	[PERF_MSR_PPERF]  = { MSR_PPERF, &group_pperf, test_intel, },
	[PERF_MSR_SMI]    = { MSR_SMI_COUNT, &group_smi, test_intel, },
	[PERF_MSR_PTSC]   = { MSR_F15H_PTSC, &group_ptsc, test_ptsc, },
	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF, &group_irperf, test_irperf, },
	[PERF_MSR_THERM]  = { MSR_IA32_THERM_STATUS, &group_therm, test_therm_status, },
};

static struct attribute *events_attrs[] = {
	&attr_tsc.attr.attr,
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};
static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};

static const struct attribute_group *attr_update[] = {
	&group_aperf,
	&group_mperf,
	&group_pperf,
	&group_smi,
	&group_ptsc,
	&group_irperf,
	&group_therm,
	NULL,
};

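/*
 * Validate the user-supplied config and translate it into an MSR. Sampling
 * is rejected (these counters cannot interrupt), and the index is
 * bounds-checked before being used to look up msr[].
 */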
static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

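	/*
	 * Sanitize the index under speculation: the bounds check above can
	 * be bypassed speculatively (Spectre v1), so clamp cfg before it is
	 * used to index msr_mask and msr[].
	 */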
	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);

	if (!(msr_mask & (1 << cfg)))
		return -EINVAL;

	event->hw.idx = -1;
	event->hw.event_base = msr[cfg].msr;
	event->hw.config = cfg;

	return 0;
}

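/* TSC has no MSR of its own here (event_base == 0); read it via RDTSC. */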
static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		now = rdtsc_ordered();

	return now;
}

static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value: */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
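	/*
	 * MSR_SMI_COUNT is a 32-bit counter; sign-extend the delta so a
	 * wrap of the low 32 bits still yields the correct increment.
	 */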
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
		delta = sign_extend64(delta, 31);
		local64_add(delta, &event->count);
	} else if (unlikely(event->hw.event_base == MSR_IA32_THERM_STATUS)) {
		/* If valid, extract digital readout, otherwise set to -1: */
		now = now & (1ULL << 31) ? (now >> 16) & 0x3f : -1;
		local64_set(&event->count, now);
	} else {
		local64_add(delta, &event->count);
	}
}

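/* Snapshot the current counter value as the baseline for later deltas. */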
static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now = msr_read_counter(event);

	local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}

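/*
 * Software-context PMU: these events never contend for hardware counters
 * and cannot generate overflow interrupts. PERF_PMU_CAP_NO_EXCLUDE lets the
 * core reject exclude_* filters on our behalf.
 */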
static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.attr_update	= attr_update,
};

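/*
 * The TSC event is the one unconditional entry, so bail out early without a
 * TSC. perf_msr_probe() fills msr_mask with the events usable on this CPU,
 * which event availability then varies with; for example:
 *
 *	perf stat -e msr/tsc/,msr/smi/ -- sleep 1
 */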
static int __init msr_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	msr_mask = perf_msr_probe(msr, PERF_MSR_EVENT_MAX, true, NULL);

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);