/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. It still makes sense to implement them in perf,
 * because that lets us collect them together with other events and use
 * them from tools without special MSR access code.
 *
 * The events support system-wide counting only. There is no sampling
 * support because the hardware does not support it.
 *
 * According to the counters' scope and category, two PMUs are
 * registered with the perf_event core subsystem:
 * - 'cstate_core': The counter is available for each physical core.
 *   The counters include CORE_C*_RESIDENCY.
 * - 'cstate_pkg': The counter is available for each physical package.
 *   The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual, Vol. 3B.
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 perf code: 0x00
 *			 Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL
 *			 Scope: Core (each processor core has an MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
 *						CNL,KBL,CML,TNT
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
 *						TGL,TNT,RKL,ADL
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       perf code: 0x03
 *			       Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
 *						ICL,TGL,RKL,ADL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
 *			       perf code: 0x00
 *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
 *						KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *						GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
 *						ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
 *						TGL,TNT,RKL,ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
 *			       perf code: 0x03
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
 *						KBL,CML,ICL,TGL,RKL,ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
 *			       perf code: 0x04
 *			       Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
 *						ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
 *			       perf code: 0x05
 *			       Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
 *						ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
 *			       Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
 *						TNT,RKL,ADL
 *			       Scope: Package (physical package)
 *
 */
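/*
 * Example usage (illustrative): the PMUs register as "cstate_core" and
 * "cstate_pkg" (or "cstate_die" on multi-die systems), and counting must
 * be system-wide, e.g.:
 *
 *	perf stat -e cstate_core/c6-residency/ -a -- sleep 1
 *	perf stat -e cstate_pkg/c2-residency/,cstate_pkg/c10-residency/ -a -- sleep 1
 *
 * Per-task events and sampling periods are rejected in
 * cstate_pmu_event_init() below.
 */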

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct device *dev,	\
				struct device_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct device_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
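/*
 * For example, DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63")
 * below generates __cstate_core_event_show() and format_attr_core_event,
 * backing the sysfs file .../cstate_core/format/event with the literal
 * string "config:0-63".
 */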

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

/* Model -> events mapping */
struct cstate_model {
	unsigned long	core_events;
	unsigned long	pkg_events;
	unsigned long	quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
#define KNL_CORE_C6_MSR		(1UL << 1)
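/*
 * SLM_PKG_C6_USE_C7_MSR: Silvermont reports package C6 residency via the
 * MSR_PKG_C7_RESIDENCY address. KNL_CORE_C6_MSR: Knights Landing reports
 * core C6 residency via a dedicated MSR. Both quirks are applied in
 * cstate_probe().
 */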

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};

/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");

static unsigned long core_msr_mask;

PMU_EVENT_GROUP(events, cstate_core_c1);
PMU_EVENT_GROUP(events, cstate_core_c3);
PMU_EVENT_GROUP(events, cstate_core_c6);
PMU_EVENT_GROUP(events, cstate_core_c7);

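/*
 * Probe callback for perf_msr_probe(): 'data' is the model's event bitmap,
 * so an MSR is advertised only if its event bit is set.
 */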
static bool test_msr(int idx, void *data)
{
	return test_bit(idx, (unsigned long *) data);
}

static struct perf_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&group_cstate_core_c1,	test_msr },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&group_cstate_core_c3,	test_msr },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&group_cstate_core_c6,	test_msr },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&group_cstate_core_c7,	test_msr },
};

static struct attribute *attrs_empty[] = {
	NULL,
};

/*
 * There are no default events, but we need to create an
 * "events" group (with empty attrs) before updating
 * it with detected events.
 */
static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, attr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, attr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, attr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");

static unsigned long pkg_msr_mask;

PMU_EVENT_GROUP(events, cstate_pkg_c2);
PMU_EVENT_GROUP(events, cstate_pkg_c3);
PMU_EVENT_GROUP(events, cstate_pkg_c6);
PMU_EVENT_GROUP(events, cstate_pkg_c7);
PMU_EVENT_GROUP(events, cstate_pkg_c8);
PMU_EVENT_GROUP(events, cstate_pkg_c9);
PMU_EVENT_GROUP(events, cstate_pkg_c10);

static struct perf_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&group_cstate_pkg_c2,	test_msr },
	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&group_cstate_pkg_c3,	test_msr },
	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&group_cstate_pkg_c6,	test_msr },
	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&group_cstate_pkg_c7,	test_msr },
	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&group_cstate_pkg_c8,	test_msr },
	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&group_cstate_pkg_c9,	test_msr },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&group_cstate_pkg_c10,	test_msr },
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};
static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}

static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
		if (!(core_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
		if (!(pkg_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_die_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

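	/* Reroute the event to the designated reader CPU of its core/package. */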
	event->cpu = cpu;
	event->hw.config = cfg;
	event->hw.idx = -1;
	return 0;
}

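/* The MSR to read (hw.event_base) was resolved at event_init() time. */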
static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}

static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

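	/*
	 * The counter is free running; accumulate the delta since the last
	 * read, retrying if prev_count was updated concurrently.
	 */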
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}

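/* The counter is free running; starting it merely snapshots its current value. */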
static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

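/* Fold the final delta into event->count before the event is removed. */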
static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}

/*
 * Check if the exiting CPU is the designated reader. If so, migrate the
 * events when there is a valid target available.
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_die_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}

static const struct attribute_group *core_attr_update[] = {
	&group_cstate_core_c1,
	&group_cstate_core_c3,
	&group_cstate_core_c6,
	&group_cstate_core_c7,
	NULL,
};

static const struct attribute_group *pkg_attr_update[] = {
	&group_cstate_pkg_c2,
	&group_cstate_pkg_c3,
	&group_cstate_pkg_c6,
	&group_cstate_pkg_c7,
	&group_cstate_pkg_c8,
	&group_cstate_pkg_c9,
	&group_cstate_pkg_c10,
	NULL,
};

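/*
 * PERF_PMU_CAP_NO_INTERRUPT: these counters cannot generate an overflow
 * interrupt, which is also why event_init() refuses sampling.
 */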
static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.attr_update	= core_attr_update,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.attr_update	= pkg_attr_update,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model cnl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icx_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
};

static const struct cstate_model adl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};

static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};

static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&slm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	&cnl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&knl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&knl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	&glm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&icx_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&icx_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&adl_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;

	core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
				       true, (void *) &cm->core_events);

	pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
				      true, (void *) &cm->pkg_events);

	has_cstate_core = !!core_msr_mask;
	has_cstate_pkg = !!pkg_msr_mask;

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
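		/*
		 * If a package contains multiple dies, the counters' scope
		 * is really per die, so register the PMU under the
		 * "cstate_die" name instead.
		 */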
		if (topology_max_die_per_package() > 1) {
			err = perf_pmu_register(&cstate_pkg_pmu,
						"cstate_die", -1);
		} else {
			err = perf_pmu_register(&cstate_pkg_pmu,
						cstate_pkg_pmu.name, -1);
		}
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}
	return 0;
}

static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);