1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Performance counter support for POWER10 processors.
4 *
5 * Copyright 2020 Madhavan Srinivasan, IBM Corporation.
6 * Copyright 2020 Athira Rajeev, IBM Corporation.
7 */
8
9 #define pr_fmt(fmt) "power10-pmu: " fmt
10
11 #include "isa207-common.h"
12
13 /*
14 * Raw event encoding for Power10:
15 *
16 * 60 56 52 48 44 40 36 32
17 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
18 * | | [ ] [ src_match ] [ src_mask ] | [ ] [ l2l3_sel ] [ thresh_ctl ]
19 * | | | | | |
20 * | | *- IFM (Linux) | | thresh start/stop -*
21 * | *- BHRB (Linux) | src_sel
22 * *- EBB (Linux) *invert_bit
23 *
24 * 28 24 20 16 12 8 4 0
25 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
26 * [ ] [ sample ] [ ] [ ] [ pmc ] [unit ] [ ] | m [ pmcxsel ]
27 * | | | | | | |
28 * | | | | | | *- mark
29 * | | | *- L1/L2/L3 cache_sel | |*-radix_scope_qual
30 * | | sdar_mode |
31 * | *- sampling mode for marked events *- combine
32 * |
33 * *- thresh_sel
34 *
35 * Below uses IBM bit numbering.
36 *
37 * MMCR1[x:y] = unit (PMCxUNIT)
38 * MMCR1[24] = pmc1combine[0]
39 * MMCR1[25] = pmc1combine[1]
40 * MMCR1[26] = pmc2combine[0]
41 * MMCR1[27] = pmc2combine[1]
42 * MMCR1[28] = pmc3combine[0]
43 * MMCR1[29] = pmc3combine[1]
44 * MMCR1[30] = pmc4combine[0]
45 * MMCR1[31] = pmc4combine[1]
46 *
47 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
48 * MMCR1[20:27] = thresh_ctl
49 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
50 * MMCR1[20:27] = thresh_ctl
51 * else
52 * MMCRA[48:55] = thresh_ctl (THRESH START/END)
53 *
54 * if thresh_sel:
55 * MMCRA[45:47] = thresh_sel
56 *
57 * if l2l3_sel:
58 * MMCR2[56:60] = l2l3_sel[0:4]
59 *
60 * MMCR1[16] = cache_sel[0]
61 * MMCR1[17] = cache_sel[1]
62 * MMCR1[18] = radix_scope_qual
63 *
64 * if mark:
65 * MMCRA[63] = 1 (SAMPLE_ENABLE)
66 * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
67 * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
68 *
69 * if EBB and BHRB:
70 * MMCRA[32:33] = IFM
71 *
72 * MMCRA[SDAR_MODE] = sdar_mode[0:1]
73 */
74
75 /*
76 * Some power10 event codes.
77 */
78 #define EVENT(_name, _code) enum{_name = _code}
79
80 #include "power10-events-list.h"
81
82 #undef EVENT
83
84 /* MMCRA IFM bits - POWER10 */
85 #define POWER10_MMCRA_IFM1 0x0000000040000000UL
86 #define POWER10_MMCRA_IFM2 0x0000000080000000UL
87 #define POWER10_MMCRA_IFM3 0x00000000C0000000UL
88 #define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL
89
90 extern u64 PERF_REG_EXTENDED_MASK;
91
/*
 * Table of event-code alternatives, sorted by column 0 (a requirement of
 * the isa207_get_alternatives() lookup).  Each row lists encodings of the
 * same logical event on different PMCs, giving the scheduler flexibility
 * when placing events on counters.
 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
};
97
/*
 * Fill @alt[] with alternative encodings of @event and return how many
 * were found.  All the work is delegated to the shared ISA v2.07 helper,
 * driven by the POWER10 alternatives table.
 */
static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	return isa207_get_alternatives(event, alt,
				       ARRAY_SIZE(power10_event_alternatives),
				       flags, power10_event_alternatives);
}
108
/*
 * sysfs attribute definitions mapping the generic perf event names and
 * the generalized cache event names to POWER10 raw event codes (the
 * PM_* values come from power10-events-list.h).
 */
GENERIC_EVENT_ATTR(cpu-cycles,			PM_RUN_CYC);
GENERIC_EVENT_ATTR(instructions,		PM_RUN_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BR_CMPL);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);
GENERIC_EVENT_ATTR(mem-loads,			MEM_LOADS);
GENERIC_EVENT_ATTR(mem-stores,			MEM_STORES);

CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_LD_PREFETCH_CACHE_LINE_MISS);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_REQ);
CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BR_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);
131
/*
 * NULL-terminated list of all event attributes (generic + cache) that is
 * exported under the PMU's events/ sysfs directory.
 */
static struct attribute *power10_events_attr[] = {
	GENERIC_EVENT_PTR(PM_RUN_CYC),
	GENERIC_EVENT_PTR(PM_RUN_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BR_CMPL),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),
	GENERIC_EVENT_PTR(MEM_LOADS),
	GENERIC_EVENT_PTR(MEM_STORES),
	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_LD_PREFETCH_CACHE_LINE_MISS),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_REQ),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BR_CMPL),
	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};
156
/* sysfs "events" directory for the POWER10 PMU */
static struct attribute_group power10_pmu_events_group = {
	.name = "events",
	.attrs = power10_events_attr,
};
161
/*
 * perf_event_attr::config bit-field layout, exported via the PMU's
 * format/ sysfs directory.  The fields correspond to the raw event
 * encoding diagram at the top of this file; note radix_scope occupies
 * config bit 9, between mark (8) and combine (10-11).
 */
PMU_FORMAT_ATTR(event,          "config:0-59");
PMU_FORMAT_ATTR(pmcxsel,        "config:0-7");
PMU_FORMAT_ATTR(mark,           "config:8");
PMU_FORMAT_ATTR(combine,        "config:10-11");
PMU_FORMAT_ATTR(unit,           "config:12-15");
PMU_FORMAT_ATTR(pmc,            "config:16-19");
PMU_FORMAT_ATTR(cache_sel,      "config:20-21");
PMU_FORMAT_ATTR(sdar_mode,      "config:22-23");
PMU_FORMAT_ATTR(sample_mode,    "config:24-28");
PMU_FORMAT_ATTR(thresh_sel,     "config:29-31");
PMU_FORMAT_ATTR(thresh_stop,    "config:32-35");
PMU_FORMAT_ATTR(thresh_start,   "config:36-39");
PMU_FORMAT_ATTR(l2l3_sel,       "config:40-44");
PMU_FORMAT_ATTR(src_sel,        "config:45-46");
PMU_FORMAT_ATTR(invert_bit,     "config:47");
PMU_FORMAT_ATTR(src_mask,       "config:48-53");
PMU_FORMAT_ATTR(src_match,      "config:54-59");
PMU_FORMAT_ATTR(radix_scope,    "config:9");
180
/* NULL-terminated list of the format attributes defined above */
static struct attribute *power10_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sdar_mode.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_l2l3_sel.attr,
	&format_attr_src_sel.attr,
	&format_attr_invert_bit.attr,
	&format_attr_src_mask.attr,
	&format_attr_src_match.attr,
	&format_attr_radix_scope.attr,
	NULL,
};
202
/* sysfs "format" directory for the POWER10 PMU */
static struct attribute_group power10_pmu_format_group = {
	.name = "format",
	.attrs = power10_pmu_format_attr,
};
207
/* All sysfs attribute groups registered with the PMU core */
static const struct attribute_group *power10_pmu_attr_groups[] = {
	&power10_pmu_format_group,
	&power10_pmu_events_group,
	NULL,
};
213
/* Mapping from generic perf hardware event ids to POWER10 raw event codes */
static int power10_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_RUN_CYC,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_RUN_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BR_CMPL,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};
222
/*
 * Translate a perf branch_sample_type bitmask into the MMCRA IFM bits
 * used to program BHRB instruction filtering.  Returns 0 for "record
 * every branch" and (u64)-1 for combinations the hardware cannot filter
 * (callers treat all-ones as an error value).
 *
 * BHRB and regular PMU events share the same privilege state filter
 * configuration.  BHRB is always recorded along with a regular PMU
 * event; as the privilege state filter is handled in the basic PMC
 * configuration of the accompanying regular PMU event, any separate
 * BHRB-specific privilege request is ignored here.
 *
 * NOTE: the order of the checks below is significant — e.g. BRANCH_ANY
 * wins over everything else.
 */
static u64 power10_bhrb_filter_map(u64 branch_sample_type)
{
	/* "any branch" means no hardware filtering at all */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return 0;

	/* The hardware cannot filter on returns */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return POWER10_MMCRA_IFM2;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_COND)
		return POWER10_MMCRA_IFM3;

	/* Direct-call-only filtering is not supported */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		return POWER10_MMCRA_IFM1;

	/* Everything else is unsupported */
	return -1;
}
263
/*
 * Program the BHRB instruction-filtering mode into MMCRA.  Only the IFM
 * bits are taken from @pmu_bhrb_filter; all other MMCRA bits are
 * preserved via read-modify-write of the SPR.
 */
static void power10_config_bhrb(u64 pmu_bhrb_filter)
{
	u64 mmcra = mfspr(SPRN_MMCRA);

	mmcra |= pmu_bhrb_filter & POWER10_MMCRA_BHRB_MASK;
	mtspr(SPRN_MMCRA, mmcra);
}
271
272 #define C(x) PERF_COUNT_HW_CACHE_##x
273
274 /*
275 * Table of generalized cache-related events.
276 * 0 means not supported, -1 means nonsensical, other values
277 * are event codes.
278 */
279 static u64 power10_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
280 [C(L1D)] = {
281 [C(OP_READ)] = {
282 [C(RESULT_ACCESS)] = PM_LD_REF_L1,
283 [C(RESULT_MISS)] = PM_LD_MISS_L1,
284 },
285 [C(OP_WRITE)] = {
286 [C(RESULT_ACCESS)] = 0,
287 [C(RESULT_MISS)] = PM_ST_MISS_L1,
288 },
289 [C(OP_PREFETCH)] = {
290 [C(RESULT_ACCESS)] = PM_LD_PREFETCH_CACHE_LINE_MISS,
291 [C(RESULT_MISS)] = 0,
292 },
293 },
294 [C(L1I)] = {
295 [C(OP_READ)] = {
296 [C(RESULT_ACCESS)] = PM_INST_FROM_L1,
297 [C(RESULT_MISS)] = PM_L1_ICACHE_MISS,
298 },
299 [C(OP_WRITE)] = {
300 [C(RESULT_ACCESS)] = PM_INST_FROM_L1MISS,
301 [C(RESULT_MISS)] = -1,
302 },
303 [C(OP_PREFETCH)] = {
304 [C(RESULT_ACCESS)] = PM_IC_PREF_REQ,
305 [C(RESULT_MISS)] = 0,
306 },
307 },
308 [C(LL)] = {
309 [C(OP_READ)] = {
310 [C(RESULT_ACCESS)] = PM_DATA_FROM_L3,
311 [C(RESULT_MISS)] = PM_DATA_FROM_L3MISS,
312 },
313 [C(OP_WRITE)] = {
314 [C(RESULT_ACCESS)] = -1,
315 [C(RESULT_MISS)] = -1,
316 },
317 [C(OP_PREFETCH)] = {
318 [C(RESULT_ACCESS)] = -1,
319 [C(RESULT_MISS)] = 0,
320 },
321 },
322 [C(DTLB)] = {
323 [C(OP_READ)] = {
324 [C(RESULT_ACCESS)] = 0,
325 [C(RESULT_MISS)] = PM_DTLB_MISS,
326 },
327 [C(OP_WRITE)] = {
328 [C(RESULT_ACCESS)] = -1,
329 [C(RESULT_MISS)] = -1,
330 },
331 [C(OP_PREFETCH)] = {
332 [C(RESULT_ACCESS)] = -1,
333 [C(RESULT_MISS)] = -1,
334 },
335 },
336 [C(ITLB)] = {
337 [C(OP_READ)] = {
338 [C(RESULT_ACCESS)] = 0,
339 [C(RESULT_MISS)] = PM_ITLB_MISS,
340 },
341 [C(OP_WRITE)] = {
342 [C(RESULT_ACCESS)] = -1,
343 [C(RESULT_MISS)] = -1,
344 },
345 [C(OP_PREFETCH)] = {
346 [C(RESULT_ACCESS)] = -1,
347 [C(RESULT_MISS)] = -1,
348 },
349 },
350 [C(BPU)] = {
351 [C(OP_READ)] = {
352 [C(RESULT_ACCESS)] = PM_BR_CMPL,
353 [C(RESULT_MISS)] = PM_BR_MPRED_CMPL,
354 },
355 [C(OP_WRITE)] = {
356 [C(RESULT_ACCESS)] = -1,
357 [C(RESULT_MISS)] = -1,
358 },
359 [C(OP_PREFETCH)] = {
360 [C(RESULT_ACCESS)] = -1,
361 [C(RESULT_MISS)] = -1,
362 },
363 },
364 [C(NODE)] = {
365 [C(OP_READ)] = {
366 [C(RESULT_ACCESS)] = -1,
367 [C(RESULT_MISS)] = -1,
368 },
369 [C(OP_WRITE)] = {
370 [C(RESULT_ACCESS)] = -1,
371 [C(RESULT_MISS)] = -1,
372 },
373 [C(OP_PREFETCH)] = {
374 [C(RESULT_ACCESS)] = -1,
375 [C(RESULT_MISS)] = -1,
376 },
377 },
378 };
379
380 #undef C
381
/*
 * POWER10 PMU description registered with the core powerpc perf code.
 * MMCR computation and constraint handling are shared with other
 * ISA v2.07+ processors via the isa207-common helpers.
 */
static struct power_pmu power10_pmu = {
	.name			= "POWER10",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	/* Constrain generalized cache events to PMC4 */
	.group_constraint_mask	= CNST_CACHE_PMC4_MASK,
	.group_constraint_val	= CNST_CACHE_PMC4_VAL,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power10_config_bhrb,
	.bhrb_filter_map	= power10_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power10_get_alternatives,
	.get_mem_data_src	= isa207_get_mem_data_src,
	.get_mem_weight		= isa207_get_mem_weight,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S |
				  PPMU_ARCH_31,
	.n_generic		= ARRAY_SIZE(power10_generic_events),
	.generic_events		= power10_generic_events,
	.cache_events		= &power10_cache_events,
	.attr_groups		= power10_pmu_attr_groups,
	.bhrb_nr		= 32,	/* BHRB depth on POWER10 */
	.capabilities		= PERF_PMU_CAP_EXTENDED_REGS,
};
406
init_power10_pmu(void)407 int init_power10_pmu(void)
408 {
409 int rc;
410
411 /* Comes from cpu_specs[] */
412 if (!cur_cpu_spec->oprofile_cpu_type ||
413 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10"))
414 return -ENODEV;
415
416 /* Set the PERF_REG_EXTENDED_MASK here */
417 PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;
418
419 rc = register_power_pmu(&power10_pmu);
420 if (rc)
421 return rc;
422
423 /* Tell userspace that EBB is supported */
424 cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
425
426 return 0;
427 }
428