// SPDX-License-Identifier: GPL-2.0

/*
 * This driver adds support for perf events to use the Performance
 * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
 * to monitor that node.
 *
 * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
 * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
 * to a 4K boundary. For example, the PMCG at 0xff88840000 is named
 * smmuv3_pmcg_ff88840
 *
 * Filtering by StreamID is done by specifying filtering parameters
 * with the event. Options are:
 *   filter_enable    - 0 = no filtering, 1 = filtering enabled
 *   filter_span      - 0 = exact match, 1 = pattern match
 *   filter_stream_id - pattern to filter against
 *
 * To match a partial StreamID where the X most-significant bits must match
 * but the Y least-significant bits might differ, STREAMID is programmed
 * with a value that contains:
 *   STREAMID[Y - 1] == 0.
 *   STREAMID[Y - 2:0] == 1 (where Y > 1).
 * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
 * contain a value to match from the corresponding bits of the event StreamID.
 *
 * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
 *                    filter_span=1,filter_stream_id=0x42/ -a netperf
 * Applies filter pattern 0x42 to transaction events, which means events
 * matching StreamIDs 0x42 and 0x43 are counted. Further filtering
 * information is available in the SMMU documentation.
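 *
 * As a further illustration (not from the SMMU documentation, just applying
 * the encoding described above): with filter_span=1, a filter_stream_id of
 * 0x43 (0b1000011) has Y == 3 and so matches the eight StreamIDs 0x40-0x47.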
 *
 * SMMU events are not attributable to a CPU, so task mode and sampling
 * are not supported.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define SMMU_PMCG_EVCNTR0               0x0
#define SMMU_PMCG_EVCNTR(n, stride)     (SMMU_PMCG_EVCNTR0 + (n) * (stride))
#define SMMU_PMCG_EVTYPER0              0x400
#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
#define SMMU_PMCG_SID_SPAN_SHIFT        29
#define SMMU_PMCG_SMR0                  0xA00
#define SMMU_PMCG_SMR(n)                (SMMU_PMCG_SMR0 + (n) * 4)
#define SMMU_PMCG_CNTENSET0             0xC00
#define SMMU_PMCG_CNTENCLR0             0xC20
#define SMMU_PMCG_INTENSET0             0xC40
#define SMMU_PMCG_INTENCLR0             0xC60
#define SMMU_PMCG_OVSCLR0               0xC80
#define SMMU_PMCG_OVSSET0               0xCC0
#define SMMU_PMCG_CFGR                  0xE00
#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
#define SMMU_PMCG_CFGR_MSI              BIT(21)
#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
#define SMMU_PMCG_CFGR_SIZE             GENMASK(13, 8)
#define SMMU_PMCG_CFGR_NCTR             GENMASK(5, 0)
#define SMMU_PMCG_CR                    0xE04
#define SMMU_PMCG_CR_ENABLE             BIT(0)
#define SMMU_PMCG_IIDR                  0xE08
#define SMMU_PMCG_CEID0                 0xE20
#define SMMU_PMCG_CEID1                 0xE28
#define SMMU_PMCG_IRQ_CTRL              0xE50
#define SMMU_PMCG_IRQ_CTRL_IRQEN        BIT(0)
#define SMMU_PMCG_IRQ_CFG0              0xE58
#define SMMU_PMCG_IRQ_CFG1              0xE60
#define SMMU_PMCG_IRQ_CFG2              0xE64

/* MSI config fields */
#define MSI_CFG0_ADDR_MASK              GENMASK_ULL(51, 2)
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE   0x1

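/*
 * Filter defaults used when filtering is disabled: a span match against an
 * all-ones StreamID pattern, i.e. count events from every stream.
 */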
#define SMMU_PMCG_DEFAULT_FILTER_SPAN   1
#define SMMU_PMCG_DEFAULT_FILTER_SID    GENMASK(31, 0)

#define SMMU_PMCG_MAX_COUNTERS          64
#define SMMU_PMCG_ARCH_MAX_EVENTS       128

#define SMMU_PMCG_PA_SHIFT              12

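/* Quirk flags for smmu_pmu::options, selected from the IORT model in ACPI */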
#define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)
#define SMMU_PMCG_HARDEN_DISABLE        BIT(1)

static int cpuhp_state_num;

struct smmu_pmu {
	struct hlist_node node;
	struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
	DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
	DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
	unsigned int irq;
	unsigned int on_cpu;
	struct pmu pmu;
	unsigned int num_counters;
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *reloc_base;
	u64 counter_mask;
	u32 options;
	u32 iidr;
	bool global_filter;
};

#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)	\
	static inline u32 get_##_name(struct perf_event *event)	\
	{								\
		return FIELD_GET(GENMASK_ULL(_end, _start),		\
				 event->attr._config);			\
	}								\

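/*
 * Decode the user-visible config fields; these must stay in sync with the
 * "format" attribute group defined below.
 */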
SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);

static inline void smmu_pmu_enable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
}

static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
				       struct perf_event *event, int idx);

static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
	unsigned int idx;

	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
		smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);

	smmu_pmu_enable(pmu);
}

static inline void smmu_pmu_disable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
}

static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
	unsigned int idx;

	/*
	 * The global disable of the PMU sometimes fails to stop the counters;
	 * harden against this by writing an invalid event type to each used
	 * counter to forcibly stop counting.
	 */
	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
		writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));

	smmu_pmu_disable(pmu);
}

static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
					      u32 idx, u64 value)
{
	if (smmu_pmu->counter_mask & BIT(32))
		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
}

static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
{
	u64 value;

	if (smmu_pmu->counter_mask & BIT(32))
		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

	return value;
}

static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
}

static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
}

static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
}

static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
					      u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
}

static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
					u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
}

static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
}

static void smmu_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u64 delta, prev, now;
	u32 idx = hwc->idx;

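	/*
	 * prev_count can be updated concurrently (for example by the
	 * overflow interrupt handler), so retry until the cmpxchg succeeds
	 * and the (prev, now) pair used below is known to be consistent.
	 */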
	do {
		prev = local64_read(&hwc->prev_count);
		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	/*
	 * Handle overflow: the subtraction is masked to the implemented
	 * counter width, so the delta is correct even if the counter
	 * wrapped since prev_count was recorded.
	 */
	delta = now - prev;
	delta &= smmu_pmu->counter_mask;

	local64_add(delta, &event->count);
}

static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
				struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 new;

	if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
		/*
		 * On platforms that require this quirk, if the counter starts
		 * at less than half its maximum value and wraps, the current
		 * overflow handling logic may not work. Such platforms are
		 * expected to implement the full 64 counter bits so that this
		 * possibility is remote (e.g. HiSilicon HIP08).
		 */
		new = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} else {
		/*
		 * We limit the max period to half the max counter value
		 * of the counter size, so that even in the case of extreme
		 * interrupt latency the counter will (hopefully) not wrap
		 * past its initial value.
		 */
		new = smmu_pmu->counter_mask >> 1;
		smmu_pmu_counter_set_value(smmu_pmu, idx, new);
	}

	local64_set(&hwc->prev_count, new);
}

static void smmu_pmu_set_event_filter(struct perf_event *event,
				      int idx, u32 span, u32 sid)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u32 evtyper;

	evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
	smmu_pmu_set_smr(smmu_pmu, idx, sid);
}

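/*
 * With a single global filter (CFGR.SID_FILTER_TYPE set), all counters
 * share the StreamID filter programmed for counter 0, so events can only
 * be scheduled together if their filter settings are identical.
 */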
static bool smmu_pmu_check_global_filter(struct perf_event *curr,
					 struct perf_event *new)
{
	if (get_filter_enable(new) != get_filter_enable(curr))
		return false;

	if (!get_filter_enable(new))
		return true;

	return get_filter_span(new) == get_filter_span(curr) &&
	       get_filter_stream_id(new) == get_filter_stream_id(curr);
}

static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
				       struct perf_event *event, int idx)
{
	u32 span, sid;
	unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
	bool filter_en = !!get_filter_enable(event);

	span = filter_en ? get_filter_span(event) :
			   SMMU_PMCG_DEFAULT_FILTER_SPAN;
	sid = filter_en ? get_filter_stream_id(event) :
			  SMMU_PMCG_DEFAULT_FILTER_SID;

	cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
	/*
	 * Per-counter filtering, or scheduling the first globally-filtered
	 * event into an empty PMU so idx == 0 and it works out equivalent.
	 */
	if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
		smmu_pmu_set_event_filter(event, idx, span, sid);
		return 0;
	}

	/* Otherwise, must match whatever's currently scheduled */
	if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
		smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
		return 0;
	}

	return -EAGAIN;
}

static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
				  struct perf_event *event)
{
	int idx, err;
	unsigned int num_ctrs = smmu_pmu->num_counters;

	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
	if (idx == num_ctrs)
		/* The counters are all in use. */
		return -EAGAIN;

	err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
	if (err)
		return err;

	set_bit(idx, smmu_pmu->used_counters);

	return idx;
}

static bool smmu_pmu_events_compatible(struct perf_event *curr,
				       struct perf_event *new)
{
	if (new->pmu != curr->pmu)
		return false;

	if (to_smmu_pmu(new->pmu)->global_filter &&
	    !smmu_pmu_check_global_filter(curr, new))
		return false;

	return true;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static int smmu_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct device *dev = smmu_pmu->dev;
	struct perf_event *sibling;
	int group_num_events = 1;
	u16 event_id;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (hwc->sample_period) {
		dev_dbg(dev, "Sampling not supported\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_dbg(dev, "Per-task mode not supported\n");
		return -EOPNOTSUPP;
	}

	/* Verify specified event is supported on this PMU */
	event_id = get_event(event);
	if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
	    (!test_bit(event_id, smmu_pmu->supported_events))) {
		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
		return -EINVAL;
	}

	/* Don't allow groups with mixed PMUs, except for s/w events */
	if (!is_software_event(event->group_leader)) {
		if (!smmu_pmu_events_compatible(event->group_leader, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (is_software_event(sibling))
			continue;

		if (!smmu_pmu_events_compatible(sibling, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	hwc->idx = -1;

	/*
	 * Ensure all events are on the same cpu so all events are in the
	 * same cpu context, to avoid races on pmu_enable etc.
	 */
	event->cpu = smmu_pmu->on_cpu;

	return 0;
}

static void smmu_pmu_event_start(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	hwc->state = 0;

	smmu_pmu_set_period(smmu_pmu, hwc);

	smmu_pmu_counter_enable(smmu_pmu, idx);
}

static void smmu_pmu_event_stop(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	smmu_pmu_counter_disable(smmu_pmu, idx);
	/* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
	smmu_pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int smmu_pmu_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	smmu_pmu->events[idx] = event;
	local64_set(&hwc->prev_count, 0);

	smmu_pmu_interrupt_enable(smmu_pmu, idx);

	if (flags & PERF_EF_START)
		smmu_pmu_event_start(event, flags);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void smmu_pmu_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	int idx = hwc->idx;

	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
	smmu_pmu_interrupt_disable(smmu_pmu, idx);
	smmu_pmu->events[idx] = NULL;
	clear_bit(idx, smmu_pmu->used_counters);

	perf_event_update_userpage(event);
}

static void smmu_pmu_event_read(struct perf_event *event)
{
	smmu_pmu_event_update(event);
}

/* cpumask */

static ssize_t smmu_pmu_cpumask_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
}

static struct device_attribute smmu_pmu_cpumask_attr =
	__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);

static struct attribute *smmu_pmu_cpumask_attrs[] = {
	&smmu_pmu_cpumask_attr.attr,
	NULL
};

static const struct attribute_group smmu_pmu_cpumask_group = {
	.attrs = smmu_pmu_cpumask_attrs,
};

/* Events */

static ssize_t smmu_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define SMMU_EVENT_ATTR(name, config)			\
	PMU_EVENT_ATTR_ID(name, smmu_pmu_event_show, config)

static struct attribute *smmu_pmu_events[] = {
	SMMU_EVENT_ATTR(cycles, 0),
	SMMU_EVENT_ATTR(transaction, 1),
	SMMU_EVENT_ATTR(tlb_miss, 2),
	SMMU_EVENT_ATTR(config_cache_miss, 3),
	SMMU_EVENT_ATTR(trans_table_walk_access, 4),
	SMMU_EVENT_ATTR(config_struct_access, 5),
	SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
	SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
	NULL
};

static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
					 struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
		return attr->mode;

	return 0;
}

static const struct attribute_group smmu_pmu_events_group = {
	.name = "events",
	.attrs = smmu_pmu_events,
	.is_visible = smmu_pmu_event_is_visible,
};

static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
					     struct device_attribute *attr,
					     char *page)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);
}

static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
						struct attribute *attr,
						int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	if (!smmu_pmu->iidr)
		return 0;
	return attr->mode;
}

static struct device_attribute smmu_pmu_identifier_attr =
	__ATTR(identifier, 0444, smmu_pmu_identifier_attr_show, NULL);

static struct attribute *smmu_pmu_identifier_attrs[] = {
	&smmu_pmu_identifier_attr.attr,
	NULL
};

static const struct attribute_group smmu_pmu_identifier_group = {
	.attrs = smmu_pmu_identifier_attrs,
	.is_visible = smmu_pmu_identifier_attr_visible,
};

/* Formats */
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
PMU_FORMAT_ATTR(filter_span, "config1:32");
PMU_FORMAT_ATTR(filter_enable, "config1:33");

static struct attribute *smmu_pmu_formats[] = {
	&format_attr_event.attr,
	&format_attr_filter_stream_id.attr,
	&format_attr_filter_span.attr,
	&format_attr_filter_enable.attr,
	NULL
};

static const struct attribute_group smmu_pmu_format_group = {
	.name = "format",
	.attrs = smmu_pmu_formats,
};

static const struct attribute_group *smmu_pmu_attr_grps[] = {
	&smmu_pmu_cpumask_group,
	&smmu_pmu_events_group,
	&smmu_pmu_format_group,
	&smmu_pmu_identifier_group,
	NULL
};

/*
 * Generic device handlers
 */

static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct smmu_pmu *smmu_pmu;
	unsigned int target;

	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
	if (cpu != smmu_pmu->on_cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
	smmu_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));

	return 0;
}

static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
{
	struct smmu_pmu *smmu_pmu = data;
	u64 ovsr;
	unsigned int idx;

	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
	if (!ovsr)
		return IRQ_NONE;

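	/*
	 * Clear the overflow bits before servicing them: a counter that
	 * overflows again while we handle it will then raise a fresh
	 * interrupt instead of the event being lost.
	 */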
	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);

	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
		struct perf_event *event = smmu_pmu->events[idx];
		struct hw_perf_event *hwc;

		if (WARN_ON_ONCE(!event))
			continue;

		smmu_pmu_event_update(event);
		hwc = &event->hw;

		smmu_pmu_set_period(smmu_pmu, hwc);
	}

	return IRQ_HANDLED;
}

static void smmu_pmu_free_msis(void *data)
{
	struct device *dev = data;

	platform_msi_domain_free_irqs(dev);
}

static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct smmu_pmu *pmu = dev_get_drvdata(dev);

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
	writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
		       pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
}

static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
	struct msi_desc *desc;
	struct device *dev = pmu->dev;
	int ret;

	/* Clear MSI address reg */
	writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);

	/* MSI supported or not */
	if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
		return;

	ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	desc = first_msi_entry(dev);
	if (desc)
		pmu->irq = desc->irq;

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, smmu_pmu_free_msis, dev);
}

static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
{
	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
	int irq, ret = -ENXIO;

	smmu_pmu_setup_msi(pmu);

	irq = pmu->irq;
	if (irq)
		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
				       flags, "smmuv3-pmu", pmu);
	return ret;
}

static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
{
	u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

	smmu_pmu_disable(&smmu_pmu->pmu);

	/* Disable counter and interrupt */
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
}

static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
{
	u32 model;

	model = *(u32 *)dev_get_platdata(smmu_pmu->dev);

	switch (model) {
	case IORT_SMMU_V3_PMCG_HISI_HIP08:
		/* HiSilicon Erratum 162001800 */
		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
		break;
	case IORT_SMMU_V3_PMCG_HISI_HIP09:
		smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
		break;
	}

	dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
}

static int smmu_pmu_probe(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu;
	struct resource *res_0;
	u32 cfgr, reg_size;
	u64 ceid_64[2];
	int irq, err;
	char *name;
	struct device *dev = &pdev->dev;

	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
	if (!smmu_pmu)
		return -ENOMEM;

	smmu_pmu->dev = dev;
	platform_set_drvdata(pdev, smmu_pmu);

	smmu_pmu->pmu = (struct pmu) {
		.module = THIS_MODULE,
		.task_ctx_nr = perf_invalid_context,
		.pmu_enable = smmu_pmu_enable,
		.pmu_disable = smmu_pmu_disable,
		.event_init = smmu_pmu_event_init,
		.add = smmu_pmu_event_add,
		.del = smmu_pmu_event_del,
		.start = smmu_pmu_event_start,
		.stop = smmu_pmu_event_stop,
		.read = smmu_pmu_event_read,
		.attr_groups = smmu_pmu_attr_grps,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
	};

	smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
	if (IS_ERR(smmu_pmu->reg_base))
		return PTR_ERR(smmu_pmu->reg_base);

	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);

	/* Determine if page 1 is present */
	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
		smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(smmu_pmu->reloc_base))
			return PTR_ERR(smmu_pmu->reloc_base);
	} else {
		smmu_pmu->reloc_base = smmu_pmu->reg_base;
	}

	irq = platform_get_irq_optional(pdev, 0);
	if (irq > 0)
		smmu_pmu->irq = irq;

	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
			  SMMU_PMCG_ARCH_MAX_EVENTS);

	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;

	smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);

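	/*
	 * CFGR.SIZE holds the implemented counter width minus one, so the
	 * mask below covers reg_size + 1 bits (e.g. a SIZE of 31 means
	 * 32-bit counters).
	 */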
	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);

	smmu_pmu_reset(smmu_pmu);

	err = smmu_pmu_setup_irq(smmu_pmu);
	if (err) {
		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
		return err;
	}

	smmu_pmu->iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
			      (res_0->start) >> SMMU_PMCG_PA_SHIFT);
	if (!name) {
		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
		return -EINVAL;
	}

	smmu_pmu_get_acpi_options(smmu_pmu);

	/*
	 * On platforms that suffer from this quirk, disabling the PMU
	 * sometimes fails to stop the counters, which leads to inaccurate
	 * or erroneous counts. Forcibly stop the counters with the quirk
	 * handlers instead.
	 */
	if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
		smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
		smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
	}

	/* Pick one CPU to be the preferred one to use */
	smmu_pmu->on_cpu = raw_smp_processor_id();
	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));

	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
					       &smmu_pmu->node);
	if (err) {
		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
			err, &res_0->start);
		return err;
	}

	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
	if (err) {
		dev_err(dev, "Error %d registering PMU @%pa\n",
			err, &res_0->start);
		goto out_unregister;
	}

	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
		 &res_0->start, smmu_pmu->num_counters,
		 smmu_pmu->global_filter ? "Global(Counter0)" : "Individual");

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
	return err;
}

static int smmu_pmu_remove(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&smmu_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);

	return 0;
}

static void smmu_pmu_shutdown(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	smmu_pmu_disable(&smmu_pmu->pmu);
}

static struct platform_driver smmu_pmu_driver = {
	.driver = {
		.name = "arm-smmu-v3-pmcg",
		.suppress_bind_attrs = true,
	},
	.probe = smmu_pmu_probe,
	.remove = smmu_pmu_remove,
	.shutdown = smmu_pmu_shutdown,
};

static int __init arm_smmu_pmu_init(void)
{
	int ret;

	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
						  "perf/arm/pmcg:online",
						  NULL,
						  smmu_pmu_offline_cpu);
	if (cpuhp_state_num < 0)
		return cpuhp_state_num;

	ret = platform_driver_register(&smmu_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(cpuhp_state_num);

	return ret;
}
module_init(arm_smmu_pmu_init);

static void __exit arm_smmu_pmu_exit(void)
{
	platform_driver_unregister(&smmu_pmu_driver);
	cpuhp_remove_multi_state(cpuhp_state_num);
}

module_exit(arm_smmu_pmu_exit);

MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_LICENSE("GPL v2");