/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */
enum armv8_pmuv3_perf_types {
	/* Required events. */
	ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL		= 0x03,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS		= 0x04,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
	ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES		= 0x11,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED		= 0x12,

	/* At least one of the following is required. */
	ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED		= 0x08,
	ARMV8_PMUV3_PERFCTR_OP_SPEC			= 0x1B,

	/* Common architectural events. */
	ARMV8_PMUV3_PERFCTR_MEM_READ			= 0x06,
	ARMV8_PMUV3_PERFCTR_MEM_WRITE			= 0x07,
	ARMV8_PMUV3_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV8_PMUV3_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV8_PMUV3_PERFCTR_CID_WRITE			= 0x0B,
	ARMV8_PMUV3_PERFCTR_PC_WRITE			= 0x0C,
	ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN		= 0x0E,
	ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS	= 0x0F,
	ARMV8_PMUV3_PERFCTR_TTBR_WRITE			= 0x1C,

	/* Common microarchitectural events. */
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL		= 0x01,
	ARMV8_PMUV3_PERFCTR_ITLB_REFILL			= 0x02,
	ARMV8_PMUV3_PERFCTR_DTLB_REFILL			= 0x05,
	ARMV8_PMUV3_PERFCTR_MEM_ACCESS			= 0x13,
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS		= 0x14,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB		= 0x15,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS		= 0x16,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL		= 0x17,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_WB			= 0x18,
	ARMV8_PMUV3_PERFCTR_BUS_ACCESS			= 0x19,
	ARMV8_PMUV3_PERFCTR_MEM_ERROR			= 0x1A,
	ARMV8_PMUV3_PERFCTR_BUS_CYCLES			= 0x1D,
};

/* ARMv8 Cortex-A53 specific event types. */
enum armv8_a53_pmu_perf_types {
	ARMV8_A53_PERFCTR_PREFETCH_LINEFILL		= 0xC2,
};

/* ARMv8 Cortex-A57 specific event types. */
enum armv8_a57_perf_types {
	ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD		= 0x40,
	ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST		= 0x41,
	ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD		= 0x42,
	ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST		= 0x43,
	ARMV8_A57_PERFCTR_DTLB_REFILL_LD		= 0x4c,
	ARMV8_A57_PERFCTR_DTLB_REFILL_ST		= 0x4d,
};

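/*
 * Illustrative note (not from the original source): the values above are
 * architectural event numbers, so they can also be requested as raw perf
 * events without going through the mappings below, e.g. "perf stat -e r11"
 * asks for event 0x11 (CPU cycles); the raw value lands in attr->config and
 * armpmu_map_event() masks it with ARMV8_EVTYPE_EVENT.
 */
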
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

/* ARM Cortex-A53 HW events mapping. */
static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};

/* ARM Cortex-A57 HW events mapping. */
static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_ST,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER	0
#define ARMV8_IDX_COUNTER0	1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV8_MAX_COUNTERS	32
#define ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)

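/*
 * Worked example (added for illustration, not in the original code): with
 * ARMV8_IDX_COUNTER0 == 1, perf index 1 maps to hardware event counter 0,
 * index 2 to counter 1, and so on. The cycle counter uses perf index 0, so
 * ARMV8_IDX_TO_COUNTER(ARMV8_IDX_CYCLE_COUNTER) == (0 - 1) & 0x1f == 31,
 * which is exactly the bit position reserved for the cycle counter in the
 * PMCNTENSET/PMCNTENCLR/PMINTENSET/PMOVSCLR registers.
 */
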
/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK	0x1f
#define ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */

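/*
 * Illustrative sketch (not part of the original file): a typical PMCR
 * update is a read-modify-write through the accessors below, e.g. the way
 * armv8pmu_start()/armv8pmu_stop() toggle the global enable bit:
 *
 *	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
 *	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
 *
 * Note that ARMV8_PMCR_MASK only covers the E..DP control bits; the N field
 * (number of event counters) is read-only and is extracted with
 * ARMV8_PMCR_N_SHIFT/ARMV8_PMCR_N_MASK in armv8pmu_read_num_pmnc_events().
 */
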
/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1	(1 << 31)
#define ARMV8_EXCLUDE_EL0	(1 << 30)
#define ARMV8_INCLUDE_EL2	(1 << 27)

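/*
 * Worked example (illustration only): ARMV8_EVTYPE_MASK (0xc80003ff) keeps
 * the filter bits 31, 30 and 27 plus the 10-bit event number. An L1D refill
 * event (0x03) with EL0 filtered out would therefore be programmed as
 *
 *	(ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL | ARMV8_EXCLUDE_EL0)
 *		& ARMV8_EVTYPE_MASK == 0x40000003
 *
 * when armv8pmu_write_evtype() writes it to PMXEVTYPER_EL0.
 */
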
static inline u32 armv8pmu_pmcr_read(void)
{
	u32 val;
	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
	return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	isb();
	asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER &&
		idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline int armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
	isb();

	return idx;
}

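/*
 * Note added for illustration: the event counters are accessed indirectly.
 * A caller first selects a counter by writing its hardware index to
 * PMSELR_EL0 (the isb() above orders that write), and subsequent accesses
 * to PMXEVCNTR_EL0/PMXEVTYPER_EL0 then refer to the selected counter, e.g.:
 *
 *	if (armv8pmu_select_counter(idx) == idx)
 *		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));
 */
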
static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

	return value;
}

static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	if (armv8pmu_select_counter(idx) == idx) {
		val &= ARMV8_EVTYPE_MASK;
		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
	}
}

static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
	isb();

	return idx;
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

	/* Write to clear flags */
	value &= ARMV8_OVSR_MASK;
	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

	return value;
}

static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_evtype(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

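/*
 * Worked example (illustration only): enabling an L1D refill event (0x03)
 * that was placed at perf index 2 ends up, under events->pmu_lock, as
 * roughly this sequence of writes for hardware counter 1:
 *
 *	PMCNTENCLR_EL0 <- BIT(1)	// stop the counter while reprogramming
 *	PMSELR_EL0     <- 1; PMXEVTYPER_EL0 <- 0x03 | filter bits
 *	PMINTENSET_EL1 <- BIT(1)	// overflow interrupt enabled
 *	PMCNTENSET_EL0 <- BIT(1)	// counter enabled again
 */
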
static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;

	/* Always place a cycle-counting event into the dedicated cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle-counting event, try to use
	 * the event counters.
	 */
	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

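/*
 * Allocation example (added for illustration): a CPU-cycles event always
 * claims index 0 (the dedicated cycle counter) and fails with -EAGAIN if
 * another cycles event already holds it; every other event type takes the
 * first free slot among indices 1..num_events-1, i.e. the generic event
 * counters.
 */
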
/*
 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV8_EXCLUDE_EL0;
	if (attr->exclude_kernel)
		config_base |= ARMV8_EXCLUDE_EL1;
	if (!attr->exclude_hv)
		config_base |= ARMV8_INCLUDE_EL2;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

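/*
 * Example (illustration only): a user-space-only event opened with
 * attr.exclude_kernel = 1 and attr.exclude_hv = 1 ends up with
 * config_base == ARMV8_EXCLUDE_EL1 (0x80000000), i.e. EL1 is filtered out
 * and EL2 is not included; attr.exclude_idle cannot be honoured by the PMU
 * and is rejected with -EPERM.
 */
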
static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits. */
	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_pmuv3_perf_map,
				&armv8_pmuv3_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_a53_perf_map,
				&armv8_a53_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_a57_perf_map,
				&armv8_a57_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static void armv8pmu_read_num_pmnc_events(void *info)
{
	int *nb_cnt = info;

	/* Read the nb of CNTx counters supported from PMNC */
	*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	*nb_cnt += 1;
}

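/*
 * Worked example (illustration only): on a core whose PMCR_EL0.N field
 * reads 6, *nb_cnt becomes 6 + 1 = 7, so num_events covers the cycle
 * counter (index 0) plus six event counters (indices 1..6).
 */
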
static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
{
	return smp_call_function_any(&arm_pmu->supported_cpus,
				     armv8pmu_read_num_pmnc_events,
				     &arm_pmu->num_events, 1);
}

static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq		= armv8pmu_handle_irq;
	cpu_pmu->enable			= armv8pmu_enable_event;
	cpu_pmu->disable		= armv8pmu_disable_event;
	cpu_pmu->read_counter		= armv8pmu_read_counter;
	cpu_pmu->write_counter		= armv8pmu_write_counter;
	cpu_pmu->get_event_idx		= armv8pmu_get_event_idx;
	cpu_pmu->start			= armv8pmu_start;
	cpu_pmu->stop			= armv8pmu_stop;
	cpu_pmu->reset			= armv8pmu_reset;
	cpu_pmu->max_period		= (1LLU << 32) - 1;
	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
}

static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_pmuv3";
	cpu_pmu->map_event	= armv8_pmuv3_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_cortex_a53";
	cpu_pmu->map_event	= armv8_a53_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_cortex_a57";
	cpu_pmu->map_event	= armv8_a57_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
	{},
};

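/*
 * Illustrative device tree fragment (example only; the exact interrupt
 * specifier depends on the platform's interrupt controller binding):
 *
 *	pmu {
 *		compatible = "arm,cortex-a57-pmu";
 *		interrupts = <1 7 0x104>;
 *	};
 *
 * A node matching one of the compatibles above is what causes
 * armv8_pmu_device_probe() below to select the corresponding init function.
 */
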
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver		= {
		.name			= "armv8-pmu",
		.of_match_table		= armv8_pmu_of_device_ids,
		.suppress_bind_attrs	= true,
	},
	.probe		= armv8_pmu_device_probe,
};

static int __init register_armv8_pmu_driver(void)
{
	return platform_driver_register(&armv8_pmu_driver);
}
device_initcall(register_armv8_pmu_driver);