// SPDX-License-Identifier: GPL-2.0-only
/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <linux/irq_work.h>
#include <linux/ftrace.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>
#include <asm/asi.h>
#include <asm/spitfire.h>

/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_event support layer.
 */

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * Therefore in such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}
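
/* The generic irq_work layer calls this, possibly from PIL 15 context,
 * to schedule deferred work.  Raising the softint triggers
 * deferred_pcr_work_irq() at the lower PIL_DEFERRED_PCR_WORK level
 * where normal IRQ locking rules apply.
 */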
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}

const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);

static u64 direct_pcr_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pcr, %0" : "=r" (val));
	return val;
}

static void direct_pcr_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val));
}

static u64 direct_pic_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pic, %0" : "=r" (val));
	return val;
}

static void direct_pic_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);

	/* Blackbird errata workaround.  See commentary in
	 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
	 * for more information.
	 */
	__asm__ __volatile__("ba,pt	%%xcc, 99f\n\t"
			     " nop\n\t"
			     ".align	64\n"
			  "99:wr	%0, 0x0, %%pic\n\t"
			     "rd	%%pic, %%g0" : : "r" (val));
}
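
/* The NMI watchdog programs %pic with a negative delta in the upper
 * 32 bits so that the counter wraps, raising the overflow interrupt,
 * after about clock_tick / nmi_hz events (roughly nmi_hz times per
 * second when the selected event is cpu cycles).
 */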
static u64 direct_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / nmi_hz;

	return ((u64)((0 - delta) & 0xffffffff)) << 32;
}

static const struct pcr_ops direct_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= direct_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= direct_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};
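
/* On Niagara-2, writes that enable hypervisor tracing (PCR_N2_HTRACE)
 * have to go through the sun4v_niagara2_setperf() hypervisor call;
 * other values, or a failed call, fall back to a direct %pcr write.
 */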
static void n2_pcr_write(unsigned long reg_num, u64 val)
{
	unsigned long ret;

	WARN_ON_ONCE(reg_num != 0);
	if (val & PCR_N2_HTRACE) {
		ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
		if (ret != HV_EOK)
			direct_pcr_write(reg_num, val);
	} else
		direct_pcr_write(reg_num, val);
}

static u64 n2_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);

	return ((u64)((0 - delta) & 0xffffffff)) << 32;
}

static const struct pcr_ops n2_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= n2_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= n2_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE |
				   PCR_N2_TOE_OV1 |
				   (2 << PCR_N2_SL1_SHIFT) |
				   (0xff << PCR_N2_MASK1_SHIFT)),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};

static u64 n4_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_vt_get_perfreg(reg_num, &val);

	return val;
}

static void n4_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_vt_set_perfreg(reg_num, val);
}
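
/* On the sun4v VT (Niagara-4) and later chips the %pcr registers are
 * accessed through hypervisor calls, but the counter (PIC) registers
 * can still be read and written directly through ASI_PIC, one 64-bit
 * register per reg_num at an 8-byte stride.
 */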
static u64 n4_pic_read(unsigned long reg_num)
{
	unsigned long val;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (val)
			     : "r" (reg_num * 0x8UL), "i" (ASI_PIC));

	return val;
}

static void n4_pic_write(unsigned long reg_num, u64 val)
{
	__asm__ __volatile__("stxa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (reg_num * 0x8UL), "i" (ASI_PIC));
}

static u64 n4_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);

	return ((u64)((0 - delta) & 0xffffffff));
}

static const struct pcr_ops n4_pcr_ops = {
	.read_pcr		= n4_pcr_read,
	.write_pcr		= n4_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};

static u64 n5_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_t5_get_perfreg(reg_num, &val);

	return val;
}

static void n5_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_t5_set_perfreg(reg_num, val);
}

static const struct pcr_ops n5_pcr_ops = {
	.read_pcr		= n5_pcr_read,
	.write_pcr		= n5_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};

static u64 m7_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_m7_get_perfreg(reg_num, &val);

	return val;
}

static void m7_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_m7_set_perfreg(reg_num, val);
}

static const struct pcr_ops m7_pcr_ops = {
	.read_pcr		= m7_pcr_read,
	.write_pcr		= m7_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};
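
/* Each sun4v cpu generation exposes its performance counters through a
 * different hypervisor API group; register_perf_hsvc() negotiates the
 * right group with sun4v_hvapi_register() before the counters are
 * touched.
 */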
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;

static int __init register_perf_hsvc(void)
{
	unsigned long hverror;

	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
			perf_hsvc_group = HV_GRP_NIAG_PERF;
			break;

		case SUN4V_CHIP_NIAGARA2:
			perf_hsvc_group = HV_GRP_N2_CPU;
			break;

		case SUN4V_CHIP_NIAGARA3:
			perf_hsvc_group = HV_GRP_KT_CPU;
			break;

		case SUN4V_CHIP_NIAGARA4:
			perf_hsvc_group = HV_GRP_VT_CPU;
			break;

		case SUN4V_CHIP_NIAGARA5:
			perf_hsvc_group = HV_GRP_T5_CPU;
			break;

		case SUN4V_CHIP_SPARC_M7:
			perf_hsvc_group = HV_GRP_M7_PERF;
			break;

		default:
			return -ENODEV;
		}

		perf_hsvc_major = 1;
		perf_hsvc_minor = 0;
		hverror = sun4v_hvapi_register(perf_hsvc_group,
					       perf_hsvc_major,
					       &perf_hsvc_minor);
		if (hverror) {
			pr_err("perfmon: Could not register hvapi(0x%lx).\n",
			       hverror);
			return -ENODEV;
		}
	}
	return 0;
}

static void __init unregister_perf_hsvc(void)
{
	if (tlb_type != hypervisor)
		return;
	sun4v_hvapi_unregister(perf_hsvc_group);
}
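
/* Pick the pcr_ops implementation that matches the sun4v chip type we
 * just negotiated a hypervisor API group for.
 */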
static int __init setup_sun4v_pcr_ops(void)
{
	int ret = 0;

	switch (sun4v_chip_type) {
	case SUN4V_CHIP_NIAGARA1:
	case SUN4V_CHIP_NIAGARA2:
	case SUN4V_CHIP_NIAGARA3:
		pcr_ops = &n2_pcr_ops;
		break;

	case SUN4V_CHIP_NIAGARA4:
		pcr_ops = &n4_pcr_ops;
		break;

	case SUN4V_CHIP_NIAGARA5:
		pcr_ops = &n5_pcr_ops;
		break;

	case SUN4V_CHIP_SPARC_M7:
		pcr_ops = &m7_pcr_ops;
		break;

	default:
		ret = -ENODEV;
		break;
	}

	return ret;
}
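
/* Register the hypervisor perf API (if any), select the pcr_ops for
 * this cpu type, and then let nmi_init() build the pseudo-NMI watchdog
 * on top of them.
 */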
int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		err = setup_sun4v_pcr_ops();
		if (err)
			goto out_unregister;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}