• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 2004, 05, 06 by Ralf Baechle
7  * Copyright (C) 2005 by MIPS Technologies, Inc.
8  */
9 #include <linux/cpumask.h>
10 #include <linux/oprofile.h>
11 #include <linux/interrupt.h>
12 #include <linux/smp.h>
13 #include <asm/irq_regs.h>
14 
15 #include "op_impl.h"
16 
17 #define M_PERFCTL_EXL			(1UL	  <<  0)
18 #define M_PERFCTL_KERNEL		(1UL	  <<  1)
19 #define M_PERFCTL_SUPERVISOR		(1UL	  <<  2)
20 #define M_PERFCTL_USER			(1UL	  <<  3)
21 #define M_PERFCTL_INTERRUPT_ENABLE	(1UL	  <<  4)
22 #define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
23 #define M_PERFCTL_VPEID(vpe)		((vpe)	  << 16)
24 #define M_PERFCTL_MT_EN(filter)		((filter) << 20)
25 #define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
26 #define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
27 #define	   M_TC_EN_TC			M_PERFCTL_MT_EN(2)
28 #define M_PERFCTL_TCID(tcid)		((tcid)	  << 22)
29 #define M_PERFCTL_WIDE			(1UL	  << 30)
30 #define M_PERFCTL_MORE			(1UL	  << 31)
31 
32 #define M_COUNTER_OVERFLOW		(1UL	  << 31)
33 
34 /* Netlogic XLR specific, count events in all threads in a core */
35 #define M_PERFCTL_COUNT_ALL_THREADS	(1UL	  << 13)
36 
37 static int (*save_perf_irq)(void);
38 
39 /*
40  * XLR has only one set of counters per core. Designate the
41  * first hardware thread in the core for setup and init.
42  * Skip CPUs with non-zero hardware thread id (4 hwt per core)
43  */
44 #if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
45 #define oprofile_skip_cpu(c)	((cpu_logical_map(c) & 0x3) != 0)
46 #else
47 #define oprofile_skip_cpu(c)	0
48 #endif
49 
#ifdef CONFIG_MIPS_MT_SMP
/*
 * Non-zero when each TC has its own private counters; probed from
 * Config7 bit 19 in mipsxx_init().
 */
static int cpu_has_mipsmt_pertccounters;
/* Control-word bits restricting a counter to events of the current VPE. */
#define WHAT		(M_TC_EN_VPE | \
			 M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
/*
 * Which half of the counter register pair to use: with per-TC counters
 * every TC sees bank 0, otherwise the bank is selected by the VPE id.
 */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcode a few things here for the moment.  The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

#else

/* Without MT SMP there is a single VPE: no VPE bits, bank 0, no shift. */
#define WHAT		0
#define vpe_id()	0

static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif
84 
/* Convert a per-core counter count into the count visible to each VPE. */
static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	unsigned int shift = vpe_shift();

	return counters >> shift;
}
89 
/* Convert a per-VPE counter count back into the per-core total. */
static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	unsigned int shift = vpe_shift();

	return counters << shift;
}
94 
/*
 * Generate read/write accessors r_c0_<r><n>() / w_c0_<r><n>() for logical
 * counter register <n>.  On VPE 0 logical register n is backed by physical
 * register n; on VPE 1 it is backed by its partner register np (see the
 * instantiations below: 0<->2 and 1<->3).  Any other vpe_id() is a bug.
 */
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}									\

/* Counter value registers: logical 0-3, VPE 1 partners 2,3,0,1. */
__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

/* Counter control registers, same pairing as the value registers. */
__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
138 
139 struct op_mips_model op_model_mipsxx_ops;
140 
141 static struct mipsxx_register_config {
142 	unsigned int control[4];
143 	unsigned int counter[4];
144 } reg;
145 
146 /* Compute all of the registers in preparation for enabling profiling.	*/
147 
mipsxx_reg_setup(struct op_counter_config * ctr)148 static void mipsxx_reg_setup(struct op_counter_config *ctr)
149 {
150 	unsigned int counters = op_model_mipsxx_ops.num_counters;
151 	int i;
152 
153 	/* Compute the performance counter control word.  */
154 	for (i = 0; i < counters; i++) {
155 		reg.control[i] = 0;
156 		reg.counter[i] = 0;
157 
158 		if (!ctr[i].enabled)
159 			continue;
160 
161 		reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
162 				 M_PERFCTL_INTERRUPT_ENABLE;
163 		if (ctr[i].kernel)
164 			reg.control[i] |= M_PERFCTL_KERNEL;
165 		if (ctr[i].user)
166 			reg.control[i] |= M_PERFCTL_USER;
167 		if (ctr[i].exl)
168 			reg.control[i] |= M_PERFCTL_EXL;
169 		if (boot_cpu_type() == CPU_XLR)
170 			reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
171 		reg.counter[i] = 0x80000000 - ctr[i].count;
172 	}
173 }
174 
175 /* Program all of the registers in preparation for enabling profiling.	*/
176 
/*
 * Load the precomputed preset values into the hardware counters on this
 * CPU and clear every control register.  The switch deliberately falls
 * through so that programming N counters also programs all lower ones.
 */
static void mipsxx_cpu_setup(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	/* On XLR only the first hardware thread per core touches counters. */
	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(reg.counter[3]);
		/* fall through */
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(reg.counter[2]);
		/* fall through */
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(reg.counter[1]);
		/* fall through */
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(reg.counter[0]);
	}
}
199 
200 /* Start all counters on current CPU */
/*
 * Start all counters on the current CPU by writing each counter's
 * precomputed control word (plus the VPE bits from WHAT).  Deliberate
 * fall-through enables every counter below the highest one.
 */
static void mipsxx_cpu_start(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	switch (counters) {
	case 4:
		w_c0_perfctrl3(WHAT | reg.control[3]);
		/* fall through */
	case 3:
		w_c0_perfctrl2(WHAT | reg.control[2]);
		/* fall through */
	case 2:
		w_c0_perfctrl1(WHAT | reg.control[1]);
		/* fall through */
	case 1:
		w_c0_perfctrl0(WHAT | reg.control[0]);
	}
}
219 
220 /* Stop all counters on current CPU */
/*
 * Stop all counters on the current CPU by clearing every control
 * register.  Deliberate fall-through stops every counter below the
 * highest one.
 */
static void mipsxx_cpu_stop(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		/* fall through */
	case 3:
		w_c0_perfctrl2(0);
		/* fall through */
	case 2:
		w_c0_perfctrl1(0);
		/* fall through */
	case 1:
		w_c0_perfctrl0(0);
	}
}
239 
/*
 * Performance counter overflow handler.  Walks the counters from the
 * highest down (deliberate fall-through through HANDLE_COUNTER), and for
 * each counter that has interrupts enabled and its overflow bit set,
 * records an oprofile sample and rearms the counter with its preset.
 * Returns IRQ_HANDLED if at least one counter was serviced.
 */
static int mipsxx_perfcount_handler(void)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;
	unsigned int control;
	unsigned int counter;
	int handled = IRQ_NONE;

	/*
	 * On R2 cores Cause bit 26 (PCI) flags a pending performance
	 * counter interrupt; if it is clear, this interrupt is not ours.
	 */
	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
		return handled;

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		control = r_c0_perfctrl ## n();				\
		counter = r_c0_perfcntr ## n();				\
		if ((control & M_PERFCTL_INTERRUPT_ENABLE) &&		\
		    (counter & M_COUNTER_OVERFLOW)) {			\
			oprofile_add_sample(get_irq_regs(), n);		\
			w_c0_perfcntr ## n(reg.counter[n]);		\
			handled = IRQ_HANDLED;				\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	return handled;
}
269 
270 #define M_CONFIG1_PC	(1 << 4)
271 
/*
 * Probe the number of counters from the hardware: Config1.PC says
 * whether any counters exist at all, then each control register's
 * M (more) bit says whether another counter follows.  The reads must
 * stay in this order.
 */
static inline int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}
285 
n_counters(void)286 static inline int n_counters(void)
287 {
288 	int counters;
289 
290 	switch (current_cpu_type()) {
291 	case CPU_R10000:
292 		counters = 2;
293 		break;
294 
295 	case CPU_R12000:
296 	case CPU_R14000:
297 		counters = 4;
298 		break;
299 
300 	default:
301 		counters = __n_counters();
302 	}
303 
304 	return counters;
305 }
306 
/*
 * Zero both the control and the count register of every counter on this
 * CPU.  arg carries the counter count cast through long; the switch
 * deliberately falls through so higher cases also reset the lower ones.
 */
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;
	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
		/* fall through */
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
		/* fall through */
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
		/* fall through */
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}
325 
mipsxx_perfcount_int(int irq,void * dev_id)326 static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
327 {
328 	return mipsxx_perfcount_handler();
329 }
330 
mipsxx_init(void)331 static int __init mipsxx_init(void)
332 {
333 	int counters;
334 
335 	counters = n_counters();
336 	if (counters == 0) {
337 		printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
338 		return -ENODEV;
339 	}
340 
341 #ifdef CONFIG_MIPS_MT_SMP
342 	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
343 	if (!cpu_has_mipsmt_pertccounters)
344 		counters = counters_total_to_per_cpu(counters);
345 #endif
346 	on_each_cpu(reset_counters, (void *)(long)counters, 1);
347 
348 	op_model_mipsxx_ops.num_counters = counters;
349 	switch (current_cpu_type()) {
350 	case CPU_M14KC:
351 		op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
352 		break;
353 
354 	case CPU_M14KEC:
355 		op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
356 		break;
357 
358 	case CPU_20KC:
359 		op_model_mipsxx_ops.cpu_type = "mips/20K";
360 		break;
361 
362 	case CPU_24K:
363 		op_model_mipsxx_ops.cpu_type = "mips/24K";
364 		break;
365 
366 	case CPU_25KF:
367 		op_model_mipsxx_ops.cpu_type = "mips/25K";
368 		break;
369 
370 	case CPU_1004K:
371 	case CPU_34K:
372 		op_model_mipsxx_ops.cpu_type = "mips/34K";
373 		break;
374 
375 	case CPU_1074K:
376 	case CPU_74K:
377 		op_model_mipsxx_ops.cpu_type = "mips/74K";
378 		break;
379 
380 	case CPU_INTERAPTIV:
381 		op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
382 		break;
383 
384 	case CPU_PROAPTIV:
385 		op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
386 		break;
387 
388 	case CPU_P5600:
389 		op_model_mipsxx_ops.cpu_type = "mips/P5600";
390 		break;
391 
392 	case CPU_M5150:
393 		op_model_mipsxx_ops.cpu_type = "mips/M5150";
394 		break;
395 
396 	case CPU_5KC:
397 		op_model_mipsxx_ops.cpu_type = "mips/5K";
398 		break;
399 
400 	case CPU_R10000:
401 		if ((current_cpu_data.processor_id & 0xff) == 0x20)
402 			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
403 		else
404 			op_model_mipsxx_ops.cpu_type = "mips/r10000";
405 		break;
406 
407 	case CPU_R12000:
408 	case CPU_R14000:
409 		op_model_mipsxx_ops.cpu_type = "mips/r12000";
410 		break;
411 
412 	case CPU_SB1:
413 	case CPU_SB1A:
414 		op_model_mipsxx_ops.cpu_type = "mips/sb1";
415 		break;
416 
417 	case CPU_LOONGSON1:
418 		op_model_mipsxx_ops.cpu_type = "mips/loongson1";
419 		break;
420 
421 	case CPU_XLR:
422 		op_model_mipsxx_ops.cpu_type = "mips/xlr";
423 		break;
424 
425 	default:
426 		printk(KERN_ERR "Profiling unsupported for this CPU\n");
427 
428 		return -ENODEV;
429 	}
430 
431 	save_perf_irq = perf_irq;
432 	perf_irq = mipsxx_perfcount_handler;
433 
434 	if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
435 		return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
436 			0, "Perfcounter", save_perf_irq);
437 
438 	return 0;
439 }
440 
mipsxx_exit(void)441 static void mipsxx_exit(void)
442 {
443 	int counters = op_model_mipsxx_ops.num_counters;
444 
445 	if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
446 		free_irq(cp0_perfcount_irq, save_perf_irq);
447 
448 	counters = counters_per_cpu_to_total(counters);
449 	on_each_cpu(reset_counters, (void *)(long)counters, 1);
450 
451 	perf_irq = save_perf_irq;
452 }
453 
/* Operations vector exported to the MIPS oprofile core (see op_impl.h). */
struct op_mips_model op_model_mipsxx_ops = {
	.reg_setup	= mipsxx_reg_setup,
	.cpu_setup	= mipsxx_cpu_setup,
	.init		= mipsxx_init,
	.exit		= mipsxx_exit,
	.cpu_start	= mipsxx_cpu_start,
	.cpu_stop	= mipsxx_cpu_stop,
};
462