/**
 * @file arch/alpha/oprofile/common.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
#include <asm/system.h>

#include "op_impl.h"
extern struct op_axp_model op_model_ev4 __attribute__((weak));
extern struct op_axp_model op_model_ev5 __attribute__((weak));
extern struct op_axp_model op_model_pca56 __attribute__((weak));
extern struct op_axp_model op_model_ev6 __attribute__((weak));
extern struct op_axp_model op_model_ev67 __attribute__((weak));

static struct op_axp_model *model;

extern void (*perf_irq)(unsigned long, struct pt_regs *);
static void (*save_perf_irq)(unsigned long, struct pt_regs *);

static struct op_counter_config ctr[20];
static struct op_system_config sys;
static struct op_register_config reg;
/* Called from do_entInt to handle the performance monitor interrupt.  */

static void
op_handle_interrupt(unsigned long which, struct pt_regs *regs)
{
	model->handle_interrupt(which, regs, ctr);

	/* If the user has selected an interrupt frequency that is
	   not exactly the width of the counter, write a new value
	   into the counter such that it'll overflow after N more
	   events.  */
	if ((reg.need_reset >> which) & 1)
		model->reset_ctr(&reg, which);
}

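/* Set up the performance counters: hook the interrupt handler, compute
   the mask of enabled counters, and program the registers on every cpu.  */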
static int
op_axp_setup(void)
{
	unsigned long i, e;

	/* Install our interrupt handler into the existing hook.  */
	save_perf_irq = perf_irq;
	perf_irq = op_handle_interrupt;

	/* Compute the mask of enabled counters.  */
	for (i = e = 0; i < model->num_counters; ++i)
		if (ctr[i].enabled)
			e |= 1 << i;
	reg.enable = e;

	/* Pre-compute the values to stuff in the hardware registers.  */
	model->reg_setup(&reg, ctr, &sys);

	/* Configure the registers on all cpus.  */
	(void)smp_call_function(model->cpu_setup, &reg, 1);
	model->cpu_setup(&reg);
	return 0;
}

static void
op_axp_shutdown(void)
{
	/* Remove our interrupt handler.  We may be removing this module.  */
	perf_irq = save_perf_irq;
}

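/* Enable performance monitoring for the counters selected in reg.enable
   on the current cpu.  */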
static void
op_axp_cpu_start(void *dummy)
{
	wrperfmon(1, reg.enable);
}

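/* Start performance monitoring on all cpus.  */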
static int
op_axp_start(void)
{
	(void)smp_call_function(op_axp_cpu_start, NULL, 1);
	op_axp_cpu_start(NULL);
	return 0;
}

static inline void
op_axp_cpu_stop(void *dummy)
{
	/* Disable performance monitoring for all counters.  */
	wrperfmon(0, -1);
}

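/* Stop performance monitoring on all cpus.  */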
static void
op_axp_stop(void)
{
	(void)smp_call_function(op_axp_cpu_stop, NULL, 1);
	op_axp_cpu_stop(NULL);
}

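/* Create the oprofilefs control files: one directory per counter, plus
   the global mode switches when the model supports setting them.  */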
static int
op_axp_create_files(struct super_block *sb, struct dentry *root)
{
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		snprintf(buf, sizeof buf, "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);

		oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
		/* Dummies.  */
		oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
	}

	if (model->can_set_proc_mode) {
		oprofilefs_create_ulong(sb, root, "enable_pal",
					&sys.enable_pal);
		oprofilefs_create_ulong(sb, root, "enable_kernel",
					&sys.enable_kernel);
		oprofilefs_create_ulong(sb, root, "enable_user",
					&sys.enable_user);
	}

	return 0;
}

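/* Identify the CPU family via implver()/amask() and hook the matching
   model into the oprofile operations.  */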
int __init
oprofile_arch_init(struct oprofile_operations *ops)
{
	struct op_axp_model *lmodel = NULL;

	switch (implver()) {
	case IMPLVER_EV4:
		lmodel = &op_model_ev4;
		break;
	case IMPLVER_EV5:
		/* 21164PC has a slightly different set of events.
		   Recognize the chip by the presence of the MAX insns.  */
		if (!amask(AMASK_MAX))
			lmodel = &op_model_pca56;
		else
			lmodel = &op_model_ev5;
		break;
	case IMPLVER_EV6:
		/* 21264A supports ProfileMe.
		   Recognize the chip by the presence of the CIX insns.  */
		if (!amask(AMASK_CIX))
			lmodel = &op_model_ev67;
		else
			lmodel = &op_model_ev6;
		break;
	}

	if (!lmodel)
		return -ENODEV;
	model = lmodel;

	ops->create_files = op_axp_create_files;
	ops->setup = op_axp_setup;
	ops->shutdown = op_axp_shutdown;
	ops->start = op_axp_start;
	ops->stop = op_axp_stop;
	ops->cpu_type = lmodel->cpu_type;

	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
	       lmodel->cpu_type);

	return 0;
}


void
oprofile_arch_exit(void)
{
}