/**
 * @file arch/alpha/oprofile/op_model_ev67.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 * @author Falk Hueffner <falk@debian.org>
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>

#include "op_impl.h"


/* Compute all of the registers in preparation for enabling profiling. */

static void
ev67_reg_setup(struct op_register_config *reg,
               struct op_counter_config *ctr,
               struct op_system_config *sys)
{
        unsigned long ctl, reset, need_reset, i;

        /* Select desired events. */
        ctl = 1UL << 4;         /* Enable ProfileMe mode. */

        /* The event numbers are chosen so we can use them directly if
           PCTR1 is enabled. */
        if (ctr[1].enabled) {
                ctl |= (ctr[1].event & 3) << 2;
        } else {
                if (ctr[0].event == 0) /* cycles */
                        ctl |= 1UL << 2;
        }
        reg->mux_select = ctl;

        /* Select logging options. */
        /* ??? Need to come up with some mechanism to trace only
           selected processes. EV67 does not have a mechanism to
           select kernel or user mode only. For now, enable always. */
        reg->proc_mode = 0;

        /* EV67 cannot change the width of the counters as with the
           other implementations. But fortunately, we can write to
           the counters and set the value such that it will overflow
           at the right time. */
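        /* For example (assuming the PCTR count fields are 20 bits wide,
           which is why the count is capped at 0x100000 below): a
           requested count of 0x1000 becomes a reset value of 0xff000,
           so the counter overflows after exactly 0x1000 events. */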
        reset = need_reset = 0;
        for (i = 0; i < 2; ++i) {
                unsigned long count = ctr[i].count;
                if (!ctr[i].enabled)
                        continue;

                if (count > 0x100000)
                        count = 0x100000;
                ctr[i].count = count;
                reset |= (0x100000 - count) << (i ? 6 : 28);
                if (count != 0x100000)
                        need_reset |= 1 << i;
        }
        reg->reset_values = reset;
        reg->need_reset = need_reset;
}

/* Program all of the registers in preparation for enabling profiling. */

static void
ev67_cpu_setup (void *x)
{
        struct op_register_config *reg = x;

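        /* wrperfmon is the PALcode performance-monitor interface.
           Judging from the calls below, function code 2 loads the
           event/mux selection, 3 the process-mode selection, and 6 the
           counter reset values, with bits 0 and 1 apparently selecting
           which of PCTR0/PCTR1 to load (hence "| 3" to initialize
           both; compare ev67_reset_ctr below). */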
        wrperfmon(2, reg->mux_select);
        wrperfmon(3, reg->proc_mode);
        wrperfmon(6, reg->reset_values | 3);
}

/* CTR is a counter for which the user has requested an interrupt count
   in between one of the widths selectable in hardware. Reset the count
   for CTR to the value stored in REG->RESET_VALUES. */

static void
ev67_reset_ctr(struct op_register_config *reg, unsigned long ctr)
{
        wrperfmon(6, reg->reset_values | (1 << ctr));
}

/* ProfileMe conditions which will show up as counters. We can also
   detect the following, but it seems unlikely that anybody is
   interested in counting them:
    * Reset
    * MT_FPCR (write to floating point control register)
    * Arithmetic trap
    * Dstream Fault
    * Machine Check (ECC fault, etc.)
    * OPCDEC (illegal opcode)
    * Floating point disabled
    * Differentiate between DTB single/double misses and 3 or 4 level
      page tables
    * Istream access violation
    * Interrupt
    * Icache Parity Error.
    * Instruction killed (nop, trapb)

   Unfortunately, there seems to be no way to detect Dcache and Bcache
   misses; the latter could be approximated by making the counter
   count Bcache misses, but that is not precise.

   We model this as 20 counters:
    * PCTR0
    * PCTR1
    * 9 ProfileMe events, induced by PCTR0
    * 9 ProfileMe events, induced by PCTR1
*/

enum profileme_counters {
        PM_STALLED,             /* Stalled for at least one cycle
                                   between the fetch and map stages */
        PM_TAKEN,               /* Conditional branch taken */
        PM_MISPREDICT,          /* Branch caused mispredict trap */
        PM_ITB_MISS,            /* ITB miss */
        PM_DTB_MISS,            /* DTB miss */
        PM_REPLAY,              /* Replay trap */
        PM_LOAD_STORE,          /* Load-store order trap */
        PM_ICACHE_MISS,         /* Icache miss */
        PM_UNALIGNED,           /* Unaligned Load/Store */
        PM_NUM_COUNTERS
};
static inline void
op_add_pm(unsigned long pc, int kern, unsigned long counter,
          struct op_counter_config *ctr, unsigned long event)
{
        unsigned long fake_counter = 2 + event;
        if (counter == 1)
                fake_counter += PM_NUM_COUNTERS;
        if (ctr[fake_counter].enabled)
                oprofile_add_pc(pc, kern, fake_counter);
}

static void
ev67_handle_interrupt(unsigned long which, struct pt_regs *regs,
                      struct op_counter_config *ctr)
{
        unsigned long pmpc, pctr_ctl;
        int kern = !user_mode(regs);
        int mispredict = 0;
        union {
                unsigned long v;
                struct {
                        unsigned reserved:    30; /*  0-29 */
                        unsigned overcount:    3; /* 30-32 */
                        unsigned icache_miss:  1; /*    33 */
                        unsigned trap_type:    4; /* 34-37 */
                        unsigned load_store:   1; /*    38 */
                        unsigned trap:         1; /*    39 */
                        unsigned mispredict:   1; /*    40 */
                } fields;
        } i_stat;

        enum trap_types {
                TRAP_REPLAY,
                TRAP_INVALID0,
                TRAP_DTB_DOUBLE_MISS_3,
                TRAP_DTB_DOUBLE_MISS_4,
                TRAP_FP_DISABLED,
                TRAP_UNALIGNED,
                TRAP_DTB_SINGLE_MISS,
                TRAP_DSTREAM_FAULT,
                TRAP_OPCDEC,
                TRAP_INVALID1,
                TRAP_MACHINE_CHECK,
                TRAP_INVALID2,
                TRAP_ARITHMETIC,
                TRAP_INVALID3,
                TRAP_MT_FPCR,
                TRAP_RESET
        };

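        /* Read the ProfileMe state back through wrperfmon: going by
           the uses below, function code 9 returns the ProfileMe PC
           (PMPC) of the profiled instruction and code 8 returns the
           I_STAT value sampled for it. */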
        pmpc = wrperfmon(9, 0);
        /* ??? Don't know how to handle physical-mode PALcode address. */
        if (pmpc & 1)
                return;
        pmpc &= ~2;             /* clear reserved bit */

        i_stat.v = wrperfmon(8, 0);
        if (i_stat.fields.trap) {
                switch (i_stat.fields.trap_type) {
                case TRAP_INVALID1:
                case TRAP_INVALID2:
                case TRAP_INVALID3:
                        /* Pipeline redirection occurred. PMPC points
                           to PALcode. Recognize ITB miss by PALcode
                           offset address, and get actual PC from
                           EXC_ADDR. */
                        oprofile_add_pc(regs->pc, kern, which);
                        if ((pmpc & ((1 << 15) - 1)) == 581)
                                op_add_pm(regs->pc, kern, which,
                                          ctr, PM_ITB_MISS);
                        /* Most other bit and counter values will be
                           those for the first instruction in the
                           fault handler, so we're done. */
                        return;
                case TRAP_REPLAY:
                        op_add_pm(pmpc, kern, which, ctr,
                                  (i_stat.fields.load_store
                                   ? PM_LOAD_STORE : PM_REPLAY));
                        break;
                case TRAP_DTB_DOUBLE_MISS_3:
                case TRAP_DTB_DOUBLE_MISS_4:
                case TRAP_DTB_SINGLE_MISS:
                        op_add_pm(pmpc, kern, which, ctr, PM_DTB_MISS);
                        break;
                case TRAP_UNALIGNED:
                        op_add_pm(pmpc, kern, which, ctr, PM_UNALIGNED);
                        break;
                case TRAP_INVALID0:
                case TRAP_FP_DISABLED:
                case TRAP_DSTREAM_FAULT:
                case TRAP_OPCDEC:
                case TRAP_MACHINE_CHECK:
                case TRAP_ARITHMETIC:
                case TRAP_MT_FPCR:
                case TRAP_RESET:
                        break;
                }

                /* ??? JSR/JMP/RET/COR or HW_JSR/HW_JMP/HW_RET/HW_COR
                   mispredicts do not set this bit but can be
                   recognized by the presence of one of these
                   instructions at the PMPC location with bit 39
                   set. */
                if (i_stat.fields.mispredict) {
                        mispredict = 1;
                        op_add_pm(pmpc, kern, which, ctr, PM_MISPREDICT);
                }
        }

        oprofile_add_pc(pmpc, kern, which);

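        /* PCTR_CTL, read back through wrperfmon function code 5,
           carries the remaining per-instruction ProfileMe flags; from
           the checks below, bit 27 is the "stalled" indication and
           bit 0 the "taken" bit. */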
        pctr_ctl = wrperfmon(5, 0);
        if (pctr_ctl & (1UL << 27))
                op_add_pm(pmpc, kern, which, ctr, PM_STALLED);

        /* Unfortunately, TAK is undefined on mispredicted branches.
           ??? It is also undefined for non-cbranch insns, should
           check that. */
        if (!mispredict && pctr_ctl & (1UL << 0))
                op_add_pm(pmpc, kern, which, ctr, PM_TAKEN);
}

struct op_axp_model op_model_ev67 = {
        .reg_setup = ev67_reg_setup,
        .cpu_setup = ev67_cpu_setup,
        .reset_ctr = ev67_reset_ctr,
        .handle_interrupt = ev67_handle_interrupt,
        .cpu_type = "alpha/ev67",
        .num_counters = 20,
        .can_set_proc_mode = 0,
};