/*
 * SuperH KGDB support
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>

/* Macros for single step instruction identification */
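/*
 * The *_DISP() helpers sign-extend the 8-bit (BT/BF) or 12-bit (BRA/BSR)
 * branch displacement and convert it from instruction words to bytes
 * (<< 1); the hardware resolves the target relative to the branch
 * address + 4, which is why the users below add 4 to the PC.
 */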
#define OPCODE_BT(op)		(((op) & 0xff00) == 0x8900)
#define OPCODE_BF(op)		(((op) & 0xff00) == 0x8b00)
#define OPCODE_BTF_DISP(op)	(((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
				 (((op) & 0x7f ) << 1))
#define OPCODE_BFS(op)		(((op) & 0xff00) == 0x8f00)
#define OPCODE_BTS(op)		(((op) & 0xff00) == 0x8d00)
#define OPCODE_BRA(op)		(((op) & 0xf000) == 0xa000)
#define OPCODE_BRA_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BRAF(op)		(((op) & 0xf0ff) == 0x0023)
#define OPCODE_BRAF_REG(op)	(((op) & 0x0f00) >> 8)
#define OPCODE_BSR(op)		(((op) & 0xf000) == 0xb000)
#define OPCODE_BSR_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BSRF(op)		(((op) & 0xf0ff) == 0x0003)
#define OPCODE_BSRF_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JMP(op)		(((op) & 0xf0ff) == 0x402b)
#define OPCODE_JMP_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JSR(op)		(((op) & 0xf0ff) == 0x400b)
#define OPCODE_JSR_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_RTS(op)		((op) == 0xb)
#define OPCODE_RTE(op)		((op) == 0x2b)

#define SR_T_BIT_MASK		0x1
#define STEP_OPCODE		0xc33d
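/*
 * STEP_OPCODE (0xc33d) encodes "trapa #0x3d", the trap planted at the
 * step target by do_single_step(); SR_T_BIT_MASK selects the T
 * (condition) bit of SR, which BT/BF and friends test.
 */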

/* Calculate the new address for after a step */
static short *get_step_address(struct pt_regs *linux_regs)
{
	insn_size_t op = __raw_readw(linux_regs->pc);
	long addr;

	/* BT */
	if (OPCODE_BT(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BTS */
	else if (OPCODE_BTS(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BF */
	else if (OPCODE_BF(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BFS */
	else if (OPCODE_BFS(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BRA */
	else if (OPCODE_BRA(op))
		addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);

	/* BRAF */
	else if (OPCODE_BRAF(op))
		addr = linux_regs->pc + 4
		       + linux_regs->regs[OPCODE_BRAF_REG(op)];

	/* BSR */
	else if (OPCODE_BSR(op))
		addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);

	/* BSRF */
	else if (OPCODE_BSRF(op))
		addr = linux_regs->pc + 4
		       + linux_regs->regs[OPCODE_BSRF_REG(op)];

	/* JMP */
	else if (OPCODE_JMP(op))
		addr = linux_regs->regs[OPCODE_JMP_REG(op)];

	/* JSR */
	else if (OPCODE_JSR(op))
		addr = linux_regs->regs[OPCODE_JSR_REG(op)];

	/* RTS */
	else if (OPCODE_RTS(op))
		addr = linux_regs->pr;

	/* RTE */
	else if (OPCODE_RTE(op))
		addr = linux_regs->regs[15];

	/* Other */
	else
		addr = linux_regs->pc + instruction_size(op);

	flush_icache_range(addr, addr + instruction_size(op));
	return (short *)addr;
}

/*
 * Replace the instruction immediately after the current instruction
 * (i.e. next in the expected flow of control) with a trap instruction,
 * so that returning will cause only a single instruction to be executed.
 * Note that this model is slightly broken for instructions with delay
 * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
 * instruction in the delay slot will be executed.
 */

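/*
 * Only one patched slot is live at a time: do_single_step() records the
 * address and original opcode here so that undo_single_step() can put
 * the instruction back.
 */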
static unsigned long stepped_address;
static insn_size_t stepped_opcode;

static void do_single_step(struct pt_regs *linux_regs)
{
	/* Determine where the target instruction will send us to */
	unsigned short *addr = get_step_address(linux_regs);

	stepped_address = (int)addr;

	/* Replace it */
	stepped_opcode = __raw_readw((long)addr);
	*addr = STEP_OPCODE;

	/* Flush and return */
	flush_icache_range((long)addr, (long)addr +
			   instruction_size(stepped_opcode));
}

/* Undo a single step */
static void undo_single_step(struct pt_regs *linux_regs)
{
	/* If we have stepped, put back the old instruction */
	/* Use stepped_address in case we stopped elsewhere */
	if (stepped_opcode != 0) {
		__raw_writew(stepped_opcode, stepped_address);
		flush_icache_range(stepped_address, stepped_address + 2);
	}

	stepped_opcode = 0;
}

void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < 16; i++)
		gdb_regs[GDB_R0 + i] = regs->regs[i];

	gdb_regs[GDB_PC] = regs->pc;
	gdb_regs[GDB_PR] = regs->pr;
	gdb_regs[GDB_SR] = regs->sr;
	gdb_regs[GDB_GBR] = regs->gbr;
	gdb_regs[GDB_MACH] = regs->mach;
	gdb_regs[GDB_MACL] = regs->macl;

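	/* VBR is not saved in pt_regs, so read it straight from the register */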
	__asm__ __volatile__ ("stc vbr, %0" : "=r" (gdb_regs[GDB_VBR]));
}

void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < 16; i++)
		regs->regs[GDB_R0 + i] = gdb_regs[GDB_R0 + i];

	regs->pc = gdb_regs[GDB_PC];
	regs->pr = gdb_regs[GDB_PR];
	regs->sr = gdb_regs[GDB_SR];
	regs->gbr = gdb_regs[GDB_GBR];
	regs->mach = gdb_regs[GDB_MACH];
	regs->macl = gdb_regs[GDB_MACL];
}

void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	gdb_regs[GDB_R15] = p->thread.sp;
	gdb_regs[GDB_PC] = p->thread.pc;
}

int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	/* Undo any stepping we may have done */
	undo_single_step(linux_regs);

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->pc = addr;
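		/* fall through -- 'D' and 'k' share the resume handling */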
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		if (remcomInBuffer[0] == 's') {
			do_single_step(linux_regs);
			kgdb_single_step = 1;

			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}

unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
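	/*
	 * Exception 60 (0x3c) is the breakpoint trapa; the saved PC points
	 * past the 2-byte trap, so report the breakpoint address itself.
	 */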
	if (exception == 60)
		return instruction_pointer(regs) - 2;
	return instruction_pointer(regs);
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
}

/*
 * The primary entry points for the kgdb debug trap table entries.
 */
BUILD_TRAP_HANDLER(singlestep)
{
	unsigned long flags;
	TRAP_HANDLER_DECL;

	local_irq_save(flags);
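	/*
	 * The planted trapa leaves the saved PC past the patched slot;
	 * rewind so it points back at the replaced instruction before
	 * reporting to the kgdb core.
	 */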
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
	kgdb_handle_exception(0, SIGTRAP, 0, regs);
	local_irq_restore(flags);
}

static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	int ret;

	switch (cmd) {
	case DIE_BREAKPOINT:
		/*
		 * This means a user thread is single stepping
		 * a system call which should be ignored
		 */
		if (test_thread_flag(TIF_SINGLESTEP))
			return NOTIFY_DONE;

		ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
					    args->err, args->regs);
		if (ret)
			return NOTIFY_DONE;

		break;
	}

	return NOTIFY_STOP;
}

static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = __kgdb_notify(ptr, cmd);
	local_irq_restore(flags);

	return ret;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,

	/*
	 * Lowest-prio notifier priority, we want to be notified last:
	 */
	.priority	= -INT_MAX,
};

int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}

void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}

struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	.gdb_bpt_instr		= { 0x3c, 0xc3 },
#else
	.gdb_bpt_instr		= { 0xc3, 0x3c },
#endif
};