/*
 * PowerPC backend to the KGDB stub.
 *
 * 1998 (c) Michael AK Tesch (tesch@cs.wisc.edu)
 * Copyright (C) 2003 Timesys Corporation.
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
 * PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and
 * Sergei Shtylyov <sshtylyov@ru.mvista.com>
 * Copyright (C) 2007-2008 Wind River Systems, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/debug.h>
#include <linux/slab.h>

/*
 * This table contains the mapping between PowerPC hardware trap types, and
 * signals, which are primarily what GDB understands.  GDB and the kernel
 * don't always agree on values, so we use constants taken from gdb-6.2.
 */
static struct hard_trap_info
{
	unsigned int tt;		/* Trap type code for powerpc */
	unsigned char signo;		/* Signal that we map this trap into */
} hard_trap_info[] = {
	{ 0x0100, 0x02 /* SIGINT */ },		/* system reset */
	{ 0x0200, 0x0b /* SIGSEGV */ },		/* machine check */
	{ 0x0300, 0x0b /* SIGSEGV */ },		/* data access */
	{ 0x0400, 0x0b /* SIGSEGV */ },		/* instruction access */
	{ 0x0500, 0x02 /* SIGINT */ },		/* external interrupt */
	{ 0x0600, 0x0a /* SIGBUS */ },		/* alignment */
	{ 0x0700, 0x05 /* SIGTRAP */ },		/* program check */
	{ 0x0800, 0x08 /* SIGFPE */ },		/* fp unavailable */
	{ 0x0900, 0x0e /* SIGALRM */ },		/* decrementer */
	{ 0x0c00, 0x14 /* SIGCHLD */ },		/* system call */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	{ 0x2002, 0x05 /* SIGTRAP */ },		/* debug */
#if defined(CONFIG_FSL_BOOKE)
	{ 0x2010, 0x08 /* SIGFPE */ },		/* spe unavailable */
	{ 0x2020, 0x08 /* SIGFPE */ },		/* spe unavailable */
	{ 0x2030, 0x08 /* SIGFPE */ },		/* spe fp data */
	{ 0x2040, 0x08 /* SIGFPE */ },		/* spe fp data */
	{ 0x2050, 0x08 /* SIGFPE */ },		/* spe fp round */
	{ 0x2060, 0x0e /* SIGILL */ },		/* performance monitor */
	{ 0x2900, 0x08 /* SIGFPE */ },		/* apu unavailable */
	{ 0x3100, 0x0e /* SIGALRM */ },		/* fixed interval timer */
	{ 0x3200, 0x02 /* SIGINT */ },		/* watchdog */
#else /* ! CONFIG_FSL_BOOKE */
	{ 0x1000, 0x0e /* SIGALRM */ },		/* prog interval timer */
	{ 0x1010, 0x0e /* SIGALRM */ },		/* fixed interval timer */
	{ 0x1020, 0x02 /* SIGINT */ },		/* watchdog */
	{ 0x2010, 0x08 /* SIGFPE */ },		/* fp unavailable */
	{ 0x2020, 0x08 /* SIGFPE */ },		/* ap unavailable */
#endif
#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
	{ 0x0d00, 0x05 /* SIGTRAP */ },		/* single-step */
#if defined(CONFIG_8xx)
	{ 0x1000, 0x04 /* SIGILL */ },		/* software emulation */
#else /* ! CONFIG_8xx */
	{ 0x0f00, 0x04 /* SIGILL */ },		/* performance monitor */
	{ 0x0f20, 0x08 /* SIGFPE */ },		/* altivec unavailable */
	{ 0x1300, 0x05 /* SIGTRAP */ },		/* instruction address break */
#if defined(CONFIG_PPC64)
	{ 0x1200, 0x05 /* SIGILL */ },		/* system error */
	{ 0x1500, 0x04 /* SIGILL */ },		/* soft patch */
	{ 0x1600, 0x04 /* SIGILL */ },		/* maintenance */
	{ 0x1700, 0x08 /* SIGFPE */ },		/* altivec assist */
	{ 0x1800, 0x04 /* SIGILL */ },		/* thermal */
#else /* ! CONFIG_PPC64 */
	{ 0x1400, 0x02 /* SIGINT */ },		/* SMI */
	{ 0x1600, 0x08 /* SIGFPE */ },		/* altivec assist */
	{ 0x1700, 0x04 /* SIGILL */ },		/* TAU */
	{ 0x2000, 0x05 /* SIGTRAP */ },		/* run mode */
#endif
#endif
#endif
	{ 0x0000, 0x00 }			/* Must be last */
};

static int computeSignal(unsigned int tt)
{
	struct hard_trap_info *ht;

	for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
		if (ht->tt == tt)
			return ht->signo;

	return SIGHUP;		/* default for things we don't know about */
}

/**
 * kgdb_skipexception - Bail out of KGDB when we've been triggered.
 * @exception: Exception vector number
 * @regs: Current &struct pt_regs.
 *
 * On some architectures we need to skip a breakpoint exception when
 * it occurs after a breakpoint has been removed.
 */
int kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return kgdb_isremovedbreak(regs->nip);
}

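/*
 * Called via the __debugger_ipi hook on a CPU that receives the debugger
 * break IPI: report in to KGDB so this CPU is held while the debugger owns
 * the machine.
 */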
static int kgdb_call_nmi_hook(struct pt_regs *regs)
{
	kgdb_nmicallback(raw_smp_processor_id(), regs);
	return 0;
}

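/*
 * On SMP, pull the other CPUs into the debugger by sending them a debugger
 * break; each one ends up in kgdb_call_nmi_hook() above.
 */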
#ifdef CONFIG_SMP
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_send_debugger_break();
}
#endif

/* KGDB functions to use existing PowerPC64 hooks. */
static int kgdb_debugger(struct pt_regs *regs)
{
	return !kgdb_handle_exception(1, computeSignal(TRAP(regs)),
				      DIE_OOPS, regs);
}

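/*
 * Breakpoint exception handler: enter KGDB, and if the trapping instruction
 * is our own compiled-in breakpoint opcode, advance the PC past it so the
 * interrupted code can resume cleanly.
 */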
static int kgdb_handle_breakpoint(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
		return 0;

	if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
		regs->nip += BREAK_INSTR_SIZE;

	return 1;
}

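/*
 * Per-CPU scratch area used to stash the real thread_info while single
 * stepping on a critical exception stack (see kgdb_singlestep() below).
 */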
static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);
static int kgdb_singlestep(struct pt_regs *regs)
{
	struct thread_info *thread_info, *exception_thread_info;
	struct thread_info *backup_current_thread_info =
		this_cpu_ptr(&kgdb_thread_info);

	if (user_mode(regs))
		return 0;

	/*
	 * On Book E and perhaps other processors, singlestep is handled on
	 * the critical exception stack.  This causes current_thread_info()
	 * to fail, since it locates the thread_info by masking off
	 * the low bits of the current stack pointer.  We work around
	 * this issue by copying the thread_info from the kernel stack
	 * before calling kgdb_handle_exception, and copying it back
	 * afterwards.  On most processors the copy is avoided since
	 * exception_thread_info == thread_info.
	 */
	thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
	exception_thread_info = current_thread_info();

	if (thread_info != exception_thread_info) {
		/* Save the original current_thread_info. */
		memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);
		memcpy(exception_thread_info, thread_info, sizeof *thread_info);
	}

	kgdb_handle_exception(0, SIGTRAP, 0, regs);

	if (thread_info != exception_thread_info)
		/* Restore the original current_thread_info. */
		memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);

	return 1;
}

static int kgdb_iabr_match(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
		return 0;
	return 1;
}

static int kgdb_break_match(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
		return 0;
	return 1;
}

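/*
 * Helpers for sleeping_thread_to_gdb_regs(): store one register value into
 * the GDB register buffer and advance the output pointer.  PACK32 writes a
 * 32-bit quantity and steps the pointer by only four bytes.
 */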
#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)

#define PACK32(ptr, src) do {          \
	u32 *ptr32;                    \
	ptr32 = (u32 *)ptr;            \
	*(ptr32++) = (src);            \
	ptr = (unsigned long *)ptr32;  \
	} while (0)

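/*
 * Fill the GDB register buffer for a task that is not currently running,
 * reading from the register frame saved at p->thread.ksp.  The volatile
 * GPRs (r3-r13) are not preserved there, so their slots are skipped and
 * stay zero.
 */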
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
						  STACK_FRAME_OVERHEAD);
	unsigned long *ptr = gdb_regs;
	int reg;

	memset(gdb_regs, 0, NUMREGBYTES);

	/* Regs GPR0-2 */
	for (reg = 0; reg < 3; reg++)
		PACK64(ptr, regs->gpr[reg]);

	/* Regs GPR3-13 are caller saved, not in regs->gpr[] */
	ptr += 11;

	/* Regs GPR14-31 */
	for (reg = 14; reg < 32; reg++)
		PACK64(ptr, regs->gpr[reg]);

#ifdef CONFIG_FSL_BOOKE
#ifdef CONFIG_SPE
	for (reg = 0; reg < 32; reg++)
		PACK64(ptr, p->thread.evr[reg]);
#else
	ptr += 32;
#endif
#else
	/* fp registers not used by kernel, leave zero */
	ptr += 32 * 8 / sizeof(long);
#endif

	PACK64(ptr, regs->nip);
	PACK64(ptr, regs->msr);
	PACK32(ptr, regs->ccr);
	PACK64(ptr, regs->link);
	PACK64(ptr, regs->ctr);
	PACK32(ptr, regs->xer);

	BUG_ON((unsigned long)ptr >
	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
}

#define GDB_SIZEOF_REG sizeof(unsigned long)
#define GDB_SIZEOF_REG_U32 sizeof(u32)

#ifdef CONFIG_FSL_BOOKE
#define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long)
#else
#define GDB_SIZEOF_FLOAT_REG sizeof(u64)
#endif

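/*
 * Register description table used by dbg_get_reg()/dbg_set_reg(): each entry
 * gives the register name GDB expects, its size, and its offset within
 * struct pt_regs.  The floating-point entries (f0-f31) store the register
 * number instead of a pt_regs offset, since FP state is not kept in pt_regs.
 */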
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[0]) },
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[1]) },
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[2]) },
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[3]) },
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[4]) },
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[5]) },
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[6]) },
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[7]) },
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[8]) },
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[9]) },
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[10]) },
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[11]) },
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[12]) },
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[13]) },
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[14]) },
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[15]) },
	{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[16]) },
	{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[17]) },
	{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[18]) },
	{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[19]) },
	{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[20]) },
	{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[21]) },
	{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[22]) },
	{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[23]) },
	{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[24]) },
	{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[25]) },
	{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[26]) },
	{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[27]) },
	{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[28]) },
	{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[29]) },
	{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[30]) },
	{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[31]) },

	{ "f0", GDB_SIZEOF_FLOAT_REG, 0 },
	{ "f1", GDB_SIZEOF_FLOAT_REG, 1 },
	{ "f2", GDB_SIZEOF_FLOAT_REG, 2 },
	{ "f3", GDB_SIZEOF_FLOAT_REG, 3 },
	{ "f4", GDB_SIZEOF_FLOAT_REG, 4 },
	{ "f5", GDB_SIZEOF_FLOAT_REG, 5 },
	{ "f6", GDB_SIZEOF_FLOAT_REG, 6 },
	{ "f7", GDB_SIZEOF_FLOAT_REG, 7 },
	{ "f8", GDB_SIZEOF_FLOAT_REG, 8 },
	{ "f9", GDB_SIZEOF_FLOAT_REG, 9 },
	{ "f10", GDB_SIZEOF_FLOAT_REG, 10 },
	{ "f11", GDB_SIZEOF_FLOAT_REG, 11 },
	{ "f12", GDB_SIZEOF_FLOAT_REG, 12 },
	{ "f13", GDB_SIZEOF_FLOAT_REG, 13 },
	{ "f14", GDB_SIZEOF_FLOAT_REG, 14 },
	{ "f15", GDB_SIZEOF_FLOAT_REG, 15 },
	{ "f16", GDB_SIZEOF_FLOAT_REG, 16 },
	{ "f17", GDB_SIZEOF_FLOAT_REG, 17 },
	{ "f18", GDB_SIZEOF_FLOAT_REG, 18 },
	{ "f19", GDB_SIZEOF_FLOAT_REG, 19 },
	{ "f20", GDB_SIZEOF_FLOAT_REG, 20 },
	{ "f21", GDB_SIZEOF_FLOAT_REG, 21 },
	{ "f22", GDB_SIZEOF_FLOAT_REG, 22 },
	{ "f23", GDB_SIZEOF_FLOAT_REG, 23 },
	{ "f24", GDB_SIZEOF_FLOAT_REG, 24 },
	{ "f25", GDB_SIZEOF_FLOAT_REG, 25 },
	{ "f26", GDB_SIZEOF_FLOAT_REG, 26 },
	{ "f27", GDB_SIZEOF_FLOAT_REG, 27 },
	{ "f28", GDB_SIZEOF_FLOAT_REG, 28 },
	{ "f29", GDB_SIZEOF_FLOAT_REG, 29 },
	{ "f30", GDB_SIZEOF_FLOAT_REG, 30 },
	{ "f31", GDB_SIZEOF_FLOAT_REG, 31 },

	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, nip) },
	{ "msr", GDB_SIZEOF_REG, offsetof(struct pt_regs, msr) },
	{ "cr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ccr) },
	{ "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, link) },
	{ "ctr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ctr) },
	{ "xer", GDB_SIZEOF_REG, offsetof(struct pt_regs, xer) },
};

char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (regno < 32 || regno >= 64)
		/* First 0 -> 31 gpr registers */
		/* pc, msr, lr... registers 64 -> 69 */
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);

	if (regno >= 32 && regno < 64) {
		/* FP registers 32 -> 63 */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
		if (current)
			memcpy(mem, &current->thread.evr[regno-32],
			       dbg_reg_def[regno].size);
#else
		/* fp registers not used by kernel, leave zero */
		memset(mem, 0, dbg_reg_def[regno].size);
#endif
	}

	return dbg_reg_def[regno].name;
}

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return -EINVAL;

	if (regno < 32 || regno >= 64)
		/* First 0 -> 31 gpr registers */
		/* pc, msr, lr... registers 64 -> 69 */
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);

	if (regno >= 32 && regno < 64) {
		/* FP registers 32 -> 63 */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
		memcpy(&current->thread.evr[regno-32], mem,
		       dbg_reg_def[regno].size);
#else
		/* fp registers not used by kernel, leave zero */
		return 0;
#endif
	}

	return 0;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->nip = pc;
}

/*
 * This function does PowerPC specific processing for interfacing to gdb.
 */
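/*
 * Only the continue ('c') and single-step ('s') packets need architecture
 * help here.  Both take an optional resume address; stepping is implemented
 * with the hardware trace facility (MSR_SE, or DBCR0_IC/DBCR0_IDM plus
 * MSR_DE on the BookE-style debug architecture).
 */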
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *linux_regs)
{
	char *ptr = &remcom_in_buffer[1];
	unsigned long addr;

	switch (remcom_in_buffer[0]) {
		/*
		 * sAA..AA   Step one instruction from AA..AA
		 * This will return an error to gdb ..
		 */
	case 's':
	case 'c':
		/* handle the optional parameter */
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->nip = addr;

		atomic_set(&kgdb_cpu_doing_single_step, -1);
		/* set the trace bit if we're stepping */
		if (remcom_in_buffer[0] == 's') {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			mtspr(SPRN_DBCR0,
			      mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
			linux_regs->msr |= MSR_DE;
#else
			linux_regs->msr |= MSR_SE;
#endif
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}
		return 0;
	}

	return -1;
}

/*
 * Global data
 */
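/*
 * The breakpoint opcode 0x7d821008 is "twge r2, r2", an always-taken trap
 * that raises a program check exception and is caught by
 * kgdb_handle_breakpoint() via the __debugger_bpt hook.
 */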
struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
};

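/*
 * Stub installed as the fault handler hook; returning 0 means "not handled",
 * so normal kernel fault processing continues.
 */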
static int kgdb_not_implemented(struct pt_regs *regs)
{
	return 0;
}

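/*
 * Previous values of the low-level __debugger* hooks, saved at init time so
 * that kgdb_arch_exit() can put back whatever was there before KGDB.
 */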
static void *old__debugger_ipi;
static void *old__debugger;
static void *old__debugger_bpt;
static void *old__debugger_sstep;
static void *old__debugger_iabr_match;
static void *old__debugger_break_match;
static void *old__debugger_fault_handler;

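/* Hook KGDB into the PowerPC debugger callbacks, saving the old handlers. */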
int kgdb_arch_init(void)
{
	old__debugger_ipi = __debugger_ipi;
	old__debugger = __debugger;
	old__debugger_bpt = __debugger_bpt;
	old__debugger_sstep = __debugger_sstep;
	old__debugger_iabr_match = __debugger_iabr_match;
	old__debugger_break_match = __debugger_break_match;
	old__debugger_fault_handler = __debugger_fault_handler;

	__debugger_ipi = kgdb_call_nmi_hook;
	__debugger = kgdb_debugger;
	__debugger_bpt = kgdb_handle_breakpoint;
	__debugger_sstep = kgdb_singlestep;
	__debugger_iabr_match = kgdb_iabr_match;
	__debugger_break_match = kgdb_break_match;
	__debugger_fault_handler = kgdb_not_implemented;

	return 0;
}

void kgdb_arch_exit(void)
{
	__debugger_ipi = old__debugger_ipi;
	__debugger = old__debugger;
	__debugger_bpt = old__debugger_bpt;
	__debugger_sstep = old__debugger_sstep;
	__debugger_iabr_match = old__debugger_iabr_match;
	__debugger_break_match = old__debugger_break_match;
	__debugger_fault_handler = old__debugger_fault_handler;
}