/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#define pr_fmt(fmt) "KGDB: " fmt

#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int kgdb_io_module_registered;

/* Guard for recursive entry */
static int exception_level;

struct kgdb_io *dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier; a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next CPU to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t masters_in_kgdb;
static atomic_t slaves_in_kgdb;
static atomic_t kgdb_break_tasklet_var;
atomic_t kgdb_setting_breakpoint;

struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;

int kgdb_single_step;
static pid_t kgdb_sstep_pid;

/* To keep track of the CPU which is doing the single stepping */
atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}

int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;
	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble, not being able to put things back the way
	 * we found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
		       addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm) {
		int i;

		for (i = 0; i < VMACACHE_SIZE; i++) {
			if (!current->vmacache[i])
				continue;
			flush_cache_range(current->vmacache[i],
					  addr, addr + BREAK_INSTR_SIZE);
		}
	}

	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			pr_info("BP install failed: %lx\n",
				kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}

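/*
 * Record a software breakpoint at @addr in the kgdb_break[] table.  The
 * breakpoint is only marked BP_SET here; it is written into memory later
 * by dbg_activate_sw_breakpoints().
 */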
int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
		    kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}

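/*
 * Restore the original instruction for every BP_ACTIVE breakpoint and
 * drop it back to BP_SET so it can be re-armed later by
 * dbg_activate_sw_breakpoints().
 */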
int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			pr_info("BP remove failed: %lx\n",
				kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}

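/* Mark the breakpoint at @addr as BP_REMOVED so it will not be re-armed. */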
int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

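/* Return 1 if @addr matches a breakpoint that is pending removal. */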
int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
		    (kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

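/*
 * Forget every software breakpoint, restoring the saved instruction for
 * any that are still active, and clear all hardware breakpoints.
 */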
int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			pr_err("breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}

/*
 * Return true if there is a valid kgdb I/O module.  If no debugger is
 * attached, a message can also be printed to the console about waiting
 * for the debugger to attach.
 *
 * The print_wait argument should only be true when called from inside
 * the core kgdb_handle_exception, because that path will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("waiting... or $3#33 for KDB\n");
#else
		pr_crit("Waiting for remote debugger\n");
#endif
	}
	return 1;
}

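/*
 * Guard against recursive entry on the CPU that already holds kgdb_active:
 * try to recover by dropping breakpoints, and panic if the debugger keeps
 * re-entering itself.
 */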
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed cleanly at the place the
	 * exception occurred, try to recover and print a warning to the
	 * end user, because the user planted a breakpoint in a place
	 * that KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		pr_crit("re-enter error: breakpoint removed %lx\n", addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		kgdb_io_module_registered = false;
		panic("Recursive entry to debugger");
	}

	pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}

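/*
 * Pet the various watchdogs so that the time spent stopped in the
 * debugger does not trigger soft-lockup, clocksource or RCU stall
 * warnings when the system resumes.
 */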
static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}

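/*
 * Main entry/exit path for a CPU taking part in a debug session.  The
 * master CPU takes dbg_master_lock and runs the kdb or gdbstub command
 * loop, while slave CPUs spin here until the master releases them.
 */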
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();
	u64 time_left;

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	rcu_read_lock();
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * The CPU will loop if it is a slave, or request to become a kgdb
	 * master CPU and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			rcu_read_unlock();
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);
		rcu_read_unlock();

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* If send_ready set, slaves are already waiting */
	if (ks->send_ready)
		atomic_set(ks->send_ready, 1);

	/* Signal the other CPUs to enter kgdb_wait() */
	else if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	time_left = MSEC_PER_SEC;
	while (kgdb_do_roundup && --time_left &&
	       (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
		   online_cpus)
		udelay(1000);
	if (!time_left)
		pr_crit("Timed out waiting for secondary CPUs.\n");

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);
	rcu_read_unlock();

	return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret = 0;

	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(0);
	/*
	 * Avoid entering the debugger if we were triggered due to an oops
	 * but panic_timeout indicates the system should automatically
	 * reboot on panic.  We don't want to get stuck waiting for input
	 * on such systems, especially if it's "just" an oops.
	 */
	if (signo != SIGTRAP && panic_timeout)
		return 1;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu = raw_smp_processor_id();
	ks->ex_vector = evector;
	ks->signo = signo;
	ks->err_code = ecode;
	ks->linux_regs = regs;

	if (kgdb_reenter_check(ks))
		goto out; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		goto out;

	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(1);
	return ret;
}

/*
 * GDB places a breakpoint at this function so it can find out about
 * dynamically loaded objects.  It is not defined static so that only
 * one instance with this name exists in the kernel.
 */

static int module_event(struct notifier_block *self, unsigned long val,
	void *data)
{
	return 0;
}

static struct notifier_block dbg_module_load_nb = {
	.notifier_call = module_event,
};

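/*
 * Called from the architecture roundup code when this CPU is interrupted
 * while another CPU holds the debugger; enters kgdb_cpu_enter() as a
 * slave.  Returns 0 if the CPU was parked in the debugger, 1 otherwise.
 */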
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu = cpu;
	ks->linux_regs = regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
	    raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}

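/*
 * Entry point for an NMI-driven request to make this CPU the debug
 * master (as opposed to the normal exception path).  Returns 0 if the
 * CPU entered the debugger, 1 if the request was ignored.
 */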
int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
		   atomic_t *send_ready)
{
#ifdef CONFIG_SMP
	if (!kgdb_io_ready(0) || !send_ready)
		return 1;

	if (kgdb_info[cpu].enter_kgdb == 0) {
		struct kgdb_state kgdb_var;
		struct kgdb_state *ks = &kgdb_var;

		memset(ks, 0, sizeof(struct kgdb_state));
		ks->cpu = cpu;
		ks->ex_vector = trapnr;
		ks->signo = SIGTRAP;
		ks->err_code = err_code;
		ks->linux_regs = regs;
		ks->send_ready = send_ready;
		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
		return 0;
	}
#endif
	return 1;
}

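/*
 * Console write method used when "kgdbcon" is enabled: forward kernel
 * console output to the attached gdb client via the gdbstub.
 */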
static void kgdb_console_write(struct console *co, const char *s,
			       unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * to print.
	 */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name = "kgdb",
	.write = kgdb_console_write,
	.flags = CON_PRINTBUFFER | CON_ENABLED,
	.index = -1,
};

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;

	if (kgdb_io_module_registered && !kgdb_con_registered) {
		register_console(&kgdbcons);
		kgdb_con_registered = 1;
	}

	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		pr_crit("ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("KGDB or $3#33 for KDB\n");
#else
		pr_crit("Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler = sysrq_handle_dbg,
	.help_msg = "debug(g)",
	.action_msg = "DEBUG",
};
#endif

static int kgdb_panic_event(struct notifier_block *self,
			    unsigned long val,
			    void *data)
{
	/*
	 * Avoid entering the debugger if we were triggered due to a panic
	 * but panic_timeout indicates the system should automatically
	 * reboot on panic.  We don't want to get stuck waiting for input
	 * from the user in that case.
	 */
	if (panic_timeout)
		return NOTIFY_DONE;

	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)data);
	kgdb_breakpoint();
	return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
	.notifier_call = kgdb_panic_event,
	.priority = INT_MAX,
};

void __weak kgdb_arch_late(void)
{
}

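/*
 * Called late in boot, once the regular kernel facilities are up, to
 * switch out of early-debug mode and finish kdb initialization.
 */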
void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);
}

static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
	/*
	 * Take the following action on reboot notify depending on value:
	 *    1 == Enter debugger
	 *    0 == [the default] detach debug client
	 *   -1 == Do nothing... and use this until the board resets
	 */
	switch (kgdbreboot) {
	case 1:
		kgdb_breakpoint();
	case -1:
		goto done;
	}
	if (!dbg_kdb_mode)
		gdbstub_exit(code);
done:
	return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
	.notifier_call = dbg_notify_reboot,
	.next = NULL,
	.priority = INT_MAX,
};

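/*
 * Hook the debug core into the rest of the kernel (module, reboot and
 * panic notifiers, the sysrq-g key and the optional kgdb console) once
 * an I/O driver has been registered.
 */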
static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from the
	 * panic handler and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &kgdb_panic_event_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);

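/*
 * Schedule the breakpoint tasklet above, unless the debugger is already
 * active or a breakpoint is already pending.
 */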
void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
	    atomic_read(&kgdb_active) != -1 ||
	    atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

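/* Break into the debugger immediately; used for the "kgdbwait" option. */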
static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	pr_crit("Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

/**
 * kgdb_register_io_module - register KGDB IO module
 * @new_dbg_io_ops: the io ops vector
 *
 * Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	int err;

	spin_lock(&kgdb_registration_lock);

	if (dbg_io_ops) {
		spin_unlock(&kgdb_registration_lock);

		pr_err("Another I/O driver is already registered with KGDB\n");
		return -EBUSY;
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap)
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);

/**
 * kgdb_unregister_io_module - unregister KGDB IO module
 * @old_dbg_io_ops: the io ops vector
 *
 * Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	pr_info("Unregistered I/O driver %s, debugger disabled\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

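/*
 * Fetch one character from the I/O driver.  Returns -1 if no character
 * is available; in kdb mode DEL (127) is translated to backspace (8).
 */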
int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
noinline void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);

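/*
 * Handle the "kgdbwait" boot parameter: request an initial breakpoint as
 * soon as a kgdb I/O driver is registered (or immediately, if one
 * already is).
 */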
static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered)
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);