/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int				kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int			kgdb_io_module_registered;

/* Guard for recursive entry */
static int			exception_level;

struct kgdb_io		*dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier; a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;
/* Flag for entering kdb when a panic occurs */
static bool break_on_panic = true;
/* Flag for entering kdb when an exception occurs */
static bool break_on_exception = true;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;
	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);
module_param(break_on_panic, bool, 0644);
module_param(break_on_exception, bool, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt		kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t			kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t			masters_in_kgdb;
static atomic_t			slaves_in_kgdb;
static atomic_t			kgdb_break_tasklet_var;
atomic_t			kgdb_setting_breakpoint;

struct task_struct		*kgdb_usethread;
struct task_struct		*kgdb_contthread;

int				kgdb_single_step;
static pid_t			kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping */
atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is itself the problem [this should be extremely
 * rare], then use the nokgdbroundup option to avoid roundup. In that
 * case the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}

int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;
	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble, not being able to put things back the way we
	 * found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
		   "memory destroyed at: %lx", addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm && current->mm->mmap_cache) {
		flush_cache_range(current->mm->mmap_cache,
				  addr, addr + BREAK_INSTR_SIZE);
	}
	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			printk(KERN_INFO "KGDB: BP install failed: %lx",
			       kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}

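/*
 * Record a new software breakpoint in the kgdb_break[] table; the
 * breakpoint instruction is only written into the text later, when
 * dbg_activate_sw_breakpoints() runs.
 */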
int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
					(kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
					kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}

int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}

int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
				(kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
					(kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}

/*
 * Return true if there is a valid kgdb I/O module.  Also, if no
 * debugger is attached, a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument should only be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
#endif
	}
	return 1;
}

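/*
 * Detect recursive entry into the debugger on the CPU that already
 * holds kgdb_active.  If the fault came from a breakpoint that can be
 * removed, recover and warn; otherwise kill all breakpoints and, past
 * one nested level, panic.
 */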
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed successfully at the place the
	 * exception occurred, try to recover and print a warning to the
	 * end user, because the user planted a breakpoint in a place
	 * that KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
			addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		panic("Recursive entry to debugger");
	}

	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}

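/*
 * Kick the lockup and stall detectors so they do not fire because of
 * the time spent stopped in the debugger.
 */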
static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}

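/*
 * Rendezvous point for every CPU entering the debugger.  The CPU that
 * wins dbg_master_lock becomes the master and runs the kdb/gdbstub
 * loop; all other CPUs spin here as slaves until the master releases
 * dbg_slave_lock.
 */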
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * The CPU will loop if it is a slave, or request to become the
	 * kgdb master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic_dec();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock, which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* Signal the other CPUs to enter kgdb_wait() */
	if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
				atomic_read(&slaves_in_kgdb)) != online_cpus)
		cpu_relax();

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic_dec();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret = 0;

	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(0);

	if (unlikely(signo != SIGTRAP && !break_on_exception))
		return 1;

	ks->cpu			= raw_smp_processor_id();
	ks->ex_vector		= evector;
	ks->signo		= signo;
	ks->err_code		= ecode;
	ks->kgdb_usethreadid	= 0;
	ks->linux_regs		= regs;

	if (kgdb_reenter_check(ks))
		goto out; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		goto out;

	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(1);
	return ret;
}

/*
 * GDB places a breakpoint at this function to learn about dynamically
 * loaded objects. It's not defined static so that only one instance with
 * this name exists in the kernel.
 */

static int module_event(struct notifier_block *self, unsigned long val,
	void *data)
{
	return 0;
}

static struct notifier_block dbg_module_load_nb = {
	.notifier_call	= module_event,
};

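/*
 * Entry point for a CPU that has been rounded up (typically via an IPI
 * or NMI) while another CPU holds the debugger; park it as a slave.
 * Returns 0 if the CPU entered the debugger, 1 if there was nothing to do.
 */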
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= cpu;
	ks->linux_regs		= regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}

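/*
 * Console write hook: forward kernel console output to the attached
 * gdb client (via gdbstub_msg_write) while a session is connected and
 * the debugger is not itself active.
 */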
static void kgdb_console_write(struct console *co, const char *s,
   unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * to print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name		= "kgdb",
	.write		= kgdb_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
	.index		= -1,
};

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler	= sysrq_handle_dbg,
	.help_msg	= "debug(g)",
	.action_msg	= "DEBUG",
};
#endif

static int kgdb_panic_event(struct notifier_block *self,
			    unsigned long val,
			    void *data)
{
	if (!break_on_panic)
		return NOTIFY_DONE;

	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)data);
	kgdb_breakpoint();
	return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
       .notifier_call	= kgdb_panic_event,
       .priority	= INT_MAX,
};

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);
}

static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
	/*
	 * Take the following action on reboot notify depending on value:
	 *    1 == Enter debugger
	 *    0 == [the default] detach debug client
	 *   -1 == Do nothing... and use this until the board resets
	 */
	switch (kgdbreboot) {
	case 1:
		kgdb_breakpoint();
	case -1:
		goto done;
	}
	if (!dbg_kdb_mode)
		gdbstub_exit(code);
done:
	return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
	.notifier_call		= dbg_notify_reboot,
	.next			= NULL,
	.priority		= INT_MAX,
};

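/*
 * Hook the debug core into the kernel: arch init, module/reboot/panic
 * notifiers, the sysrq-g handler and, if requested, the kgdb console.
 */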
static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from the
	 * panic handler and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		atomic_notifier_chain_unregister(&panic_notifier_list,
					       &kgdb_panic_event_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);

void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
		atomic_read(&kgdb_active) != -1 ||
		atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

/**
 *	kgdb_register_io_module - register KGDB IO module
 *	@new_dbg_io_ops: the io ops vector
 *
 *	Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	int err;

	spin_lock(&kgdb_registration_lock);

	if (dbg_io_ops) {
		spin_unlock(&kgdb_registration_lock);

		printk(KERN_ERR "kgdb: Another I/O driver is already "
				"registered with KGDB.\n");
		return -EBUSY;
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
	       new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap)
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);

/**
 *	kgdb_unregister_io_module - unregister KGDB IO module
 *	@old_dbg_io_ops: the io ops vector
 *
 *	Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO
		"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

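/*
 * Fetch one character from the registered I/O driver.  Returns -1 if
 * nothing is pending; in kdb mode, DEL (127) is mapped to backspace (8).
 */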
int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);

static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered)
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);