/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int				kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int			kgdb_io_module_registered;

/* Guard for recursive entry */
static int			exception_level;

struct kgdb_io		*dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier, a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;
/* Flag for entering kdb when a panic occurs */
static bool break_on_panic = true;
/* Flag for entering kdb when an exception occurs */
static bool break_on_exception = true;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

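/* Handle the "kgdbcon" boot parameter: forward console output to the attached debugger */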
static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;
	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);
module_param(break_on_panic, bool, 0644);
module_param(break_on_exception, bool, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt		kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t			kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t			masters_in_kgdb;
static atomic_t			slaves_in_kgdb;
static atomic_t			kgdb_break_tasklet_var;
atomic_t			kgdb_setting_breakpoint;

struct task_struct		*kgdb_usethread;
struct task_struct		*kgdb_contthread;

int				kgdb_single_step;
static pid_t			kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping */
atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}

int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;
	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble, not being able to put things back the way
	 * we found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
		   "memory destroyed at: %lx", addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm) {
		int i;

		for (i = 0; i < VMACACHE_SIZE; i++) {
			if (!current->vmacache[i])
				continue;
			flush_cache_range(current->vmacache[i],
					  addr, addr + BREAK_INSTR_SIZE);
		}
	}

	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			printk(KERN_INFO "KGDB: BP install failed: %lx",
			       kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}

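/*
 * Record a software breakpoint at @addr: reuse a BP_REMOVED slot for the
 * same address if one exists, otherwise take a free (BP_UNDEFINED) slot.
 * Returns -EEXIST for duplicates and -E2BIG when the table is full.
 */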
int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
					(kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
					kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}

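/*
 * Pull all BP_ACTIVE breakpoints out of memory (restoring the original
 * instructions) but leave them in BP_SET state so they can be re-installed
 * by dbg_activate_sw_breakpoints() before execution resumes.
 */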
int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}

int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
				(kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
					(kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

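/*
 * Remove every software breakpoint from memory and mark all slots
 * BP_UNDEFINED, then ask the architecture to clear any hardware
 * breakpoints as well.
 */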
int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}

/*
 * Return true if there is a valid kgdb I/O module.  Also, if no
 * debugger is attached, a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument should only be true when called from inside
 * the core kgdb_handle_exception(), because that path will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
#endif
	}
	return 1;
}

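/*
 * Check for recursive entry into the debug core on the CPU that already
 * holds kgdb_active, e.g. because a breakpoint was planted on a code path
 * that KGDB itself uses.
 */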
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed successfully at the place the
	 * exception occurred, try to recover and print a warning to the
	 * end user, because the user planted a breakpoint in a place that
	 * KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
			addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		panic("Recursive entry to debugger");
	}

	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}

static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}

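/*
 * kgdb_cpu_enter - the main debugger loop for both master and slave CPUs.
 * A CPU wanting to be master takes dbg_master_lock and runs the kdb or
 * gdbstub loop; slave CPUs spin until the master releases dbg_slave_lock.
 */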
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * The CPU will loop if it is a slave or if it requests to become the
	 * kgdb master CPU and acquires the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* If send_ready set, slaves are already waiting */
	if (ks->send_ready)
		atomic_set(ks->send_ready, 1);

	/* Signal the other CPUs to enter kgdb_wait() */
	else if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
				atomic_read(&slaves_in_kgdb)) != online_cpus)
		cpu_relax();

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret = 0;

	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(0);

	if (unlikely(signo != SIGTRAP && !break_on_exception))
		return 1;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= raw_smp_processor_id();
	ks->ex_vector		= evector;
	ks->signo		= signo;
	ks->err_code		= ecode;
	ks->linux_regs		= regs;

	if (kgdb_reenter_check(ks))
		goto out; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		goto out;

	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(1);
	return ret;
}

/*
 * GDB places a breakpoint at this function to learn about dynamically
 * loaded objects. It's not defined static so that only one instance with
 * this name exists in the kernel.
 */

static int module_event(struct notifier_block *self, unsigned long val,
	void *data)
{
	return 0;
}

static struct notifier_block dbg_module_load_nb = {
	.notifier_call	= module_event,
};

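/*
 * Called by the architecture's roundup code when a CPU is stopped (usually
 * via an IPI or NMI); if a master CPU already holds dbg_master_lock, this
 * CPU parks itself in kgdb_cpu_enter() as a slave.
 */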
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= cpu;
	ks->linux_regs		= regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}

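/*
 * Enter the debugger as the master CPU from an NMI-style call-in.  The
 * caller-provided send_ready flag is set once the master is in place,
 * since the slave CPUs are assumed to be stopped and waiting already.
 */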
int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
							atomic_t *send_ready)
{
#ifdef CONFIG_SMP
	if (!kgdb_io_ready(0) || !send_ready)
		return 1;

	if (kgdb_info[cpu].enter_kgdb == 0) {
		struct kgdb_state kgdb_var;
		struct kgdb_state *ks = &kgdb_var;

		memset(ks, 0, sizeof(struct kgdb_state));
		ks->cpu			= cpu;
		ks->ex_vector		= trapnr;
		ks->signo		= SIGTRAP;
		ks->err_code		= err_code;
		ks->linux_regs		= regs;
		ks->send_ready		= send_ready;
		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
		return 0;
	}
#endif
	return 1;
}

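/* Console write handler that forwards printk output to an attached gdb */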
static void kgdb_console_write(struct console *co, const char *s,
   unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * to print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name		= "kgdb",
	.write		= kgdb_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
	.index		= -1,
};

#ifdef CONFIG_MAGIC_SYSRQ
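/* SysRq 'g' handler: break into the debugger (KGDB or KDB) on demand */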
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler	= sysrq_handle_dbg,
	.help_msg	= "debug(g)",
	.action_msg	= "DEBUG",
};
#endif

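/* Panic notifier: optionally drop into the debugger when the kernel panics */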
static int kgdb_panic_event(struct notifier_block *self,
			    unsigned long val,
			    void *data)
{
	if (!break_on_panic)
		return NOTIFY_DONE;

	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)data);
	kgdb_breakpoint();
	return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
       .notifier_call	= kgdb_panic_event,
       .priority	= INT_MAX,
};

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);
}

static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
	/*
	 * Take the following action on reboot notify depending on value:
	 *    1 == Enter debugger
	 *    0 == [the default] detach debug client
	 *   -1 == Do nothing... and use this until the board resets
	 */
	switch (kgdbreboot) {
	case 1:
		kgdb_breakpoint();
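		/* Fall through */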
	case -1:
		goto done;
	}
	if (!dbg_kdb_mode)
		gdbstub_exit(code);
done:
	return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
	.notifier_call		= dbg_notify_reboot,
	.next			= NULL,
	.priority		= INT_MAX,
};

static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from the
	 * panic handler and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		atomic_notifier_chain_unregister(&panic_notifier_list,
					       &kgdb_panic_event_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);

void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
		atomic_read(&kgdb_active) != -1 ||
		atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

/**
 *	kgdb_register_io_module - register KGDB IO module
 *	@new_dbg_io_ops: the io ops vector
 *
 *	Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	int err;

	spin_lock(&kgdb_registration_lock);

	if (dbg_io_ops) {
		spin_unlock(&kgdb_registration_lock);

		printk(KERN_ERR "kgdb: Another I/O driver is already "
				"registered with KGDB.\n");
		return -EBUSY;
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
	       new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap)
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);

/**
 *	kgdb_unregister_io_module - unregister KGDB IO module
 *	@old_dbg_io_ops: the io ops vector
 *
 *	Unregister it from the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO
		"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

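/*
 * Fetch one character from the I/O driver.  NO_POLL_CHAR is mapped to -1,
 * and in kdb mode DEL (127) is translated to backspace (8).
 */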
int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
noinline void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);

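/*
 * "kgdbwait" boot parameter: request an initial breakpoint as early as
 * possible, either immediately if an I/O driver is already registered or
 * as soon as one registers.
 */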
static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered)
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);