/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#define pr_fmt(fmt) "KGDB: " fmt

#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/nmi.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int				kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int			kgdb_io_module_registered;

/* Guard for recursive entry */
static int			exception_level;

struct kgdb_io		*dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier; a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt		kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t			kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t			masters_in_kgdb;
static atomic_t			slaves_in_kgdb;
static atomic_t			kgdb_break_tasklet_var;
atomic_t			kgdb_setting_breakpoint;

struct task_struct		*kgdb_usethread;
struct task_struct		*kgdb_contthread;

int				kgdb_single_step;
static pid_t			kgdb_sstep_pid;

/* To keep track of the CPU which is doing the single stepping */
atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}

int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;
	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble, not being able to put things back the way
	 * we found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
		       addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm) {
		int i;

		for (i = 0; i < VMACACHE_SIZE; i++) {
			if (!current->vmacache.vmas[i])
				continue;
			flush_cache_range(current->vmacache.vmas[i],
					  addr, addr + BREAK_INSTR_SIZE);
		}
	}

	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			pr_info("BP install failed: %lx\n",
				kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}

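/*
 * Record a new software breakpoint at @addr: the address is validated
 * first, then a slot in kgdb_break[] is reused (if the same address was
 * previously removed) or a free BP_UNDEFINED slot is claimed and marked
 * BP_SET.  Returns -EEXIST for duplicates and -E2BIG when the table is
 * full; the breakpoint instruction itself is only written out later by
 * dbg_activate_sw_breakpoints().
 */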
int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
					(kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
					kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}

int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			pr_info("BP remove failed: %lx\n",
				kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}

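/*
 * Mark the breakpoint registered at @addr as BP_REMOVED so that it is no
 * longer re-armed by dbg_activate_sw_breakpoints(); returns -ENOENT if no
 * matching BP_SET entry exists.
 */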
int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
				(kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
					(kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			pr_err("breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}

/*
 * Return true if there is a valid kgdb I/O module.  Also, if no
 * debugger is attached, a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument should only be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("waiting... or $3#33 for KDB\n");
#else
		pr_crit("Waiting for remote debugger\n");
#endif
	}
	return 1;
}

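/*
 * Detect recursive entry into the debugger: if the CPU taking this
 * exception already owns kgdb_active we have re-entered.  If the fault
 * came from a breakpoint planted in code KGDB itself needs, remove it
 * and try to recover; otherwise kill all breakpoints and, beyond one
 * level of kdb self-debugging, panic.
 */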
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed successfully at the place the
	 * exception occurred, try to recover and print a warning to the
	 * end user, because the user planted a breakpoint in a place
	 * that KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		pr_crit("re-enter error: breakpoint removed %lx\n", addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		kgdb_io_module_registered = false;
		panic("Recursive entry to debugger");
	}

	pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}

static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}

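/*
 * kgdb_cpu_enter() is the core state machine: every CPU that takes a
 * debug exception (or is rounded up) funnels through here.  Exactly one
 * CPU wins dbg_master_lock and becomes the master that runs kdb_stub()
 * or gdb_serial_stub(); the remaining CPUs spin as slaves on
 * dbg_slave_lock until the master releases them, after which breakpoint,
 * hardware-breakpoint, tracing, watchdog and IRQ state is restored.
 */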
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();
	u64 time_left;

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	rcu_read_lock();
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * The CPU will loop here if it is a slave, or until it becomes
	 * the kgdb master CPU and acquires the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].debuggerinfo = NULL;
			kgdb_info[cpu].task = NULL;
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			rcu_read_unlock();
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);
		rcu_read_unlock();

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	atomic_inc(&ignore_console_lock_warning);

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active.
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* If send_ready set, slaves are already waiting */
	if (ks->send_ready)
		atomic_set(ks->send_ready, 1);

	/* Signal the other CPUs to enter kgdb_wait() */
	else if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	time_left = MSEC_PER_SEC;
	while (kgdb_do_roundup && --time_left &&
	       (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
		   online_cpus)
		udelay(1000);
	if (!time_left)
		pr_crit("Timed out waiting for secondary CPUs.\n");

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	atomic_dec(&ignore_console_lock_warning);

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].debuggerinfo = NULL;
	kgdb_info[cpu].task = NULL;
	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);
	rcu_read_unlock();

	return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret = 0;

	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(0);
	/*
	 * Avoid entering the debugger if we were triggered due to an oops
	 * but panic_timeout indicates the system should automatically
	 * reboot on panic. We don't want to get stuck waiting for input
	 * on such systems, especially if it's "just" an oops.
	 */
	if (signo != SIGTRAP && panic_timeout)
		return 1;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= raw_smp_processor_id();
	ks->ex_vector		= evector;
	ks->signo		= signo;
	ks->err_code		= ecode;
	ks->linux_regs		= regs;

	if (kgdb_reenter_check(ks))
		goto out; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		goto out;

	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(1);
	return ret;
}
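
/*
 * Illustrative sketch only (not part of this file): an architecture's
 * breakpoint/debug trap handler is expected to funnel into the debug
 * core roughly like this, where the handler name and trap number are
 * hypothetical:
 *
 *	static int hypothetical_arch_bp_handler(int trapnr, struct pt_regs *regs)
 *	{
 *		if (kgdb_handle_exception(trapnr, SIGTRAP, 0, regs))
 *			return NOTIFY_DONE;	// KGDB did not consume it
 *		return NOTIFY_STOP;
 *	}
 */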

/*
 * GDB places a breakpoint at this function so it knows about dynamically
 * loaded objects. It's not defined static so that only one instance with
 * this name exists in the kernel.
 */

static int module_event(struct notifier_block *self, unsigned long val,
	void *data)
{
	return 0;
}

static struct notifier_block dbg_module_load_nb = {
	.notifier_call	= module_event,
};

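/*
 * Entry point for a CPU that has been rounded up (e.g. by NMI or IPI)
 * while another CPU already holds dbg_master_lock: park this CPU in the
 * debugger as a slave.  Returns 0 if the CPU entered (and has now left)
 * the debugger, 1 if there was nothing to do.
 */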
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= cpu;
	ks->linux_regs		= regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}

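/*
 * Allow an NMI/out-of-band handler to enter the debugger as the master
 * CPU.  Instead of the normal CPU round-up, the debug core sets
 * @send_ready once it has become master (the caller's other CPUs are
 * assumed to already be waiting on it).  Returns 0 if the debugger was
 * entered, 1 if KGDB I/O is not ready or this CPU is already in kgdb.
 */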
int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
							atomic_t *send_ready)
{
#ifdef CONFIG_SMP
	if (!kgdb_io_ready(0) || !send_ready)
		return 1;

	if (kgdb_info[cpu].enter_kgdb == 0) {
		struct kgdb_state kgdb_var;
		struct kgdb_state *ks = &kgdb_var;

		memset(ks, 0, sizeof(struct kgdb_state));
		ks->cpu			= cpu;
		ks->ex_vector		= trapnr;
		ks->signo		= SIGTRAP;
		ks->err_code		= err_code;
		ks->linux_regs		= regs;
		ks->send_ready		= send_ready;
		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
		return 0;
	}
#endif
	return 1;
}

static void kgdb_console_write(struct console *co, const char *s,
   unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * and print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name		= "kgdb",
	.write		= kgdb_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
	.index		= -1,
};

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;

	if (kgdb_io_module_registered && !kgdb_con_registered) {
		register_console(&kgdbcons);
		kgdb_con_registered = 1;
	}

	return 0;
}

early_param("kgdbcon", opt_kgdb_con);
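
/*
 * Usage sketch (assumes the kgdboc I/O driver; the device name is an
 * example only): booting with
 *
 *	console=ttyS0,115200 kgdboc=ttyS0,115200 kgdbcon
 *
 * mirrors kernel console output to the attached gdb once a debugger is
 * connected.
 */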

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		pr_crit("ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("KGDB or $3#33 for KDB\n");
#else
		pr_crit("Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler	= sysrq_handle_dbg,
	.help_msg	= "debug(g)",
	.action_msg	= "DEBUG",
};
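
/*
 * With CONFIG_MAGIC_SYSRQ the debugger can be entered at runtime via the
 * 'g' sysrq key, e.g. "echo g > /proc/sysrq-trigger" or the SysRq-g
 * keyboard chord.
 */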
#endif

static int kgdb_panic_event(struct notifier_block *self,
			    unsigned long val,
			    void *data)
{
	/*
	 * Avoid entering the debugger if we were triggered due to a panic
	 * while panic_timeout indicates the system should automatically
	 * reboot on panic; we don't want to get stuck waiting for user
	 * input in that case.
	 */
	if (panic_timeout)
		return NOTIFY_DONE;

	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)data);
	kgdb_breakpoint();
	return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
       .notifier_call	= kgdb_panic_event,
       .priority	= INT_MAX,
};

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);
}

static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
	/*
	 * Take the following action on reboot notify depending on value:
	 *    1 == Enter debugger
	 *    0 == [the default] detach debug client
	 *   -1 == Do nothing... and use this until the board resets
	 */
	switch (kgdbreboot) {
	case 1:
		kgdb_breakpoint();
	case -1:
		goto done;
	}
	if (!dbg_kdb_mode)
		gdbstub_exit(code);
done:
	return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
	.notifier_call		= dbg_notify_reboot,
	.next			= NULL,
	.priority		= INT_MAX,
};
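
/*
 * kgdbreboot is also a module parameter, so the reboot behaviour can be
 * changed at runtime; assuming the usual sysfs path for this built-in
 * code, that looks roughly like:
 *
 *	echo 1 > /sys/module/debug_core/parameters/kgdbreboot
 */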

static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from the
	 * panic handler and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		atomic_notifier_chain_unregister(&panic_notifier_list,
					       &kgdb_panic_event_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);

void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
		atomic_read(&kgdb_active) != -1 ||
		atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	pr_crit("Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

/**
 *	kgdb_register_io_module - register KGDB IO module
 *	@new_dbg_io_ops: the io ops vector
 *
 *	Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	int err;

	spin_lock(&kgdb_registration_lock);

	if (dbg_io_ops) {
		spin_unlock(&kgdb_registration_lock);

		pr_err("Another I/O driver is already registered with KGDB\n");
		return -EBUSY;
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap)
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);
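
/*
 * Rough sketch of an I/O driver registration (field names follow
 * struct kgdb_io as used in this file; the polled character callbacks
 * are hypothetical examples):
 *
 *	static struct kgdb_io example_io_ops = {
 *		.name		= "example_io",
 *		.read_char	= example_get_char,
 *		.write_char	= example_put_char,
 *	};
 *
 *	err = kgdb_register_io_module(&example_io_ops);
 */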

/**
 *	kgdb_unregister_io_module - unregister KGDB IO module
 *	@old_dbg_io_ops: the io ops vector
 *
 *	Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	pr_info("Unregistered I/O driver %s, debugger disabled\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
noinline void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);
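
/*
 * Typical use is a hard-coded entry into the debugger from code under
 * test, e.g. (illustrative only):
 *
 *	if (unlikely(in_suspect_state))
 *		kgdb_breakpoint();
 *
 * The kgdb_setting_breakpoint count taken inside kgdb_breakpoint() lets
 * kgdb_io_ready() treat this as an intentional entry even before a
 * debugger has attached.
 */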

static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered)
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);
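
/*
 * Usage sketch (device name is an example only): booting with
 *
 *	kgdboc=ttyS0,115200 kgdbwait
 *
 * stops the kernel at kgdb_initial_breakpoint() early in boot and waits
 * for a debugger to attach over the configured I/O driver.
 */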