/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE-Gx KGDB support.
 */

#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <asm/cacheflush.h>

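/*
 * Single-step state: singlestep_insn is the breakpoint bundle with
 * DIE_SSTEPBP or'ed in (presumably so the trap handler can tell a
 * single-step breakpoint from an ordinary one); stepped_addr and
 * stepped_instr record where it was planted and the original bundle
 * that undo_single_step() puts back.
 */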
static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
static unsigned long stepped_addr;
static tile_bundle_bits stepped_instr;

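/*
 * Map GDB register numbers to their names, sizes and offsets in
 * struct pt_regs.  Registers with offset -1 (sn, idn*, udn*, zero)
 * are not saved in pt_regs: dbg_get_reg() reports them as zero and
 * dbg_set_reg() ignores writes to them.
 */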
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0])},
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1])},
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2])},
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3])},
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4])},
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5])},
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6])},
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7])},
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8])},
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9])},
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10])},
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11])},
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12])},
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13])},
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14])},
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15])},
	{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16])},
	{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17])},
	{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18])},
	{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19])},
	{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20])},
	{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21])},
	{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22])},
	{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23])},
	{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24])},
	{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25])},
	{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26])},
	{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27])},
	{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28])},
	{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29])},
	{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30])},
	{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31])},
	{ "r32", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[32])},
	{ "r33", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[33])},
	{ "r34", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[34])},
	{ "r35", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[35])},
	{ "r36", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[36])},
	{ "r37", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[37])},
	{ "r38", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[38])},
	{ "r39", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[39])},
	{ "r40", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[40])},
	{ "r41", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[41])},
	{ "r42", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[42])},
	{ "r43", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[43])},
	{ "r44", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[44])},
	{ "r45", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[45])},
	{ "r46", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[46])},
	{ "r47", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[47])},
	{ "r48", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[48])},
	{ "r49", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[49])},
	{ "r50", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[50])},
	{ "r51", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[51])},
	{ "r52", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[52])},
	{ "tp", GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
	{ "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
	{ "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, lr)},
	{ "sn", GDB_SIZEOF_REG, -1},
	{ "idn0", GDB_SIZEOF_REG, -1},
	{ "idn1", GDB_SIZEOF_REG, -1},
	{ "udn0", GDB_SIZEOF_REG, -1},
	{ "udn1", GDB_SIZEOF_REG, -1},
	{ "udn2", GDB_SIZEOF_REG, -1},
	{ "udn3", GDB_SIZEOF_REG, -1},
	{ "zero", GDB_SIZEOF_REG, -1},
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc)},
	{ "faultnum", GDB_SIZEOF_REG, offsetof(struct pt_regs, faultnum)},
};

char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);
	else
		memset(mem, 0, dbg_reg_def[regno].size);
	return dbg_reg_def[regno].name;
}

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return -EINVAL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);
	return 0;
}

/*
 * Similar to pt_regs_to_gdb_regs() except that the process is sleeping,
 * so we may not be able to get all the info.
 */
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
	struct pt_regs *thread_regs;
	const int NGPRS = TREG_LAST_GPR + 1;

	if (task == NULL)
		return;

	thread_regs = task_pt_regs(task);
	memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long));
	memset(&gdb_regs[NGPRS], 0,
	       (TILEGX_PC_REGNUM - NGPRS) * sizeof(unsigned long));
	gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
	gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->pc = pc;
}

static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), NULL);
}

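/*
 * Round up the other CPUs into the debugger by sending them an IPI via
 * smp_call_function().  Interrupts are briefly enabled around the call,
 * presumably because smp_call_function() may not be invoked with
 * interrupts disabled.
 */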
void kgdb_roundup_cpus(unsigned long flags)
{
	local_irq_enable();
	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
	local_irq_disable();
}

/*
 * Convert a kernel address to the writable kernel text mapping.
 */
static unsigned long writable_address(unsigned long addr)
{
	unsigned long ret = 0;

	if (core_kernel_text(addr))
		ret = ktext_writable_addr(addr);
	else if (is_module_text_address(addr))
		ret = addr;
	else
		pr_err("Unknown virtual address 0x%lx\n", addr);

	return ret;
}

/*
 * Calculate the new address for after a step.
 */
static unsigned long get_step_address(struct pt_regs *regs)
{
	int src_reg;
	int jump_off;
	int br_off;
	unsigned long addr;
	unsigned int opcode;
	tile_bundle_bits bundle;

	/* Move to the next instruction by default. */
	addr = regs->pc + TILEGX_BUNDLE_SIZE_IN_BYTES;
	bundle = *(unsigned long *)instruction_pointer(regs);

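	/*
	 * Decode the control-flow slot of the bundle: indirect jumps take
	 * their target from a source register (read via dbg_get_reg()),
	 * while direct jumps and conditional branches encode a signed
	 * offset in units of bundles relative to the current pc.
	 */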
	/* 0: X mode, Otherwise: Y mode. */
	if (bundle & TILEGX_BUNDLE_MODE_MASK) {
		if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
		    get_RRROpcodeExtension_Y1(bundle) ==
		    UNARY_RRR_1_OPCODE_Y1) {
			opcode = get_UnaryOpcodeExtension_Y1(bundle);

			switch (opcode) {
			case JALR_UNARY_OPCODE_Y1:
			case JALRP_UNARY_OPCODE_Y1:
			case JR_UNARY_OPCODE_Y1:
			case JRP_UNARY_OPCODE_Y1:
				src_reg = get_SrcA_Y1(bundle);
				dbg_get_reg(src_reg, &addr, regs);
				break;
			}
		}
	} else if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
		if (get_RRROpcodeExtension_X1(bundle) ==
		    UNARY_RRR_0_OPCODE_X1) {
			opcode = get_UnaryOpcodeExtension_X1(bundle);

			switch (opcode) {
			case JALR_UNARY_OPCODE_X1:
			case JALRP_UNARY_OPCODE_X1:
			case JR_UNARY_OPCODE_X1:
			case JRP_UNARY_OPCODE_X1:
				src_reg = get_SrcA_X1(bundle);
				dbg_get_reg(src_reg, &addr, regs);
				break;
			}
		}
	} else if (get_Opcode_X1(bundle) == JUMP_OPCODE_X1) {
		opcode = get_JumpOpcodeExtension_X1(bundle);

		switch (opcode) {
		case JAL_JUMP_OPCODE_X1:
		case J_JUMP_OPCODE_X1:
			jump_off = sign_extend(get_JumpOff_X1(bundle), 27);
			addr = regs->pc +
				(jump_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
			break;
		}
	} else if (get_Opcode_X1(bundle) == BRANCH_OPCODE_X1) {
		br_off = 0;
		opcode = get_BrType_X1(bundle);

		switch (opcode) {
		case BEQZT_BRANCH_OPCODE_X1:
		case BEQZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) == 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BGEZT_BRANCH_OPCODE_X1:
		case BGEZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) >= 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BGTZT_BRANCH_OPCODE_X1:
		case BGTZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) > 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BLBCT_BRANCH_OPCODE_X1:
		case BLBC_BRANCH_OPCODE_X1:
			if (!(get_SrcA_X1(bundle) & 1))
				br_off = get_BrOff_X1(bundle);
			break;
		case BLBST_BRANCH_OPCODE_X1:
		case BLBS_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) & 1)
				br_off = get_BrOff_X1(bundle);
			break;
		case BLEZT_BRANCH_OPCODE_X1:
		case BLEZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) <= 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BLTZT_BRANCH_OPCODE_X1:
		case BLTZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) < 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BNEZT_BRANCH_OPCODE_X1:
		case BNEZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) != 0)
				br_off = get_BrOff_X1(bundle);
			break;
		}

		if (br_off != 0) {
			br_off = sign_extend(br_off, 17);
			addr = regs->pc +
				(br_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
		}
	}

	return addr;
}

/*
 * Replace the next instruction after the current instruction with a
 * breakpoint instruction.
 */
static void do_single_step(struct pt_regs *regs)
{
	unsigned long addr_wr;

	/* Determine where the target instruction will send us to. */
	stepped_addr = get_step_address(regs);
	probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
			  BREAK_INSTR_SIZE);

	addr_wr = writable_address(stepped_addr);
	probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
			   BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}

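/*
 * Restore the bundle saved by do_single_step(); stepped_instr == 0
 * means no single-step breakpoint is currently planted.
 */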
static void undo_single_step(struct pt_regs *regs)
{
	unsigned long addr_wr;

	if (stepped_instr == 0)
		return;

	addr_wr = writable_address(stepped_addr);
	probe_kernel_write((char *)addr_wr, (char *)&stepped_instr,
			   BREAK_INSTR_SIZE);
	stepped_instr = 0;
	smp_wmb();
	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}

/*
 * Die-notifier callback invoked on kernel traps and before the kernel
 * dies. If KGDB is enabled, try to fall into the debugger.
 */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	int ret;
	unsigned long flags;
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;

#ifdef CONFIG_KPROBES
	/*
	 * Return immediately if the kprobes fault notifier has set
	 * DIE_PAGE_FAULT.
	 */
	if (cmd == DIE_PAGE_FAULT)
		return NOTIFY_DONE;
#endif /* CONFIG_KPROBES */

	switch (cmd) {
	case DIE_BREAK:
	case DIE_COMPILED_BPT:
		break;
	case DIE_SSTEPBP:
		local_irq_save(flags);
		kgdb_handle_exception(0, SIGTRAP, 0, regs);
		local_irq_restore(flags);
		return NOTIFY_STOP;
	default:
		/* Userspace events, ignore. */
		if (user_mode(regs))
			return NOTIFY_DONE;
	}

	local_irq_save(flags);
	ret = kgdb_handle_exception(args->trapnr, args->signr, args->err, regs);
	local_irq_restore(flags);
	if (ret)
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_notify,
};

/*
 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
 * @vector: The error vector of the exception that happened.
 * @signo: The signal number of the exception that happened.
 * @err_code: The error code of the exception that happened.
 * @remcom_in_buffer: The buffer of the packet we have read.
 * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
 * @regs: The &struct pt_regs of the current process.
 *
 * This function MUST handle the 'c' and 's' command packets,
 * as well as packets to set / remove a hardware breakpoint, if used.
 * If there are additional packets which the hardware needs to handle,
 * they are handled here. The code should return -1 if it wants to
 * process more packets, and a %0 or %1 if it wants to exit from the
 * kgdb callback.
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	char *ptr;
	unsigned long address;

	/* Undo any stepping we may have done. */
	undo_single_step(regs);

	switch (remcom_in_buffer[0]) {
	case 'c':
	case 's':
	case 'D':
	case 'k':
		/*
		 * Try to read the optional parameter; pc is unchanged if
		 * none is given.  If this was a compiled-in breakpoint, we
		 * need to move to the next instruction or we will just
		 * breakpoint over and over again.
		 */
		ptr = &remcom_in_buffer[1];
		if (kgdb_hex2long(&ptr, &address))
			regs->pc = address;
		else if (*(unsigned long *)regs->pc == compiled_bpt)
			regs->pc += BREAK_INSTR_SIZE;

		if (remcom_in_buffer[0] == 's') {
			do_single_step(regs);
			kgdb_single_step = 1;
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		} else
			atomic_set(&kgdb_cpu_doing_single_step, -1);

		return 0;
	}

	return -1; /* this means that we do not want to exit from the handler */
}

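/*
 * gdb_bpt_instr is filled in with the TILE-Gx breakpoint bundle by
 * kgdb_arch_init() below.
 */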
struct kgdb_arch arch_kgdb_ops;

/*
 * kgdb_arch_init - Perform any architecture specific initialization.
 *
 * This function will handle the initialization of any architecture
 * specific callbacks.
 */
int kgdb_arch_init(void)
{
	tile_bundle_bits bundle = TILEGX_BPT_BUNDLE;

	memcpy(arch_kgdb_ops.gdb_bpt_instr, &bundle, BREAK_INSTR_SIZE);
	return register_die_notifier(&kgdb_notifier);
}

/*
 * kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture
 * specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}

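/*
 * Breakpoints are written through the writable alias returned by
 * writable_address(); the icache is then flushed at the original text
 * address so the new bundle is visible to instruction fetch.
 */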
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	unsigned long addr_wr = writable_address(bpt->bpt_addr);

	if (addr_wr == 0)
		return -1;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;

	err = probe_kernel_write((char *)addr_wr, arch_kgdb_ops.gdb_bpt_instr,
				 BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range((unsigned long)bpt->bpt_addr,
			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
	return err;
}

int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	unsigned long addr_wr = writable_address(bpt->bpt_addr);

	if (addr_wr == 0)
		return -1;

	err = probe_kernel_write((char *)addr_wr, (char *)bpt->saved_instr,
				 BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range((unsigned long)bpt->bpt_addr,
			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
	return err;
}