// SPDX-License-Identifier: GPL-2.0
/*
 *  Author: Huacai Chen <chenhuacai@loongson.cn>
 *  Copyright (C) 2020 Loongson Technology Corporation Limited
 */

#include <linux/module.h>
#include <linux/ptrace.h>		/* for linux pt_regs struct */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/inst.h>
#include <asm/fpu.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include <asm/watch.h>
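
/*
 * Number of data/instruction watch (hardware watchpoint/breakpoint) registers
 * reserved for KGDB, configurable through the module parameters below.
 */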
static int kgdb_watch_dcount;
static int kgdb_watch_icount;
int kgdb_watch_activated;

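/*
 * Module parameter setter: reserve the top 'val' data watchpoint registers
 * for KGDB and leave the rest (watch_dreg_count) to the remainder of the
 * kernel.  The low six bits of LOONGARCH_CSR_MWPC hold the number of data
 * watchpoints implemented by the CPU.
 */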
int param_set_dcount(const char *val, const struct kernel_param *kp)
{
	int dbcn, d, ret;

	ret = kstrtoint(val, 0, &d);
	if (ret < 0)
		return ret;
	dbcn = csr_read32(LOONGARCH_CSR_MWPC) & 0x3f;
	if (d > dbcn)
		return -EINVAL;
	boot_cpu_data.watch_dreg_count = dbcn - d;
	*(int *)kp->arg = d;
	return 0;
}

int param_set_icount(const char *val, const struct kernel_param *kp)
{
	int ibcn, d, ret;

	ret = kstrtoint(val, 0, &d);
	if (ret < 0)
		return ret;
	ibcn = csr_read32(LOONGARCH_CSR_FWPC) & 0x3f;
	if (d > ibcn)
		return -EINVAL;
	boot_cpu_data.watch_ireg_count = ibcn - d;
	*(int *)kp->arg = d;
	return 0;
}

const struct kernel_param_ops param_ops_dcount = {
	.set = param_set_dcount,
	.get = param_get_int,
};

const struct kernel_param_ops param_ops_icount = {
	.set = param_set_icount,
	.get = param_get_int,
};

module_param_cb(kgdb_watch_dcount, &param_ops_dcount, &kgdb_watch_dcount, 0644);
module_param_cb(kgdb_watch_icount, &param_ops_icount, &kgdb_watch_icount, 0644);

static struct hard_trap_info {
	unsigned char tt;	/* Trap type code for LoongArch */
	unsigned char signo;	/* Signal that we map this trap into */
} hard_trap_info[] = {
	{ 1, SIGBUS },
	{ 2, SIGBUS },
	{ 3, SIGBUS },
	{ 4, SIGBUS },
	{ 5, SIGBUS },
	{ 6, SIGBUS },
	{ 7, SIGBUS },
	{ 8, SIGBUS },
	{ 9, SIGBUS },
	{ 10, SIGBUS },
	{ 12, SIGTRAP },		/* break */
	{ 13, SIGBUS },
	{ 14, SIGBUS },
	{ 15, SIGFPE },
	{ 16, SIGFPE },
	{ 17, SIGFPE },
	{ 18, SIGFPE },
	{ 0, 0}			/* Must be last */
};

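/*
 * GDB register map: 0-31 are the GPRs, 32 is orig_a0, 33 is pc (csr_era),
 * 34 is badv, 35-66 are the FP registers, 67-74 are fcc0-fcc7 and 75 is
 * fcsr.  For pt_regs-backed entries the last field is a byte offset into
 * struct pt_regs; for the FP state it is the register index instead.
 */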
struct dbg_reg_def_t dbg_reg_def[DBG_ALL_REG_NUM] = {
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
	{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
	{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
	{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
	{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
	{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
	{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
	{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
	{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
	{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
	{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
	{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
	{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
	{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
	{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
	{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
	{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
	{ "orig_a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, orig_a0) },
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_era) },
	{ "badv", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_badvaddr) },
	{ "f0", GDB_SIZEOF_REG, 0 },
	{ "f1", GDB_SIZEOF_REG, 1 },
	{ "f2", GDB_SIZEOF_REG, 2 },
	{ "f3", GDB_SIZEOF_REG, 3 },
	{ "f4", GDB_SIZEOF_REG, 4 },
	{ "f5", GDB_SIZEOF_REG, 5 },
	{ "f6", GDB_SIZEOF_REG, 6 },
	{ "f7", GDB_SIZEOF_REG, 7 },
	{ "f8", GDB_SIZEOF_REG, 8 },
	{ "f9", GDB_SIZEOF_REG, 9 },
	{ "f10", GDB_SIZEOF_REG, 10 },
	{ "f11", GDB_SIZEOF_REG, 11 },
	{ "f12", GDB_SIZEOF_REG, 12 },
	{ "f13", GDB_SIZEOF_REG, 13 },
	{ "f14", GDB_SIZEOF_REG, 14 },
	{ "f15", GDB_SIZEOF_REG, 15 },
	{ "f16", GDB_SIZEOF_REG, 16 },
	{ "f17", GDB_SIZEOF_REG, 17 },
	{ "f18", GDB_SIZEOF_REG, 18 },
	{ "f19", GDB_SIZEOF_REG, 19 },
	{ "f20", GDB_SIZEOF_REG, 20 },
	{ "f21", GDB_SIZEOF_REG, 21 },
	{ "f22", GDB_SIZEOF_REG, 22 },
	{ "f23", GDB_SIZEOF_REG, 23 },
	{ "f24", GDB_SIZEOF_REG, 24 },
	{ "f25", GDB_SIZEOF_REG, 25 },
	{ "f26", GDB_SIZEOF_REG, 26 },
	{ "f27", GDB_SIZEOF_REG, 27 },
	{ "f28", GDB_SIZEOF_REG, 28 },
	{ "f29", GDB_SIZEOF_REG, 29 },
	{ "f30", GDB_SIZEOF_REG, 30 },
	{ "f31", GDB_SIZEOF_REG, 31 },
	{ "fcc0", 1, 0 },
	{ "fcc1", 1, 1 },
	{ "fcc2", 1, 2 },
	{ "fcc3", 1, 3 },
	{ "fcc4", 1, 4 },
	{ "fcc5", 1, 5 },
	{ "fcc6", 1, 6 },
	{ "fcc7", 1, 7 },
	{ "fcsr", GDB_SIZEOF_REG, 0 },
};

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	int fp_reg;

	if (regno < 0 || regno >= DBG_ALL_REG_NUM)
		return -EINVAL;

	if (dbg_reg_def[regno].offset != -1 && regno < 35) {
		/* GPRs, orig_a0, pc and badv live in struct pt_regs */
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);
	} else if (current && dbg_reg_def[regno].offset != -1 && regno < 78) {
		/* FP registers 35 -> 66, fcc 67 -> 74, fcsr 75 */
		if (!(regs->csr_euen & CSR_EUEN_FPEN))
			return 0;
		if (regno == 75) {
			/* Process the fcsr/fsr (register 75) */
			memcpy((void *)&current->thread.fpu.fcsr, mem,
			       dbg_reg_def[regno].size);
		} else if (regno >= 67 && regno < 75) {
			/* Process the fcc */
			fp_reg = dbg_reg_def[regno].offset;
			memcpy((char *)&current->thread.fpu.fcc + fp_reg, mem,
			       dbg_reg_def[regno].size);
		} else {
			fp_reg = dbg_reg_def[regno].offset;
			memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
			       dbg_reg_def[regno].size);
		}

		restore_fp(current);
	}

	return 0;
}

char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	int fp_reg;

	if (regno >= DBG_ALL_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1 && regno < 35) {
		/* GPRs, orig_a0, pc and badv live in struct pt_regs */
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);
	} else if (current && dbg_reg_def[regno].offset != -1 && regno < 78) {
		/* FP registers 35 -> 66, fcc 67 -> 74, fcsr 75 */
		if (!(regs->csr_euen & CSR_EUEN_FPEN))
			goto out;
		save_fp(current);
		if (regno == 75) {
			/* Process the fcsr/fsr (register 75) */
			memcpy(mem, (void *)&current->thread.fpu.fcsr,
			       dbg_reg_def[regno].size);
		} else if (regno >= 67 && regno < 75) {
			/* Process the fcc */
			fp_reg = dbg_reg_def[regno].offset;
			memcpy(mem, (char *)&current->thread.fpu.fcc + fp_reg,
			       dbg_reg_def[regno].size);
		} else {
			fp_reg = dbg_reg_def[regno].offset;
			memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
			       dbg_reg_def[regno].size);
		}
	}

out:
	return dbg_reg_def[regno].name;
}

void arch_kgdb_breakpoint(void)
{
	__asm__ __volatile__(
		".globl breakinst\n\t"
		"nop\n"
		"breakinst:\tbreak 0\n\t");

	annotate_reachable();
}

static int compute_signal(int tt)
{
	struct hard_trap_info *ht;

	for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
		if (ht->tt == tt)
			return ht->signo;

	return SIGTRAP;		/* default for things we don't know about */
}

/*
 * Similar to regs_to_gdb_regs() except that the process is sleeping, so
 * we may not be able to get all of the info.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	int reg;
#if (KGDB_GDB_REG_SIZE == 32)
	u32 *ptr = (u32 *)gdb_regs;
#else
	u64 *ptr = (u64 *)gdb_regs;
#endif

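	/*
	 * Only callee-saved state survives __switch_to(): r0 is always zero,
	 * ra and sp come from the saved thread context, tp is filled with the
	 * task pointer, and the caller-saved registers are simply zeroed.
	 */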
	*(ptr++) = 0;
	*(ptr++) = p->thread.reg01;
	*(ptr++) = (long)p;
	*(ptr++) = p->thread.reg03;
	for (reg = 4; reg < 23; reg++)
		*(ptr++) = 0;

	/* S0 - S8 */
	*(ptr++) = p->thread.reg23;
	*(ptr++) = p->thread.reg24;
	*(ptr++) = p->thread.reg25;
	*(ptr++) = p->thread.reg26;
	*(ptr++) = p->thread.reg27;
	*(ptr++) = p->thread.reg28;
	*(ptr++) = p->thread.reg29;
	*(ptr++) = p->thread.reg30;
	*(ptr++) = p->thread.reg31;
	*(ptr++) = 0;

	/*
	 * Use the return address (RA) as the PC, i.e. the point right after
	 * returning from __switch_to().
	 */
	*(ptr++) = p->thread.reg01;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->csr_era = pc;
}

/*
 * Die notifier callback: if KGDB is enabled, try to drop into the
 * debugger before the kernel dies.
 */
static int kgdb_loongarch_notify(struct notifier_block *self, unsigned long cmd,
			    void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	int trap = read_csr_excode();

#ifdef CONFIG_KPROBES
	/*
	 * Return immediately if the kprobes fault notifier has set
	 * DIE_PAGE_FAULT.
	 */
	if (cmd == DIE_PAGE_FAULT)
		return NOTIFY_DONE;
#endif /* CONFIG_KPROBES */

	/* Userspace events, ignore. */
	if (user_mode(regs))
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(smp_processor_id(), regs);

	if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
		return NOTIFY_DONE;

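	/* If we stopped at the compiled-in breakpoint, step over the 4-byte break instruction */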
	if (atomic_read(&kgdb_setting_breakpoint))
		if ((regs->csr_era == (unsigned long)breakinst))
			regs->csr_era += 4;

	/* On SMP, flush_cache_all() may raise IPIs, so interrupts must be enabled */
	local_irq_enable();
	flush_cache_all();

	return NOTIFY_STOP;
}

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
int kgdb_ll_trap(int cmd, const char *str,
		 struct pt_regs *regs, long err, int trap, int sig)
{
	struct die_args args = {
		.regs	= regs,
		.str	= str,
		.err	= err,
		.trapnr = trap,
		.signr	= sig,
	};

	if (!kgdb_io_module_registered)
		return NOTIFY_DONE;

	return kgdb_loongarch_notify(NULL, cmd, &args);
}
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_loongarch_notify,
};

/*
 * Handle the 'c' command
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	char *ptr;
	unsigned long address;

	regs->csr_prmd |= CSR_PRMD_PWE;

	switch (remcom_in_buffer[0]) {
	case 'c':
		/* handle the optional parameter */
		ptr = &remcom_in_buffer[1];
		if (kgdb_hex2long(&ptr, &address))
			regs->csr_era = address;

		return 0;
	}

	return -1;
}

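/*
 * Software view of the hardware watch registers.  Bit 0 of 'enabled' means
 * the breakpoint has been requested by gdb, bit 1 means it is currently
 * programmed into the watch CSRs.
 */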
static struct hw_breakpoint {
	unsigned		enabled;
	unsigned long		addr;
	int			len;
	int			type;
	struct perf_event	* __percpu *pev;
} dbreakinfo[NUM_WATCH_REGS], ibreakinfo[NUM_WATCH_REGS];

static int
kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
	int i;
	struct hw_breakpoint *breakinfo = (bptype == BP_HARDWARE_BREAKPOINT) ?
					  ibreakinfo : dbreakinfo;
	int count = (bptype == BP_HARDWARE_BREAKPOINT) ? kgdb_watch_icount :
							 kgdb_watch_dcount;

	for (i = 0; i < count; i++)
		if (!breakinfo[i].enabled)
			break;
	if (i == count)
		return -1;

	breakinfo[i].type = bptype;
	breakinfo[i].len = len;
	breakinfo[i].addr = addr;
	breakinfo[i].enabled |= 1;

	return 0;
}

static int
kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
	int i;
	struct hw_breakpoint *breakinfo = (bptype == BP_HARDWARE_BREAKPOINT) ?
					  ibreakinfo : dbreakinfo;
	int count = (bptype == BP_HARDWARE_BREAKPOINT) ? kgdb_watch_icount :
							 kgdb_watch_dcount;

	for (i = 0; i < count; i++)
		if (breakinfo[i].addr == addr && breakinfo[i].enabled)
			break;
	if (i == count)
		return -1;

	breakinfo[i].enabled &= ~1;

	return 0;
}

static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
	csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
	regs->csr_prmd &= ~CSR_PRMD_PWE;
}

static void kgdb_remove_all_hw_break(void)
{
	int i, j, mask;

	for (mask = 0, i = 0, j = boot_cpu_data.watch_ireg_count;
	     i < kgdb_watch_icount; i++, j++) {
		if (!(ibreakinfo[i].enabled & 2))
			continue;
		ibreakinfo[i].enabled = 0;
		watch_csrwr(0, LOONGARCH_CSR_IB0ADDR + 8 * j);
		watch_csrwr(0, LOONGARCH_CSR_IB0MASK + 8 * j);
		watch_csrwr(0, LOONGARCH_CSR_IB0ASID + 8 * j);
		watch_csrwr(0, LOONGARCH_CSR_IB0CTL + 8 * j);
		mask |= 1 << j;
	}
	watch_csrwr(mask, LOONGARCH_CSR_FWPS);

	for (mask = 0, i = 0, j = boot_cpu_data.watch_dreg_count; i < kgdb_watch_dcount;
	     i++, j++) {
		if (!(dbreakinfo[i].enabled & 2))
			continue;
		dbreakinfo[i].enabled = 0;
		watch_csrwr(0, LOONGARCH_CSR_DB0ADDR + 8 * j);
		watch_csrwr(0, LOONGARCH_CSR_DB0MASK + 8 * j);
		watch_csrwr(0, LOONGARCH_CSR_DB0ASID + 8 * j);
		watch_csrwr(0, LOONGARCH_CSR_DB0CTL + 8 * j);
		mask |= 1 << j;
	}
	watch_csrwr(mask, LOONGARCH_CSR_MWPS);

	csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);

	kgdb_watch_activated = 0;
}

static void kgdb_correct_hw_break(void)
{
	int i, j, dbc, activated = 0;

	for (i = 0, j = boot_cpu_data.watch_ireg_count; i < kgdb_watch_icount; i++, j++) {
		if ((ibreakinfo[i].enabled & 3) == 2) {
			watch_csrwr(0, LOONGARCH_CSR_IB0CTL + 8*j);
			ibreakinfo[i].enabled = 0;
			continue;
		} else if (!ibreakinfo[i].enabled)
			continue;
		ibreakinfo[i].enabled |= 2;
		watch_csrwr(ibreakinfo[i].addr, LOONGARCH_CSR_IB0ADDR + 8*j);
		watch_csrwr(0, LOONGARCH_CSR_IB0MASK + 8*j);
		watch_csrwr(0, LOONGARCH_CSR_IB0ASID + 8*j);
		watch_csrwr(0x1e, LOONGARCH_CSR_IB0CTL + 8*j);
		watch_csrwr(0x10000, LOONGARCH_CSR_FWPS);
		activated = 1;
	}

	for (i = 0, j = boot_cpu_data.watch_dreg_count; i < kgdb_watch_dcount; i++, j++) {
		if ((dbreakinfo[i].enabled & 3) == 2) {
			watch_csrwr(0, LOONGARCH_CSR_DB0CTL + 8*j);
			dbreakinfo[i].enabled = 0;
			continue;
		} else if (!dbreakinfo[i].enabled)
			continue;
		dbreakinfo[i].enabled |= 2;
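		/*
		 * Control word: 0x1e enables the watchpoint at all privilege
		 * levels, bits 11:10 encode the access size (0=8, 1=4, 2=2,
		 * 3=1 bytes) and bits 9:8 enable store/load matching.
		 */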
		dbc = 0x1e;
		switch (dbreakinfo[i].len) {
		case 8:
			break;
		case 4:
			dbc |= (1<<10);
			break;
		case 2:
			dbc |= (2<<10);
			break;
		case 1:
			dbc |= (3<<10);
			break;
		default:
			break;
		}

		if (dbreakinfo[i].type == BP_WRITE_WATCHPOINT) {
			dbc |= 1<<9;
		} else if (dbreakinfo[i].type == BP_READ_WATCHPOINT) {
			dbc |= 1<<8;
		} else {
			dbc |= 3<<8;
		}

		watch_csrwr(dbreakinfo[i].addr, LOONGARCH_CSR_DB0ADDR + 8*j);
		watch_csrwr(0, LOONGARCH_CSR_DB0MASK + 8*j);
		watch_csrwr(0, LOONGARCH_CSR_DB0ASID + 8*j);
		watch_csrwr(dbc, LOONGARCH_CSR_DB0CTL + 8*j);
		activated = 1;
	}

	csr_xchg32(activated ? CSR_CRMD_WE : 0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
	kgdb_watch_activated = activated;
}

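/* .gdb_bpt_instr is the little-endian encoding of 'break 0' (0x002a0000) */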
const struct kgdb_arch arch_kgdb_ops = {
	.flags			= KGDB_HW_BREAKPOINT,
	.set_hw_breakpoint	= kgdb_set_hw_break,
	.remove_hw_breakpoint	= kgdb_remove_hw_break,
	.disable_hw_break	= kgdb_disable_hw_debug,
	.remove_all_hw_break	= kgdb_remove_all_hw_break,
	.correct_hw_break	= kgdb_correct_hw_break,
	.gdb_bpt_instr		= { 0x00, 0x00, break_op >> 1, 0x00 },
};

int kgdb_arch_init(void)
{
	int ibcn, dbcn;

	register_die_notifier(&kgdb_notifier);
	dbcn = csr_read32(LOONGARCH_CSR_MWPC) & 0x3f;
	ibcn = csr_read32(LOONGARCH_CSR_FWPC) & 0x3f;
	boot_cpu_data.watch_dreg_count = dbcn - kgdb_watch_dcount;
	boot_cpu_data.watch_ireg_count = ibcn - kgdb_watch_icount;
	return 0;
}

/*
 *	kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 *	This function will handle the uninitialization of any architecture
 *	specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}