// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel traps/events for Hexagon processor
 *
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/tracehook.h>
#include <asm/traps.h>
#include <asm/vm_fault.h>
#include <asm/syscall.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/sections.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>
#endif

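/* Immediate operands of the trap0 instruction handled below. */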
#define TRAP_SYSCALL 1
#define TRAP_DEBUG 0xdb

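/*
 * Nothing to do here; the VM event/trap vectors are installed
 * elsewhere during early boot.
 */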
void __init trap_init(void)
{
}

#ifdef CONFIG_GENERIC_BUG
/* Maybe should resemble arch/sh/kernel/traps.c ?? */
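/*
 * CONFIG_GENERIC_BUG asks the architecture whether a trapping address
 * plausibly contains a BUG() trap; with no cheap way to check that
 * here, accept any address and let the generic code match it against
 * the bug table.
 */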
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

static const char *ex_name(int ex)
{
	switch (ex) {
	case HVM_GE_C_XPROT:
	case HVM_GE_C_XUSER:
		return "Execute protection fault";
	case HVM_GE_C_RPROT:
	case HVM_GE_C_RUSER:
		return "Read protection fault";
	case HVM_GE_C_WPROT:
	case HVM_GE_C_WUSER:
		return "Write protection fault";
	case HVM_GE_C_XMAL:
		return "Misaligned instruction";
	case HVM_GE_C_WREG:
		return "Multiple writes to same register in packet";
	case HVM_GE_C_PCAL:
		return "Program counter values that are not properly aligned";
	case HVM_GE_C_RMAL:
		return "Misaligned data load";
	case HVM_GE_C_WMAL:
		return "Misaligned data store";
	case HVM_GE_C_INVI:
	case HVM_GE_C_PRIVI:
		return "Illegal instruction";
	case HVM_GE_C_BUS:
		return "Precise bus error";
	case HVM_GE_C_CACHE:
		return "Cache error";

	case 0xdb:
		return "Debugger trap";

	default:
		return "Unrecognized exception";
	}
}

static void do_show_stack(struct task_struct *task, unsigned long *fp,
			  unsigned long ip, const char *loglvl)
{
	int kstack_depth_to_print = 24;
	unsigned long offset, size;
	const char *name = NULL;
	unsigned long *newfp;
	unsigned long low, high;
	char tmpstr[128];
	char *modname;
	int i;

	if (task == NULL)
		task = current;

	printk("%sCPU#%d, %s/%d, Call Trace:\n", loglvl, raw_smp_processor_id(),
	       task->comm, task_pid_nr(task));

	if (fp == NULL) {
		if (task == current) {
			asm("%0 = r30" : "=r" (fp));
		} else {
			fp = (unsigned long *)
			     ((struct hexagon_switch_stack *)
			     task->thread.switch_sp)->fp;
		}
	}

	if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
		printk("%s-- Corrupt frame pointer %p\n", loglvl, fp);
		return;
	}

	/* Saved link reg is one word above FP */
	if (!ip)
		ip = *(fp+1);

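	/*
	 * Frame layout assumed by this walker (as implied by the code
	 * below, not an ABI reference):
	 *
	 *	fp[1]	saved LR (return address)
	 *	fp[0]	caller's frame pointer
	 *
	 * A saved frame pointer of zero marks an exception frame; the
	 * saved pt_regs sits 8 bytes above the current frame pointer.
	 */
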
	/* Expect kernel stack to be in-bounds */
	low = (unsigned long)task_stack_page(task);
	high = low + THREAD_SIZE - 8;
	low += sizeof(struct thread_info);

	for (i = 0; i < kstack_depth_to_print; i++) {

		name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);

		printk("%s[%p] 0x%lx: %s + 0x%lx", loglvl, fp, ip, name, offset);
		if (((unsigned long) fp < low) || (high < (unsigned long) fp))
			printk(KERN_CONT " (FP out of bounds!)");
		if (modname)
			printk(KERN_CONT " [%s] ", modname);
		printk(KERN_CONT "\n");

		newfp = (unsigned long *) *fp;

		if (((unsigned long) newfp) & 0x3) {
			printk("%s-- Corrupt frame pointer %p\n", loglvl, newfp);
			break;
		}

		/* Attempt to continue past exception. */
		if (0 == newfp) {
			struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
						+ 8);

			if (regs->syscall_nr != -1) {
				printk("%s-- trap0 -- syscall_nr: %ld", loglvl,
					regs->syscall_nr);
				printk(KERN_CONT " psp: %lx elr: %lx\n",
					pt_psp(regs), pt_elr(regs));
				break;
			} else {
				/* really want to see more ... */
				kstack_depth_to_print += 6;
				printk("%s-- %s (0x%lx) badva: %lx\n", loglvl,
					ex_name(pt_cause(regs)), pt_cause(regs),
					pt_badva(regs));
			}

			newfp = (unsigned long *) regs->r30;
			ip = pt_elr(regs);
		} else {
			ip = *(newfp + 1);
		}

		/* If link reg is null, we are done. */
		if (ip == 0x0)
			break;

		/* If newfp isn't larger, we're tracing garbage. */
		if (newfp > fp)
			fp = newfp;
		else
			break;
	}
}

void show_stack(struct task_struct *task, unsigned long *fp, const char *loglvl)
{
	/* Saved link reg is one word above FP */
	do_show_stack(task, fp, 0, loglvl);
}

int die(const char *str, struct pt_regs *regs, long err)
{
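	/*
	 * Shared lock/counter: oops output is serialized and numbered
	 * so that overlapping reports can be told apart in the log.
	 */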
	static struct {
		spinlock_t lock;
		int counter;
	} die = {
		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
		.counter = 0
	};

	console_verbose();
	oops_enter();

	spin_lock_irq(&die.lock);
	bust_spinlocks(1);
	printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);

	if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
	    NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);
	do_show_stack(current, &regs->r30, pt_elr(regs), KERN_EMERG);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	spin_unlock_irq(&die.lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	make_task_dead(err);
	return 0;
}

int die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		return die(str, regs, err);
	else
		return 0;
}

/*
 * It's not clear that misaligned fetches are ever recoverable.
 */
static void misaligned_instruction(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Instruction", regs, 0);
	force_sig(SIGBUS);
}

/*
 * Misaligned loads and stores, on the other hand, can be
 * emulated, and probably should be, some day. But for now
 * they will be considered fatal.
 */
static void misaligned_data_load(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Data Load", regs, 0);
	force_sig(SIGBUS);
}

static void misaligned_data_store(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Data Store", regs, 0);
	force_sig(SIGBUS);
}

static void illegal_instruction(struct pt_regs *regs)
{
	die_if_kernel("Illegal Instruction", regs, 0);
	force_sig(SIGILL);
}

/*
 * Precise bus errors may be recoverable with a retry,
 * but for now, treat them as irrecoverable.
 */
static void precise_bus_error(struct pt_regs *regs)
{
	die_if_kernel("Precise Bus Error", regs, 0);
	force_sig(SIGBUS);
}

/*
 * If anything is to be done here other than panic,
 * it will probably be complex and migrate to another
 * source module. For now, just die.
 */
static void cache_error(struct pt_regs *regs)
{
	die("Cache Error", regs, 0);
}

/*
 * General exception handler
 */
void do_genex(struct pt_regs *regs)
{
	/*
	 * Decode Cause and Dispatch
	 */
	switch (pt_cause(regs)) {
	case HVM_GE_C_XPROT:
	case HVM_GE_C_XUSER:
		execute_protection_fault(regs);
		break;
	case HVM_GE_C_RPROT:
	case HVM_GE_C_RUSER:
		read_protection_fault(regs);
		break;
	case HVM_GE_C_WPROT:
	case HVM_GE_C_WUSER:
		write_protection_fault(regs);
		break;
	case HVM_GE_C_XMAL:
		misaligned_instruction(regs);
		break;
	case HVM_GE_C_WREG:
		illegal_instruction(regs);
		break;
	case HVM_GE_C_PCAL:
		misaligned_instruction(regs);
		break;
	case HVM_GE_C_RMAL:
		misaligned_data_load(regs);
		break;
	case HVM_GE_C_WMAL:
		misaligned_data_store(regs);
		break;
	case HVM_GE_C_INVI:
	case HVM_GE_C_PRIVI:
		illegal_instruction(regs);
		break;
	case HVM_GE_C_BUS:
		precise_bus_error(regs);
		break;
	case HVM_GE_C_CACHE:
		cache_error(regs);
		break;
	default:
		/* Halt and catch fire */
		panic("Unrecognized exception 0x%lx\n", pt_cause(regs));
		break;
	}
}

/* Indirect system call dispatch */
long sys_syscall(void)
{
	printk(KERN_ERR "sys_syscall invoked!\n");
	return -ENOSYS;
}

void do_trap0(struct pt_regs *regs)
{
	syscall_fn syscall;

	switch (pt_cause(regs)) {
	case TRAP_SYSCALL:
		/* System call is trap0 #1 */

		/* allow strace to catch syscall args */
		if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) &&
			tracehook_report_syscall_entry(regs)))
			return;  /* return -ENOSYS somewhere? */

		/* Interrupts should be re-enabled for syscall processing */
		__vmsetie(VM_INT_ENABLE);

		/*
		 * System call number is in r6, arguments in r0..r5.
		 * Fortunately, no Linux syscall has more than 6 arguments,
		 * and Hexagon ABI passes first 6 arguments in registers.
		 * 64-bit arguments are passed in odd/even register pairs.
		 * Fortunately, we have no system calls that take more
		 * than three arguments with more than one 64-bit value.
		 * Should that change, we'd need to redesign to copy
		 * between user and kernel stacks.
		 */
		regs->syscall_nr = regs->r06;

		/*
		 * GPR R0 carries the first parameter, and is also used
		 * to report the return value. We need a backup of
		 * the user's value in case we need to do a late restart
		 * of the system call.
		 */
		regs->restart_r0 = regs->r00;

		if ((unsigned long) regs->syscall_nr >= __NR_syscalls) {
			regs->r00 = -1;
		} else {
			syscall = (syscall_fn)
				  (sys_call_table[regs->syscall_nr]);
			regs->r00 = syscall(regs->r00, regs->r01,
					    regs->r02, regs->r03,
					    regs->r04, regs->r05);
		}
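
		/*
		 * The result (or a negative errno) is returned to user
		 * space in r0; restart_r0, saved above, preserves the
		 * original first argument in case the call must be
		 * restarted after a signal.
		 */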

		/* allow strace to get the syscall return state */
		if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
			tracehook_report_syscall_exit(regs, 0);

		break;
	case TRAP_DEBUG:
		/* Trap0 0xdb is debug breakpoint */
		if (user_mode(regs)) {
			/*
			 * Some architectures add some per-thread state
			 * to distinguish between breakpoint traps and
			 * trace traps. We may want to do that, and
			 * set the si_code value appropriately, or we
			 * may want to use a different trap0 flavor.
			 */
			force_sig_fault(SIGTRAP, TRAP_BRKPT,
					(void __user *) pt_elr(regs));
		} else {
#ifdef CONFIG_KGDB
			kgdb_handle_exception(pt_cause(regs), SIGTRAP,
					      TRAP_BRKPT, regs);
#endif
		}
		break;
	}
	/* Ignore other trap0 codes for now, especially 0 (Angel calls) */
}

/*
 * Machine check exception handler
 */
void do_machcheck(struct pt_regs *regs)
{
	/* Halt and catch fire */
	__vmstop();
}

/*
 * Treat this like the old 0xdb trap.
 */

void do_debug_exception(struct pt_regs *regs)
{
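	/*
	 * Rewrite the cause field of the saved VM event record to the
	 * legacy 0xdb value so that do_trap0() dispatches this as a
	 * debugger breakpoint.
	 */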
	regs->hvmer.vmest &= ~HVM_VMEST_CAUSE_MSK;
	regs->hvmer.vmest |= (TRAP_DEBUG << HVM_VMEST_CAUSE_SFT);
	do_trap0(regs);
}