/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
| Syscall wrappers for fork/clone/vfork: these need the full register set
| (the switch_stack frame) saved on the stack so the C implementation can
| build the child's thread state from it.
| NOTE(review): the hard-coded 24 below is assumed to equal
| SWITCH_STACK_SIZE (the frame pushed by SAVE_SWITCH_STACK in
| asm/entry.h) -- confirm if those macros ever change.
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp			| pop the switch_stack frame
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)		| arg: pt_regs pointer (above switch_stack)
	jbsr	m68k_clone
	lea	%sp@(28),%sp			| pop argument (4) + switch_stack (24)
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp			| pop the switch_stack frame
	rts

ENTRY(__sys_clone3)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)		| arg: pt_regs pointer (above switch_stack)
	jbsr	m68k_clone3
	lea	%sp@(28),%sp			| pop argument (4) + switch_stack (24)
	rts

| sigreturn/rt_sigreturn: restore the register state saved when the signal
| was delivered.  A gap is opened below the saved frames; the C helper
| returns (in %d0) the address of the relocated switch_stack within that
| region, which becomes the new %sp.
| NOTE(review): the 84-byte gap presumably leaves room for the C code to
| rebuild a larger exception frame -- confirm against do_sigreturn().
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1				| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
	lea	%sp@(-84),%sp			| leave a gap
	movel	%a1,%sp@-
	movel	%a0,%sp@-
	jbsr	do_sigreturn
	jra	1f				| shared with rt_sigreturn()

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1				| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
	lea	%sp@(-84),%sp			| leave a gap
	movel	%a1,%sp@-
	movel	%a0,%sp@-
	| stack contents:
	|   [original pt_regs address] [original switch_stack address]
	|   [gap] [switch_stack] [pt_regs] [exception frame]
	jbsr	do_rt_sigreturn

1:
	| stack contents now:
	|   [original pt_regs address] [original switch_stack address]
	|   [unused part of the gap] [moved switch_stack] [moved pt_regs]
	|   [replacement exception frame]
	| return value of do_{rt_,}sigreturn() points to moved switch_stack.

	movel	%d0,%sp				| discard the leftover junk
	RESTORE_SWITCH_STACK
	| stack contents now is just [syscall return address] [pt_regs] [frame]
	| return pt_regs.d0
	movel	%sp@(PT_OFF_D0+4),%d0		| +4 skips the return address
	rts

| Bus error: save full state and hand the frame to the C handler.
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-			| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

| Generic trap/fault: save full state and hand the frame to the C handler.
ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-			| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-			| arg: previous task
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-			| arg: previous task (from resume)
	jsr	schedule_tail
	movel	%d7,(%sp)			| reuse the slot for the payload arg
	jsr	%a3@				| run the thread function
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

| Debug interrupt (ColdFire/no-MMU builds): same shape as buserr/trap.
.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-			/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

| Record the current frame top, then tail-call schedule() with
| ret_from_exception as its return address.
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception
	jmp	schedule

| User-mode trampolines: re-enter the kernel via trap #0 to run the
| matching sigreturn syscall after a signal handler finishes.
ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0
	trap	#0

ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0

#else

| Syscall entry path when tracing (e.g. ptrace/strace) is active:
| report entry, then re-validate and dispatch the (possibly modified)
| syscall number.
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)	| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	addql	#1,%d0				| optimization for cmpil #-1,%d0
	jeq	ret_from_syscall		| tracer cancelled the syscall
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall
	jra	ret_from_syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

| Report syscall exit to the tracer, then take the exception-return path.
do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

| Main system-call entry: %d0 holds the syscall number on entry.
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)		| test the trace flag byte
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys				| out-of-range number -> -ENOSYS
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)	| indirect through the table
	movel	%d0,%sp@(PT_OFF_D0)		| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0		| any exit work pending?
	jne	syscall_exit_work
1:	RESTORE_ALL

syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)		| check if returning to kernel
	bnes	1b				| if so, skip resched, signals
	| decode the work bits by shifting them into the condition codes
	lslw	#1,%d0
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jne	do_signal_return
	pea	resume_userspace		| return address for schedule()
	jra	schedule


| Common return path for faults and interrupts.
ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)		| check if returning to kernel
	bnes	1f				| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0		| low work-flag byte
	jne	exit_work
1:	RESTORE_ALL

| Pending work before returning to user space: either deliver a signal
| or reschedule.
exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0				| shift work bits into flags
	jne	do_signal_return
	pea	resume_userspace		| return address for schedule()
	jra	schedule


| Deliver pending signals / resume notifications via do_notify_resume().
do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp				| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)		| arg: pt_regs pointer
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp				| drop the dummy return address
	jbra	resume_userspace

| A single-step trace trap was postponed until syscall exit: clear the
| trace bit and post SIGTRAP to the current task.
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)		| clear trace bit in SR
	pea	1				| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0		|  rebase to IRQ number

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
auto_irqhandler_fixup = . + 2		|  patch point: the jsr target word
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2		|  patch point: the subw immediate word
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-		|  arg: stack frame pointer
	jsr	handle_badint
	addql	#4,%sp
	jra	ret_from_exception

| Context switch: save prev's state into its thread_struct, load next's.
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FC)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU -> skip FP save
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| NULL frame: no live FP state to dump
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| NULL frame: no live FP state to dump
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU -> skip FP restore
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| NULL frame: registers need no reload
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| NULL frame: registers need no reload
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FC),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */