/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * Comment conventions: '|' starts an end-of-line comment (Motorola syntax);
 * the SAVE_*/RESTORE_* and GET_CURRENT macros come from <asm/entry.h>.
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
| fork wrapper: push the callee-saved registers (switch_stack) on top of
| pt_regs so the C side can inspect/copy them, then discard that frame.
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp		| pop the 24-byte switch_stack
					| (presumably == SWITCH_STACK_SIZE — confirm in <asm/entry.h>)
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| extra arg: pointer to pt_regs above the switch_stack
	jbsr	m68k_clone
	lea	%sp@(28),%sp		| pop switch_stack (24) + the pea'd argument (4)
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp		| pop the 24-byte switch_stack (see __sys_fork)
	rts

| sigreturn wrappers: hand both the switch_stack and the pt_regs pointers
| to the C handler, then restore the callee-saved registers it may have
| rewritten from the signal frame.
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%sp@-		| switch_stack pointer
	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
	jbsr	do_sigreturn
	addql	#8,%sp			| pop the two pointer arguments
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%sp@-		| switch_stack pointer
	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
	jbsr	do_rt_sigreturn
	addql	#8,%sp			| pop the two pointer arguments
	RESTORE_SWITCH_STACK
	rts

| Bus-error exception: save full state and punt to the C handler.
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

| Generic trap/fault entry: same shape as buserr, different C handler.
ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-		| arg: previous task (from resume)
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-		| arg: previous task (from resume)
	jsr	schedule_tail
	movel	%d7,(%sp)		| reuse the slot for the payload's argument
	jsr	%a3@			| call the thread function
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception	| have schedule() "return" to the exception exit path
	jmp	schedule

| Userspace trampolines: re-enter the kernel to run [rt_]sigreturn.
ENTRY(ret_from_user_signal)
	moveq #__NR_sigreturn,%d0
	trap #0

ENTRY(ret_from_user_rt_signal)
	movel #__NR_rt_sigreturn,%d0
	trap #0

#else

| Syscall-entry tracing (ptrace/strace). On return from syscall_trace,
| %d0 == -1 means "skip the syscall"; otherwise redo the range check on
| the (possibly rewritten) syscall number and dispatch.
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp			| dummy return address (matches a switch_stack frame)
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	addql	#1,%d0			| optimization for cmpil #-1,%d0
	jeq	ret_from_syscall
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall			| unsigned compare: valid number, dispatch
	jra	ret_from_syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp			| dummy return address (see do_trace_entry)
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)	| tracing pending? (flag byte 2; msb tested via sign)
	jge	1f
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0 | frame format nibble
	subql	#7,%d0				| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception

| Main system-call entry (trap #0): %d0 holds the syscall number.
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)	| negative byte => trace flag set
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys			| unsigned >= NR_syscalls: reject
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)	| memory-indirect call through sys_call_table[%d0]
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0	| any work-pending flags? (sets CCR for jne)
	jne	syscall_exit_work
1:	RESTORE_ALL

| Slow path out of a syscall: sort the pending-work flags in %d0 by
| shifting them out through the condition codes, highest priority first.
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0			| bit 15 -> C, bit 14 -> N
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0			| test the remaining (low-byte) flags
	jne	do_signal_return
	pea	resume_userspace	| schedule() "returns" to resume_userspace
	jra	schedule


ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0	| low flag byte: work pending?
	jne	exit_work
1:	RESTORE_ALL

exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0			| shift out top bit of the flag byte
	jne	do_signal_return
	pea	resume_userspace	| schedule() "returns" to resume_userspace
	jra	schedule


do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| arg: pt_regs pointer
	bsrl	do_notify_resume
	addql	#4,%sp			| pop the argument
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| pop the dummy return address
	jbra	resume_userspace

| Deliver the SIGTRAP that was deferred while single-stepping:
| send_sig(LSIGTRAP, current, 1).
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp			| pop two of the three arguments
	addql	#4,%sp			| ... and the third
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0 | vector field of the frame
	subw	#VEC_SPUR,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
auto_irqhandler_fixup = . + 2	| patch point: 32-bit operand of the jsr below
				| (presumably rewritten at init to redirect the handler — confirm)
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0 | vector field of the frame
user_irqvec_fixup = . + 2	| patch point: immediate of the subw below
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-		| arg: stack frame pointer
	jsr	handle_badint
	addql	#4,%sp
	jra	ret_from_exception

| Context switch. Called from C switch_to(); returns (via rts) into the
| new task's saved kernel context.
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	/* with FPU emulation, a real FPU may still be absent at runtime */
	tstl	m68k_fputype
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| NULL frame: no live FP state to save
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE) | non-060: format byte at offset 0
	jeq	3f			| NULL frame: no live FP state to save
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| NULL frame: only frestore needed
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE) | non-060: format byte at offset 0
	jeq	3f			| NULL frame: only frestore needed
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */