/*
 * arch/sh/kernel/cpu/sh3/entry.S
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
! target is too far away, but this causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal recognition, which happens after every
 * timer interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *	jmp	@k0		! control-transfer instruction
 *	 ldc	k1, ssr		! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 *	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h
 *
 *	r0
 *	...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
/* Offsets to the stack */
OFF_R0	=  0		/* Return value. New ABI also arg4 */
OFF_R1	=  4		/* New ABI: arg5 */
OFF_R2	=  8		/* New ABI: arg6 */
OFF_R3	=  12		/* New ABI: syscall_nr */
OFF_R4	=  16		/* New ABI: arg0 */
OFF_R5	=  20		/* New ABI: arg1 */
OFF_R6	=  24		/* New ABI: arg2 */
OFF_R7	=  28		/* New ABI: arg3 */
OFF_SP	=  (15*4)
OFF_PC	=  (16*4)
OFF_SR	=  (16*4+8)
OFF_TRA	=  (16*4+6*4)

#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

#include <asm/entry-macros.S>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
! _and_
! TLB hits, but the access violates the protection.
! It can be a valid access, such as stack growth and/or C-O-W.
!
!
! Find the pmd/pte entry and load the TLB.
! If it's not found, cause an address error (SEGV).
!
! Although this could be written in assembly language (and it would be
! faster), this first version depends heavily on the C implementation.
!

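/*
 * A rough sketch of the C handlers the stubs below branch to (the exact
 * prototypes live in arch/sh/mm/fault.c and may differ between kernel
 * versions):
 *
 *	asmlinkage int handle_tlbmiss(struct pt_regs *regs,
 *				      unsigned long error_code,
 *				      unsigned long address);
 *	asmlinkage void do_page_fault(struct pt_regs *regs,
 *				      unsigned long error_code,
 *				      unsigned long address);
 *
 * Arguments are passed in r4 (pt_regs frame on the kernel stack),
 * r5 (FAULT_CODE_* bits) and r6 (faulting address read from MMU_TEA).
 * A non-zero return from handle_tlbmiss() falls back to do_page_fault().
 */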

#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_handle_tlbmiss
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_handle_tlbmiss
	 mov	#FAULT_CODE_WRITE, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_handle_tlbmiss
	 mov	#FAULT_CODE_INITIAL, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_do_page_fault
	 mov	#FAULT_CODE_PROT, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_do_page_fault
	 mov	#(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5

call_handle_tlbmiss:
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
0:
	mov	r8, r5
call_do_page_fault:
	mov.l	1f, r0
	mov.l	@r0, r6

	mov.l	3f, r0
	mov.l	4f, r1
	mov	r15, r4
	jmp	@r0
	 lds	r1, pr

	.align	2
1:	.long	MMU_TEA
2:	.long	handle_tlbmiss
3:	.long	do_page_fault
4:	.long	ret_from_exception

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0, r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1, r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align	2
1:	.long	MMU_TEA
2:	.long	do_address_error
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
ENTRY(sh_bios_handler)
	mov.l	1f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr		! restore pr
	mov	k4, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k3, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

! restore_regs()
! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
! - switch bank
! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
! k2 returns original pr
! k3 returns original sr
! k4 returns original stack pointer
! r8 passes SR bitmask, overwritten with restored data on return
! r9 trashed
! BL=0 on entry, on exit BL=1 (depending on r8).

ENTRY(restore_regs)
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r9
	or	r8, r9
	ldc	r9, sr
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4	! original stack pointer
	ldc.l	@r15+, spc
	mov.l	@r15+, k2	! original PR
	mov.l	@r15+, k3	! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	rts
	 add	#4, r15		! Skip syscall number

restore_all:
	mov.l	7f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr		! restore pr
	!
	! Calculate new SR value
	mov	k3, k2		! original SR value
	mov	#0xfffffff0, k1
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2		! Mask original SR value
	!
	mov	k3, k0		! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	mov	g_imask, k0
	!
6:	or	k0, k2		! Set the IMASK-bits
	ldc	k2, ssr
	!
	mov	k4, r15
	rte
	 nop

	.align	2
5:	.long	0x00001000	! DSP
7:	.long	0x30000000

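/*
 * In C terms, the SSR value computed by restore_all above is roughly:
 *
 *	new_ssr = (orig_sr & ~0xf0) |
 *		  ((((orig_sr >> 4) & 0xf) == 0xf) ? 0xf0 : g_imask);
 *
 * i.e. a context interrupted with IMASK=15 keeps it, anything else gets
 * the software-maintained mask from g_imask (r6_bank1).  The 0x30000000
 * literal handed to restore_regs sets the RB and BL bits in SR partway
 * through the unwind, so the k-register scratch values live in bank 1
 * and exceptions stay blocked until the rte.
 */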
! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
!	Should be aligned on a page boundary.
!
	.balign	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! 0x100: General exception vector
!
	.balign	256,0,256
general_exception:
	bra	handle_exception
	 sts	pr, k3		! save original pr value in k3

! prepare_stack()
! - roll back gRB
! - switch to kernel stack
! k0 returns original sp (after roll back)
! k1 trashed
! k2 trashed

prepare_stack:
#ifdef CONFIG_GUSA
	! Check for roll back gRB (User and Kernel)
	mov	r15, k0
	shll	k0
	bf/s	1f
	 shll	k0
	bf/s	1f
	 stc	spc, k1
	stc	r0_bank, k0
	cmp/hs	k0, k1		! test k1 (saved PC) >= k0 (saved r0)
	bt/s	2f
	 stc	r1_bank, k1

	add	#-2, k0
	add	r15, k0
	ldc	k0, spc		! PC = saved r0 + r15 - 2
2:	mov	k1, r15		! SP = r1
1:
#endif
	! Switch to kernel stack if needed
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit 30) by shifting it into...
	shll	k0		! ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:
	rts
	 nop

!
! 0x400: Instruction and Data TLB miss exception vector
!
	.balign	1024,0,1024
tlb_miss:
	sts	pr, k3		! save original pr value in k3

handle_exception:
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	5f, k2		! vector register address
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov.l	@k2, k2		! read out vector and keep in k2

handle_exception_special:
	setup_frame_reg

	! Setup return address and jump to exception handler
	mov.l	7f, r9		! fetch return address
	stc	r2_bank, r0	! k2 (vector)
	mov.l	6f, r10
	shlr2	r0
	shlr	r0
	mov.l	@(r0, r10), r10
	jmp	@r10
	 lds	r9, pr		! put return address in pr

	.align	L1_CACHE_SHIFT

! save_regs()
! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
! - switch bank
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k0 contains original stack pointer*
! k1 trashed
! k3 passes original pr*
! k4 passes SR bitmask
! BL=1 on entry, on exit BL=0.

ENTRY(save_regs)
	mov	#-1, r1
	mov.l	k1, @-r15	! set TRA (default: -1)
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	mov.l	k3, @-r15	! original pr in k3
	stc.l	spc, @-r15

	mov.l	k0, @-r15	! original stack pointer in k0
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15

	mov.l	0f, k3		! SR bits to set in k3

	! fall-through

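! Once save_low_regs below has run, the frame built on the kernel stack
! matches struct pt_regs (see the OFF_* offsets at the top of this file).
! A minimal sketch of that layout, assuming the usual 32-bit definition
! from <asm/ptrace_32.h>:
!
!	struct pt_regs {
!		unsigned long regs[16];	! r0..r15 (r15 = original sp)
!		unsigned long pc;	! saved spc
!		unsigned long pr;
!		unsigned long sr;	! saved ssr
!		unsigned long gbr;
!		unsigned long mach;
!		unsigned long macl;
!		unsigned long tra;	! -1 by default, see save_regs above
!	};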

! save_low_regs()
! - modify SR for bank switch
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k3 passes bits to set in SR
! k4 passes bits to clear in SR

ENTRY(save_low_regs)
	stc	sr, r8
	or	k3, r8
	and	k4, r8
	ldc	r8, sr

	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	rts
	 mov.l	r0, @-r15

!
! 0x600: Interrupt / NMI vector
!
	.balign	512,0,512
ENTRY(handle_interrupt)
	sts	pr, k3		! save original pr value in k3
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov	#-1, k2		! default vector kept in k2

	setup_frame_reg

	stc	sr, r0		! get status register
	shlr2	r0
	and	#0x3c, r0
	cmp/eq	#0x3c, r0
	bf	9f
	TRACE_IRQS_OFF
9:

	! Setup return address and jump to do_IRQ
	mov.l	4f, r9		! fetch return address
	lds	r9, pr		! put return address in pr
	mov.l	2f, r4
	mov.l	3f, r9
	mov.l	@r4, r4		! pass INTEVT vector as arg0

	shlr2	r4
	shlr	r4
	mov	r4, r0		! save vector->jmp table offset for later

	shlr2	r4		! vector to IRQ# conversion
	add	#-0x10, r4

	cmp/pz	r4		! is it a valid IRQ?
	bt	10f

	/*
	 * We got here as a result of taking the INTEVT path for something
	 * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
	 * path and special case the event dispatch instead. This is the
	 * expected path for the NMI (and any other brilliantly implemented
	 * exception), which effectively wants regular exception dispatch
	 * but is unfortunately reported through INTEVT rather than
	 * EXPEVT. Grr.
	 */
	mov.l	6f, r9
	mov.l	@(r0, r9), r9
	jmp	@r9
	 mov	r15, r8		! trap handlers take saved regs in r8

10:
	jmp	@r9		! Off to do_IRQ() we go.
	 mov	r15, r5		! pass saved registers as arg1

ENTRY(exception_none)
	rts
	 nop

	.align	L1_CACHE_SHIFT
exception_data:
0:	.long	0x000080f0	! FD=1, IMASK=15
1:	.long	0xcfffffff	! RB=0, BL=0
2:	.long	INTEVT
3:	.long	do_IRQ
4:	.long	ret_from_irq
5:	.long	EXPEVT
6:	.long	exception_handling_table
7:	.long	ret_from_exception
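
/*
 * For reference, the shift/subtract sequence in handle_interrupt is the
 * same conversion as the evt2irq() helper (assuming the usual SH layout
 * of 0x20-spaced vectors starting at 0x200):
 *
 *	irq = (INTEVT >> 5) - 16;	e.g. INTEVT 0x400 -> IRQ 16
 *
 * Events below 0x200 (such as the NMI at 0x1c0) come out negative, fail
 * the cmp/pz test, and are dispatched through exception_handling_table
 * rather than do_IRQ().
 */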