/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the
	 * same physical address.  We can safely invalidate the line pointed
	 * to by c0_badvaddr because after return from this exception handler
	 * the load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
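	/*
	 * Invalidate the primary dcache line for this address without
	 * writing it back (Index_Store_Tag_D with TagLo zeroed), then
	 * write back and invalidate the matching secondary line; the
	 * re-executed load / store will refill coherently.
	 */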
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be a power of two) */
1:
	jr	ra
	 nop
	.set	pop
	END(__r4k_wait)

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm
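
/*
 * Rollback glue for the race between the TI_FLAGS test in __r4k_wait
 * and the wait instruction: the prologue above rounds the interrupted
 * EPC down to a 32-byte boundary and, if that matches __r4k_wait,
 * rewinds EPC to the start of the rollback region before falling
 * through to the real handler, so the NEED_RESCHED test is re-run on
 * return.  (The macro body assembles in reorder mode, so gas fills the
 * bne delay slot itself and the MTC0 runs only on the matched,
 * fall-through path.)
 */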
	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp	# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces
 * interrupt processing overhead.  The jump instruction will be
 * replaced at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
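
/*
 * The vector-setup code (set_vi_srs_handler() in traps.c is assumed
 * here) rewrites the lui/ori pair marked above so that v0 carries the
 * address of the per-vector handler; the ori sits in the jr delay
 * slot, so v0 is fully formed by the time except_vec_vi_handler runs
 * and jalr's it.
 */
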
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, which is passed
 * in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp	# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it thinks it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose	nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	PTR_LA	ra, ret_from_exception
	j	do_\handler
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm
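
/*
 * Illustrative expansion: "BUILD_HANDLER ov ov sti silent" emits
 * handle_ov (and the handle_ov_int alias), which saves the full
 * register set, runs __build_clear_sti (TRACE_IRQS_ON; STI), and
 * jumps to do_ov with a0 = pt_regs and ra preset to
 * ret_from_exception, so the C handler returns directly into the
 * exception epilogue.
 */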
	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef	CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_vivt)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff	/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_vivt)

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	arch=r4000
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif
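
/*
 * Note: check_daddi() (assumed to be the 64-bit DADDI errata probe in
 * cpu-bugs64.c) installs handle_daddi_ov only around its test
 * instruction and restores the normal overflow handler afterwards.
 */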