/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c			# k1 = Cause.ExcCode << 2
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1			# double it: table entries are 8 bytes
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced		# ExcCode 31: VCED
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei		# ExcCode 14: VCEI
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	 nop
	.set	pop
	END(__r4k_wait)

	/*
	 * If EPC points into __r4k_wait's rollback region, rewind it to the
	 * start of the region so the NEED_RESCHED test is re-run on return.
	 */
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f	/* round EPC down to the 32-byte boundary */
	bne	k0, k1, 9f
	MTC0	k0, CP0_EPC
9:
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	PTR_LA	ra, ret_from_irq
	PTR_LA	v0, plat_irq_dispatch
	jr	v0
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0		# TRACE_IRQS_OFF may clobber v0; preserve it
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	PTR_LA	ra, ret_from_irq
	jr	v0
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

	/* k1 has no spare CP0 register; stash it in a memory buffer. */
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do a
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/ \
	.set	mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
	li	a2, ~FPU_CSR_ALL_X	# clear all pending FP exception causes
	and	a2, a1
	ctc1	a2, fcr31
	.set	pop
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_msa_fpe
	_cfcmsa	a1, MSA_CSR
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ...  */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0,exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0,exception_count_\exception
	/* Reserve the counter itself; symbol must match the loads above. */
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	PTR_LA	ra, ret_from_exception
	j	do_\handler
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_vivt)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains a entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff	/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1	# k0 = EPC rounded to an even page
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_vivt)

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1	# EPC bit 0 set => microMIPS mode
	beqz	k0, 1f
	 xor	k1, k0		# clear the ISA bit to get the real address
	lhu	k0, (k1)	# fetch the two microMIPS halfwords
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16	# k1 = full 32-bit microMIPS opcode
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1	# misaligned EPC can't be ours here
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK	# round k1 down to thread_info base
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	arch=r4000
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif