/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/* PACA save area offsets (exgen, exmc, etc) */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_DAR		40
#define EX_DSISR	48
#define EX_CCR		52
#define EX_CFAR		56
#define EX_PPR		64
#define EX_CTR		72
.if EX_SIZE != 10
	.error "EX_SIZE is wrong"
.endif

/*
 * Following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END  - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END  - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN    - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN    - virt, unreloc helpers (in practice, real can use)
 * EXC_COMMON          - After switching to virtual, relocated mode.
 */

#define EXC_REAL_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_COMMON_BEGIN(name)					\
	USE_TEXT_SECTION();					\
	.balign IFETCH_ALIGN_BYTES;				\
	.global name;						\
	_ASM_NOKPROBE_SYMBOL(name);				\
	DEFINE_FIXED_SYMBOL(name);				\
name:

#define TRAMP_REAL_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

#define EXC_REAL_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 * part of label. This requires that the label be within 64KB of kernelbase, and
 * that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l
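/*
 * Illustrative sketch only (hand expansion, with a hypothetical handler
 * label placed 0x5f00 bytes above kernelbase): LOAD_HANDLER(r10, label)
 * expands to approximately
 *
 *	ld	r10,PACAKBASE(r13)	# r10 = 0xc000000000000000
 *	ori	r10,r10,0x5f00		# r10 = 0xc000000000005f00
 *
 * ori can only supply the low 16 bits of the address, which is where the
 * 64KB restriction comes from. __LOAD_FAR_HANDLER below adds an addis to
 * cover a full 32-bit offset; using @h rather than @ha is fine there
 * because ori cannot carry into the upper halfword.
 */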
/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l;				\
	addis	reg,reg,(ABS_ADDR(label))@h

/*
 * Branch to label using its 0xC000 address. This results in an instruction
 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 * on using mtmsr rather than rfid.
 *
 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 * load KBASE for a slight optimisation.
 */
#define BRANCH_TO_C000(reg, label)					\
	__LOAD_FAR_HANDLER(reg, label);					\
	mtctr	reg;							\
	bctr

/*
 * Interrupt code generation macros
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define ISET_RI		.L_ISET_RI_\name\()	/* Run common code w/ MSR[RI]=1 */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_SKIP	.L_IKVM_SKIP_\name\()	/* Generate KVM skip handler */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IRECONCILE	.L_IRECONCILE_\name\()	/* Do RECONCILE_IRQ_STATE */
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */

#define INT_DEFINE_BEGIN(n)						\
.macro int_define_ ## n name

#define INT_DEFINE_END(n)						\
.endm ;									\
int_define_ ## n n ;							\
do_define_int n

.macro do_define_int name
	.ifndef IVEC
		.error "IVEC not defined"
	.endif
	.ifndef IHSRR
		IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
		IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
		IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
		IVIRT=1
	.endif
	.ifndef IISIDE
		IISIDE=0
	.endif
	.ifndef IDAR
		IDAR=0
	.endif
	.ifndef IDSISR
		IDSISR=0
	.endif
	.ifndef ISET_RI
		ISET_RI=1
	.endif
	.ifndef IBRANCH_TO_COMMON
		IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
		IREALMODE_COMMON=0
	.else
		.if ! IBRANCH_TO_COMMON
			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
		.endif
	.endif
	.ifndef IMASK
		IMASK=0
	.endif
	.ifndef IKVM_SKIP
		IKVM_SKIP=0
	.endif
	.ifndef IKVM_REAL
		IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
		IKVM_VIRT=0
	.endif
	.ifndef ISTACK
		ISTACK=1
	.endif
	.ifndef IRECONCILE
		IRECONCILE=1
	.endif
	.ifndef IKUAP
		IKUAP=1
	.endif
.endm
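/*
 * For illustration only (a hypothetical definition, not one of the real
 * ones below): an SRR-based, maskable interrupt using the default
 * PACA_EXGEN save area would be described as
 *
 *	INT_DEFINE_BEGIN(example_int)
 *		IVEC=0x900
 *		IMASK=IRQS_DISABLED
 *	INT_DEFINE_END(example_int)
 *
 * do_define_int then fills in every parameter left unspecified with its
 * default (IHSRR=0, IAREA=PACA_EXGEN, ISTACK=1, and so on), so the
 * generation macros below can test all parameters unconditionally.
 */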
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle. If both HV and PR KVM are possible, KVM interrupts go
 * first to kvmppc_interrupt_hv, which handles the PR guest case.
 */
#define kvmppc_interrupt kvmppc_interrupt_hv
#else
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif

.macro KVMTEST name
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	bne	\name\()_kvm
.endm

.macro GEN_KVM name
	.balign IFETCH_ALIGN_BYTES
\name\()_kvm:

	.if IKVM_SKIP
	cmpwi	r10,KVM_GUEST_MODE_SKIP
	beq	89f
	.else
BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_CFAR(r13)
	std	r10,HSTATE_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.endif

	ld	r10,IAREA+EX_CTR(r13)
	mtctr	r10
BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_PPR(r13)
	std	r10,HSTATE_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r11,IAREA+EX_R11(r13)
	ld	r12,IAREA+EX_R12(r13)
	std	r12,HSTATE_SCRATCH0(r13)
	sldi	r12,r9,32
	ld	r9,IAREA+EX_R9(r13)
	ld	r10,IAREA+EX_R10(r13)
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	ori	r12,r12,(IVEC + 0x2)
	FTR_SECTION_ELSE
	ori	r12,r12,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	ori	r12,r12,(IVEC + 0x2)
	.else
	ori	r12,r12,(IVEC)
	.endif
	b	kvmppc_interrupt

	.if IKVM_SKIP
89:	mtocrf	0x80,r9
	ld	r10,IAREA+EX_CTR(r13)
	mtctr	r10
	ld	r9,IAREA+EX_R9(r13)
	ld	r10,IAREA+EX_R10(r13)
	ld	r11,IAREA+EX_R11(r13)
	ld	r12,IAREA+EX_R12(r13)
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	b	kvmppc_skip_Hinterrupt
	FTR_SECTION_ELSE
	b	kvmppc_skip_interrupt
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	b	kvmppc_skip_Hinterrupt
	.else
	b	kvmppc_skip_interrupt
	.endif
	.endif
.endm

#else
.macro KVMTEST name
.endm
.macro GEN_KVM name
.endm
#endif
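/*
 * Note on the r12 value GEN_KVM passes to kvmppc_interrupt above (a sketch
 * of the resulting register layout, not extra code): the saved CR is
 * shifted into the high word and the trap number is OR'd into the low
 * word, i.e.
 *
 *	sldi	r12,r9,32	# r12[0:31]  = original CR
 *	ori	r12,r12,IVEC	# r12[48:63] = trap number
 *
 * with 0x2 added to the trap number for HSRR variants, so KVM can tell
 * which pair of save/restore registers the interrupt used.
 */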
/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by,
 *   or intended for, the host or guest kernel, but KVM must always be
 *   involved because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
 */

.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm

.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 - r12 */
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif

BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm
/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
\name\()_common_virt:
	.if IKVM_VIRT
		KVMTEST \name
1:
	.endif
	.endif /* IVIRT */
.endm

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name
	.endif
.endm

.macro __GEN_COMMON_BODY name
	.if IMASK
		.if ! ISTACK
		.error "No support for masked interrupt to use custom stack"
		.endif

		/* If coming from user, skip soft-mask tests. */
		andi.	r10,r12,MSR_PR
		bne	2f

		/* Kernel code running below __end_interrupts is implicitly
		 * soft-masked */
		LOAD_HANDLER(r10, __end_interrupts)
		cmpld	r11,r10
		li	r10,IMASK
		blt-	1f

		/* Test the soft mask state against our interrupt's bit */
		lbz	r10,PACAIRQSOFTMASK(r13)
1:		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif IVEC == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif IVEC == 0xa00 || IVEC == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif IVEC == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif IVEC == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if IHSRR_IF_HVMODE
		BEGIN_FTR_SECTION
		bne	masked_Hinterrupt
		FTR_SECTION_ELSE
		bne	masked_interrupt
		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
		.elseif IHSRR
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user	*/
2:	mr	r10,r1			/* Save r1			*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack	*/
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use		*/
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace	*/
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe	*/
	std	r11,_NIP(r1)		/* save SRR0 in stackframe	*/
	std	r12,_MSR(r1)		/* save SRR1 in stackframe	*/
	std	r10,0(r1)		/* make stack chain pointer	*/
	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
	std	r10,GPR1(r1)		/* save r1 in stackframe	*/

	.if ISET_RI
	li	r10,MSR_RI
	mtmsrd	r10,1			/* Set MSR_RI */
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode		*/
	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10)
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca		*/
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif
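	/*
	 * At this point the top of the interrupt stack frame is populated
	 * as sketched below (field offsets per asm/ptrace.h; illustrative
	 * only, mirroring the stores above):
	 *
	 *	GPR1(r1) <- original r1		_NIP(r1) <- [H]SRR0
	 *	GPR0(r1) <- original r0		_MSR(r1) <- [H]SRR1
	 *	0(r1)	 <- back chain		_CCR(r1) <- original CR
	 *
	 * The remaining GPRs are still live in the IAREA save area and are
	 * copied onto the frame next.
	 */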
	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe */
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe */
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)

	SAVE_NVGPRS(r1)

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_CFAR(r13)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	std	r2,GPR2(r1)		/* save r2 in stackframe	*/
	SAVE_4GPRS(3, r1)		/* save r3 - r6 in stackframe	*/
	SAVE_2GPRS(7, r1)		/* save r7, r8 in stackframe	*/
	mflr	r9			/* Get LR, later save to stack	*/
	ld	r2,PACATOC(r13)		/* get kernel TOC into r2	*/
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe	*/
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number		*/
	li	r10,0
	ld	r11,exception_marker@toc(r2)
	std	r10,RESULT(r1)		/* clear regs->result		*/
	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame	*/

	.if ISTACK
	ACCOUNT_STOLEN_TIME
	.endif

	.if IRECONCILE
	RECONCILE_IRQ_STATE(r10, r11)
	.endif
.endm

/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	.else
	mtspr	SPRN_SRR1,r9
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_8GPRS(2, r1)
	REST_4GPRS(10, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm

#define RUNLATCH_ON				\
BEGIN_FTR_SECTION				\
	ld	r3, PACA_THREAD_INFO(r13);	\
	ld	r4,TI_LOCAL_FLAGS(r3);		\
	andi.	r0,r4,_TLF_RUNLATCH;		\
	beql	ppc64_runlatch_on_trampoline;	\
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
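/*
 * Typical use of EXCEPTION_RESTORE_REGS (a sketch based on the handlers
 * below, e.g. system_reset_common): clear MSR[RI] first so that SRR0/1
 * can be set without risk of being clobbered by a recursive interrupt,
 * then unwind the frame and return with the matching rfid variant:
 *
 *	li	r9,0
 *	mtmsrd	r9,1			# clear MSR[RI]
 *	EXCEPTION_RESTORE_REGS		# refill SRR0/1, CTR, XER, LR, CR, GPRs
 *	RFI_TO_USER_OR_KERNEL
 */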
/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP				\
BEGIN_FTR_SECTION				\
	ld	r11, PACA_THREAD_INFO(r13);	\
	ld	r9,TI_LOCAL_FLAGS(r11);		\
	andi.	r10,r9,_TLF_NAPPING;		\
	bnel	power4_fixup_nap;		\
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * The scv instructions are a special case. They get a 0x3000 offset applied.
 * scv exceptions have unique reentrancy properties, see below.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x2fff : Real mode trampolines
 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,	0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,	0x1900, 0x3000)
OPEN_FIXED_SECTION(virt_vectors,	0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,	0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,		0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)
/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/**
 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 * This is a synchronous interrupt invoked with the "scv" instruction. The
 * system call does not alter the HV bit, so it is directed to the OS.
 *
 * Handling:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses below __end_interrupts as being
 * soft-masked.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
 * Before leaving the below __end_interrupts text, at least one of the
 * following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
 * - MSR_EE|MSR_RI is set (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc)
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 */
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_tramp
#else
	b	system_call_vectored_common
#endif
	nop

	/* SCV 1 - 127 */
	.rept	127
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	li	r0,-1 /* cause failure */
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_sigill_tramp
#else
	b	system_call_vectored_sigill
#endif
	.endr
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common)
	mtctr	r10
	bctr

TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_sigill)
	mtctr	r10
	bctr
#endif
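/*
 * Note on the layout above (illustrative arithmetic, assuming GET_PACA
 * expands to a single mfspr): scv number N enters at offset 0x3000 +
 * N*0x20, so each entry must be exactly 8 instructions (8 x 4 bytes =
 * 0x20). That is why the SCV 0 entry pads with a nop where the .rept
 * entries instead use their extra slot for the "li r0,-1" failure marker.
 */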
/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - As crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc., may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register set up. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	/*
	 * MSR_RI is not enabled, because PACA_EXNMI and the nmi stack are
	 * being used, so a nested NMI exception would corrupt them.
	 */
	ISET_RI=0
	ISTACK=0
	IRECONCILE=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
#endif
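/*
 * Note on the wake test above (illustrative only): SRR1[46:47] is the
 * wakeup-state field, and
 *
 *	rlwinm.	r5,r3,47-31,30,31
 *
 * rotates those two bits into the least significant positions and masks
 * everything else, so r5 = 0 means this was not a powersave wakeup, and
 * r5 >= 2 means some state was lost (which is what the "cmpwi cr1,r5,2 /
 * bltlr cr1" test in system_reset_idle_wake distinguishes).
 */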
#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
	 * to recover, but nested NMI will notice in_nmi and not recover
	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)
	li	r10,MSR_RI
	mtmsrd	r10,1

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset
	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing. We do not want to reconcile because that goes
	 * through irq tracing which we don't want in NMI.
	 *
	 * Save PACAIRQHAPPENED to RESULT (otherwise unused), and set HARD_DIS
	 * as we are running with MSR[EE]=0.
	 */
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	lbz	r10,PACAIRQHAPPENED(r13)
	std	r10,RESULT(r1)
	ori	r10,r10,PACA_IRQ_HARD_DIS
	stb	r10,PACAIRQHAPPENED(r13)

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	/*
	 * Restore soft mask settings.
	 */
	ld	r10,RESULT(r1)
	stb	r10,PACAIRQHAPPENED(r13)
	ld	r10,SOFTE(r1)
	stb	r10,PACAIRQSOFTMASK(r13)

	kuap_restore_amr r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL

	GEN_KVM system_reset
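/*
 * The NMI window protocol used above, in outline (a restatement of
 * system_reset_common, not extra code):
 *
 *	in_nmi++		# enter: lhz/addi/sth PACA_IN_NMI
 *	mtmsrd	MSR_RI,1	# only then mark recoverable
 *	... run handler ...
 *	mtmsrd	0,1		# exit: clear MSR[RI] first
 *	in_nmi--		# only then drop the nesting count
 *
 * MSR[RI] is only set while in_nmi is elevated, so a nested NMI arriving
 * in the window can observe in_nmi and refuse to reuse the live NMI stack
 * (the reentrancy test lives in system_reset_exception).
 */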
/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area;
 * the difference is that re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
 *   but it has different priorities). Check to see if the CPU was in power
 *   save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early
 *   handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	/*
	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
	 * nested machine check corrupts it. machine_check_common enables
	 * MSR_RI.
	 */
	ISET_RI=0
	ISTACK=0
	IDAR=1
	IDSISR=1
	IRECONCILE=0
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	ISET_RI=0
	IDAR=1
	IDSISR=1
	IKVM_SKIP=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif

#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r9,0;					\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Decrement paca->in_mce now RI is clear. */	\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	EXCEPTION_RESTORE_REGS

EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early

	/*
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
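	/*
	 * A reading aid for the entry test below (not extra logic): both
	 * comparisons are issued before in_mce is incremented, with cr0
	 * deciding first-entry (switch to the mc_emergency stack) and cr1
	 * deciding whether the MAX_MCE_DEPTH nesting limit has been hit
	 * (bail out to unrecoverable_mce).
	 */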
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)

	mr	r10,r1			/* Save r1 */
	bne	1f
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
1:	/* Limit nested MCE to level 4 to avoid stack overflow */
	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY machine_check_early

BEGIN_FTR_SECTION
	bl	enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	li	r10,MSR_RI
	mtmsrd	r10,1

	/*
	 * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
	 * system_reset_common)
	 */
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	lbz	r10,PACAIRQHAPPENED(r13)
	std	r10,RESULT(r1)
	ori	r10,r10,PACA_IRQ_HARD_DIS
	stb	r10,PACAIRQHAPPENED(r13)

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

	/*
	 * Restore soft mask settings.
	 */
	ld	r10,RESULT(r1)
	stb	r10,PACAIRQHAPPENED(r13)
	ld	r10,SOFTE(r1)
	stb	r10,PACAIRQSOFTMASK(r13)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * Check if we are coming from guest. If yes, then run the normal
	 * exception handler which will take the
	 * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
	 * to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	mce_deliver		/* continue if we are. */
#endif

	/*
	 * Check if we are coming from userspace. If yes, then run the normal
	 * exception handler which will deliver the MC event to this kernel.
	 */
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	mce_deliver		/* continue in V mode if we are. */

	/*
	 * At this point we are coming from kernel context.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an unrecoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	beq	unrecoverable_mce

	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */
	beq	unrecoverable_mce /* if !handled then panic */

	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL
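/*
 * To summarise the dispatch after machine_check_early returns (a
 * restatement of the branches above, not extra logic):
 *
 *	if (HSTATE_IN_GUEST)	-> mce_deliver (KVM will field it)
 *	else if (MSR[PR])	-> mce_deliver (late handler, virt mode)
 *	else if (!MSR[RI])	-> unrecoverable_mce
 *	else if (!handled)	-> unrecoverable_mce
 *	else			-> queue event, wind up, RFI_TO_KERNEL
 */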
mce_deliver:
	/*
	 * This is a host user or guest MCE. Restore all registers, then
	 * run the "late" handler. For host user, this will run the
	 * machine_check_exception handler in virtual mode like a normal
	 * interrupt handler. For guest, this will trigger the KVM test
	 * and branch to the KVM interrupt similarly to other interrupts.
	 */
BEGIN_FTR_SECTION
	ld	r10,ORIG_GPR3(r1)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	GEN_INT_ENTRY machine_check, virt=0

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	GEN_COMMON machine_check

	FINISH_NAP
	/* Enable MSR_RI when finished with PACA_EXMC */
	li	r10,MSR_RI
	mtmsrd	r10,1
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	interrupt_return

	GEN_KVM machine_check


#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * GPR-loss wakeups are relatively straightforward, because the
	 * idle sleep code has saved all non-volatile registers on its
	 * own stack, and r1 in PACAR1.
	 *
	 * For no-loss wakeups the r1 and lr registers used by the
	 * early machine check handler have to be restored first. r2 is
	 * the kernel TOC, so no need to restore it.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)
	ld	r1,GPR1(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	b	idle_return_gpr_loss
#endif

EXC_COMMON_BEGIN(unrecoverable_mce)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
BEGIN_FTR_SECTION
	li	r10,0 /* clear MSR_RI */
	mtmsrd	r10,1
	bl	disable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r10,PACAKMSR(r13)
	li	r3,MSR_ME
	andc	r10,r10,r3
	mtmsrd	r10

	lhz	r12,PACA_IN_MCE(r13)
	subi	r12,r12,1
	sth	r12,PACA_IN_MCE(r13)

	/* Invoke machine_check_exception to print MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception

	/*
	 * We will not reach here. Even if we did, there is no way out.
	 * Call unrecoverable_exception and die.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.
/**
 * Interrupt 0x300 - Data Storage Interrupt (DSI).
 * This is a synchronous interrupt generated due to a data access exception,
 * e.g., a load or store which does not have a valid page table entry with
 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
 *
 * Handling:
 * - Hash MMU
 *   Go to do_hash_page first to see if the HPT can be filled from an entry in
 *   the Linux page table. Hash faults can hit in kernel mode in a fairly
 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
 *   backed by Linux page tables.
 *
 *   If none is found, do a Linux page fault. Linux page faults can happen in
 *   kernel mode due to user copy operations of course.
 *
 *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
 *   MMU context, which may cause a DSI in the host, which must go to the
 *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
 *   always be used regardless of AIL setting.
 *
 * - Radix MMU
 *   The hardware loads from the Linux page table directly, so a fault goes
 *   immediately to Linux page fault.
 *
 * Conditions like DAWR match are handled on the way in to Linux page fault.
 */
INT_DEFINE_BEGIN(data_access)
	IVEC=0x300
	IDAR=1
	IDSISR=1
	IKVM_SKIP=1
	IKVM_REAL=1
INT_DEFINE_END(data_access)

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
	GEN_COMMON data_access
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	ld	r6,_MSR(r1)
	li	r3,0x300
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)

	GEN_KVM data_access
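/*
 * Note on the BEGIN_MMU_FTR_SECTION block above (describing the general
 * feature-fixup mechanism, see asm/feature-fixups.h): one of the two
 * alternatives is patched in at boot according to MMU_FTR_TYPE_RADIX, so
 * on a radix host the do_hash_page path is replaced and a data storage
 * interrupt falls straight through to handle_page_fault.
 */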
/**
 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
 * This is a synchronous interrupt in response to an MMU fault missing SLB
 * entry for HPT, or an address outside RPT translation range.
 *
 * Handling:
 * - HPT:
 *   This refills the SLB, or reports an access fault similarly to a bad page
 *   fault. When coming from user-mode, the SLB handler may access any kernel
 *   data, though it may itself take a DSLB. When coming from kernel mode,
 *   recursive faults must be avoided so access is restricted to the kernel
 *   image text/data, kernel stack, and any data allocated below
 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 *   on user-handler data structures.
 *
 *   KVM: Same as 0x300, DSLB must test for KVM guest.
 *
 * A dedicated save area EXSLB is used (XXX: but it actually need not be
 * these days, we could use EXGEN).
 */
INT_DEFINE_BEGIN(data_access_slb)
	IVEC=0x380
	IAREA=PACA_EXSLB
	IRECONCILE=0
	IDAR=1
	IKVM_SKIP=1
	IKVM_REAL=1
INT_DEFINE_END(data_access_slb)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
	GEN_COMMON data_access_slb
	ld	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	std	r3,RESULT(r1)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r4,_DAR(r1)
	ld	r5,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	interrupt_return

	GEN_KVM data_access_slb


/**
 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSI, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
 */
INT_DEFINE_BEGIN(instruction_access)
	IVEC=0x400
	IISIDE=1
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)

EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
	GEN_COMMON instruction_access
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	ld	r6,_MSR(r1)
	li	r3,0x400
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)

	GEN_KVM instruction_access
/**
 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSLB, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR).
 */
INT_DEFINE_BEGIN(instruction_access_slb)
	IVEC=0x480
	IAREA=PACA_EXSLB
	IRECONCILE=0
	IISIDE=1
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
	GEN_COMMON instruction_access_slb
	ld	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	std	r3,RESULT(r1)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r4,_DAR(r1)
	ld	r5,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	interrupt_return

	GEN_KVM instruction_access_slb


/**
 * Interrupt 0x500 - External Interrupt.
 * This is an asynchronous maskable interrupt in response to an "external
 * exception" from the interrupt controller or hypervisor (e.g., device
 * interrupt). It is maskable in hardware by clearing MSR[EE], and
 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
 * interrupts are delivered with HSRR registers; guests use SRRs, which
 * requires IHSRR_IF_HVMODE.
 *
 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
 * external interrupts are delivered as Hypervisor Virtualization Interrupts
 * rather than External Interrupts.
 *
 * Handling:
 * This calls into the Linux IRQ handler. NVGPRs are not saved to reduce
 * overhead, because registers at the time of the interrupt are not so
 * important as it is asynchronous.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 */
INT_DEFINE_BEGIN(hardware_interrupt)
	IVEC=0x500
	IHSRR_IF_HVMODE=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(hardware_interrupt)

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
	GEN_COMMON hardware_interrupt
	FINISH_NAP
	RUNLATCH_ON
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_IRQ
	b	interrupt_return

	GEN_KVM hardware_interrupt
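/*
 * Note on IHSRR_IF_HVMODE above: with LPCR[LPES] as Linux sets it, an
 * external interrupt arrives via HSRR0/1 when running in HV mode but via
 * SRR0/1 otherwise, so every HSRR/SRR choice in the entry and KVM code is
 * made inside a CPU_FTR_HVMODE feature section (see GEN_INT_ENTRY and
 * __GEN_COMMON_ENTRY).
 */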
/**
 * Interrupt 0x600 - Alignment Interrupt
 * This is a synchronous interrupt in response to a data alignment fault.
 */
INT_DEFINE_BEGIN(alignment)
	IVEC=0x600
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(alignment)

EXC_REAL_BEGIN(alignment, 0x600, 0x100)
	GEN_INT_ENTRY alignment, virt=0
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
	GEN_INT_ENTRY alignment, virt=1
EXC_VIRT_END(alignment, 0x4600, 0x100)
EXC_COMMON_BEGIN(alignment_common)
	GEN_COMMON alignment
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return

	GEN_KVM alignment


/**
 * Interrupt 0x700 - Program Interrupt (program check).
 * This is a synchronous interrupt in response to various instruction faults:
 * traps, privilege errors, TM errors, floating point exceptions.
 *
 * Handling:
 * This interrupt may use the "emergency stack" in some cases when being taken
 * from kernel context, which complicates handling.
 */
INT_DEFINE_BEGIN(program_check)
	IVEC=0x700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(program_check)

EXC_REAL_BEGIN(program_check, 0x700, 0x100)
	GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
	GEN_INT_ENTRY program_check, virt=1
EXC_VIRT_END(program_check, 0x4700, 0x100)
EXC_COMMON_BEGIN(program_check_common)
	__GEN_COMMON_ENTRY program_check

	/*
	 * It's possible to receive a TM Bad Thing type program check with
	 * userspace register values (in particular r1), but with SRR1 reporting
	 * that we came from the kernel. Normally that would confuse the bad
	 * stack logic, and we would report a bad kernel stack pointer. Instead
	 * we switch to the emergency stack if we're taking a TM Bad Thing from
	 * the kernel.
	 */

	andi.	r10,r12,MSR_PR
	bne	.Lnormal_stack		/* If userspace, go normal path */

	andis.	r10,r12,(SRR1_PROGTM)@h
	bne	.Lemergency_stack	/* If TM, emergency	*/

	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace */
	blt	.Lnormal_stack		/* normal path if not */

	/* Use the emergency stack */
.Lemergency_stack:
	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label */
					/* 3 in EXCEPTION_PROLOG_COMMON */
	mr	r10,r1			/* Save r1			*/
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	__ISTACK(program_check)=0
	__GEN_COMMON_BODY program_check
	b	.Ldo_program_check

.Lnormal_stack:
	__ISTACK(program_check)=1
	__GEN_COMMON_BODY program_check

.Ldo_program_check:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return

	GEN_KVM program_check
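/*
 * The stack selection in program_check_common above, in summary (a
 * restatement of the branches, not extra logic):
 *
 *	if (MSR[PR])				-> normal stack
 *	else if (SRR1 & SRR1_PROGTM)		-> emergency stack
 *	else if (r1 is a kernel address)	-> normal stack
 *	else					-> emergency stack
 *
 * The signed compare against -INT_FRAME_SIZE works because kernel stack
 * pointers are "negative" (0xc...) values.
 */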
1758 */ 1759INT_DEFINE_BEGIN(fp_unavailable) 1760 IVEC=0x800 1761 IRECONCILE=0 1762#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1763 IKVM_REAL=1 1764#endif 1765INT_DEFINE_END(fp_unavailable) 1766 1767EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100) 1768 GEN_INT_ENTRY fp_unavailable, virt=0 1769EXC_REAL_END(fp_unavailable, 0x800, 0x100) 1770EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100) 1771 GEN_INT_ENTRY fp_unavailable, virt=1 1772EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) 1773EXC_COMMON_BEGIN(fp_unavailable_common) 1774 GEN_COMMON fp_unavailable 1775 bne 1f /* if from user, just load it up */ 1776 RECONCILE_IRQ_STATE(r10, r11) 1777 addi r3,r1,STACK_FRAME_OVERHEAD 1778 bl kernel_fp_unavailable_exception 17790: trap 1780 EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 17811: 1782#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1783BEGIN_FTR_SECTION 1784 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 1785 * transaction), go do TM stuff 1786 */ 1787 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 1788 bne- 2f 1789END_FTR_SECTION_IFSET(CPU_FTR_TM) 1790#endif 1791 bl load_up_fpu 1792 b fast_interrupt_return 1793#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 17942: /* User process was in a transaction */ 1795 RECONCILE_IRQ_STATE(r10, r11) 1796 addi r3,r1,STACK_FRAME_OVERHEAD 1797 bl fp_unavailable_tm 1798 b interrupt_return 1799#endif 1800 1801 GEN_KVM fp_unavailable 1802 1803 1804/** 1805 * Interrupt 0x900 - Decrementer Interrupt. 1806 * This is an asynchronous interrupt in response to a decrementer exception 1807 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing 1808 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e., 1809 * local_irq_disable()). 1810 * 1811 * Handling: 1812 * This calls into Linux timer handler. NVGPRs are not saved (see 0x500). 1813 * 1814 * If soft masked, the masked handler will note the pending interrupt for 1815 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled 1816 * in the interrupted context. 1817 * If PPC_WATCHDOG is configured, the soft masked handler will actually set 1818 * things back up to run soft_nmi_interrupt as a regular interrupt handler 1819 * on the emergency stack. 1820 */ 1821INT_DEFINE_BEGIN(decrementer) 1822 IVEC=0x900 1823 IMASK=IRQS_DISABLED 1824#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1825 IKVM_REAL=1 1826#endif 1827INT_DEFINE_END(decrementer) 1828 1829EXC_REAL_BEGIN(decrementer, 0x900, 0x80) 1830 GEN_INT_ENTRY decrementer, virt=0 1831EXC_REAL_END(decrementer, 0x900, 0x80) 1832EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) 1833 GEN_INT_ENTRY decrementer, virt=1 1834EXC_VIRT_END(decrementer, 0x4900, 0x80) 1835EXC_COMMON_BEGIN(decrementer_common) 1836 GEN_COMMON decrementer 1837 FINISH_NAP 1838 RUNLATCH_ON 1839 addi r3,r1,STACK_FRAME_OVERHEAD 1840 bl timer_interrupt 1841 b interrupt_return 1842 1843 GEN_KVM decrementer 1844 1845 1846/** 1847 * Interrupt 0x980 - Hypervisor Decrementer Interrupt. 1848 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC 1849 * register. 1850 * 1851 * Handling: 1852 * Linux does not use this outside KVM where it's used to keep a host timer 1853 * while the guest is given control of DEC. It should normally be caught by 1854 * the KVM test and routed there. 
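 *
 * A stray HDEC that does reach Linux (e.g., one left pending on exit
 * from a guest) is simply dismissed: the common code below restores the
 * saved registers from the PACA and returns with hrfid, without ever
 * touching the kernel stack.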
1855 */ 1856INT_DEFINE_BEGIN(hdecrementer) 1857 IVEC=0x980 1858 IHSRR=1 1859 ISTACK=0 1860 IRECONCILE=0 1861 IKVM_REAL=1 1862 IKVM_VIRT=1 1863INT_DEFINE_END(hdecrementer) 1864 1865EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80) 1866 GEN_INT_ENTRY hdecrementer, virt=0 1867EXC_REAL_END(hdecrementer, 0x980, 0x80) 1868EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80) 1869 GEN_INT_ENTRY hdecrementer, virt=1 1870EXC_VIRT_END(hdecrementer, 0x4980, 0x80) 1871EXC_COMMON_BEGIN(hdecrementer_common) 1872 __GEN_COMMON_ENTRY hdecrementer 1873 /* 1874 * Hypervisor decrementer interrupts not caught by the KVM test 1875 * shouldn't occur but are sometimes left pending on exit from a KVM 1876 * guest. We don't need to do anything to clear them, as they are 1877 * edge-triggered. 1878 * 1879 * Be careful to avoid touching the kernel stack. 1880 */ 1881 ld r10,PACA_EXGEN+EX_CTR(r13) 1882 mtctr r10 1883 mtcrf 0x80,r9 1884 ld r9,PACA_EXGEN+EX_R9(r13) 1885 ld r10,PACA_EXGEN+EX_R10(r13) 1886 ld r11,PACA_EXGEN+EX_R11(r13) 1887 ld r12,PACA_EXGEN+EX_R12(r13) 1888 ld r13,PACA_EXGEN+EX_R13(r13) 1889 HRFI_TO_KERNEL 1890 1891 GEN_KVM hdecrementer 1892 1893 1894/** 1895 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt. 1896 * This is an asynchronous interrupt in response to a msgsndp doorbell. 1897 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with 1898 * IRQS_DISABLED mask (i.e., local_irq_disable()). 1899 * 1900 * Handling: 1901 * Guests may use this for IPIs between threads in a core if the 1902 * hypervisor supports it. NVGPRS are not saved (see 0x500). 1903 * 1904 * If soft masked, the masked handler will note the pending interrupt for 1905 * replay, leaving MSR[EE] enabled in the interrupted context because the 1906 * doorbells are edge triggered. 1907 */ 1908INT_DEFINE_BEGIN(doorbell_super) 1909 IVEC=0xa00 1910 IMASK=IRQS_DISABLED 1911#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1912 IKVM_REAL=1 1913#endif 1914INT_DEFINE_END(doorbell_super) 1915 1916EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100) 1917 GEN_INT_ENTRY doorbell_super, virt=0 1918EXC_REAL_END(doorbell_super, 0xa00, 0x100) 1919EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100) 1920 GEN_INT_ENTRY doorbell_super, virt=1 1921EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) 1922EXC_COMMON_BEGIN(doorbell_super_common) 1923 GEN_COMMON doorbell_super 1924 FINISH_NAP 1925 RUNLATCH_ON 1926 addi r3,r1,STACK_FRAME_OVERHEAD 1927#ifdef CONFIG_PPC_DOORBELL 1928 bl doorbell_exception 1929#else 1930 bl unknown_exception 1931#endif 1932 b interrupt_return 1933 1934 GEN_KVM doorbell_super 1935 1936 1937EXC_REAL_NONE(0xb00, 0x100) 1938EXC_VIRT_NONE(0x4b00, 0x100) 1939 1940/** 1941 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall). 1942 * This is a synchronous interrupt invoked with the "sc" instruction. The 1943 * system call is invoked with "sc 0" and does not alter the HV bit, so it 1944 * is directed to the currently running OS. The hypercall is invoked with 1945 * "sc 1" and it sets HV=1, so it elevates to hypervisor. 1946 * 1947 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to 1948 * 0x4c00 virtual mode. 1949 * 1950 * Handling: 1951 * If the KVM test fires then it was due to a hypercall and is accordingly 1952 * routed to KVM. Otherwise this executes a normal Linux system call. 1953 * 1954 * Call convention: 1955 * 1956 * syscall and hypercalls register conventions are documented in 1957 * Documentation/powerpc/syscall64-abi.rst and 1958 * Documentation/powerpc/papr_hcalls.rst respectively. 
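 *
 * Illustrative userspace invocation (added example; register usage per
 * syscall64-abi.rst):
 *
 *      li r0,<syscall#>        # e.g., __NR_getpid
 *      sc                      # "sc 0" -> 0xc00 (0x4c00 when AIL)
 *      # on return: r3 = return value, cr0.SO set on error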
 *
 * The intersection of volatile registers that don't contain possible
 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 * without saving, though xer is not a good idea to use, as hardware may
 * interpret some bits so it may be costly to change them.
 */
INT_DEFINE_BEGIN(system_call)
        IVEC=0xc00
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(system_call)

.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        /*
         * There is a little bit of juggling to get syscall and hcall
         * working well. Save r13 in ctr to avoid using SPRG scratch
         * register.
         *
         * Userspace syscalls have already saved the PPR, hcalls must save
         * it before setting HMT_MEDIUM.
         */
        mtctr r13
        GET_PACA(r13)
        std r10,PACA_EXGEN+EX_R10(r13)
        INTERRUPT_TO_KERNEL
        KVMTEST system_call /* uses r10, branch to system_call_kvm */
        mfctr r9
#else
        mr r9,r13
        GET_PACA(r13)
        INTERRUPT_TO_KERNEL
#endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
BEGIN_FTR_SECTION
        cmpdi r0,0x1ebe
        beq- 1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
#endif

        /* We reach here with PACA in r13, r13 in r9. */
        mfspr r11,SPRN_SRR0
        mfspr r12,SPRN_SRR1

        HMT_MEDIUM

        .if ! \virt
        __LOAD_HANDLER(r10, system_call_common)
        mtspr SPRN_SRR0,r10
        ld r10,PACAKMSR(r13)
        mtspr SPRN_SRR1,r10
        RFI_TO_KERNEL
        b . /* prevent speculative execution */
        .else
        li r10,MSR_RI
        mtmsrd r10,1 /* Set RI (EE=0) */
#ifdef CONFIG_RELOCATABLE
        __LOAD_HANDLER(r10, system_call_common)
        mtctr r10
        bctr
#else
        b system_call_common
#endif
        .endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
        /* Fast LE/BE switch system call */
1:      mfspr r12,SPRN_SRR1
        xori r12,r12,MSR_LE
        mtspr SPRN_SRR1,r12
        mr r13,r9
        RFI_TO_USER /* return to userspace */
        b . /* prevent speculative execution */
#endif
.endm

EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
        SYSTEM_CALL 0
EXC_REAL_END(system_call, 0xc00, 0x100)
EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
        SYSTEM_CALL 1
EXC_VIRT_END(system_call, 0x4c00, 0x100)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(system_call_kvm)
        /*
         * This is a hcall, so register convention is as above, with these
         * differences:
         * r13 = PACA
         * ctr = orig r13
         * orig r10 saved in PACA
         */
        /*
         * Save the PPR (on systems that support it) before changing to
         * HMT_MEDIUM. That allows the KVM code to save that value into the
         * guest state (it is the guest's PPR value).
         */
BEGIN_FTR_SECTION
        mfspr r10,SPRN_PPR
        std r10,HSTATE_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        HMT_MEDIUM
        mfctr r10
        SET_SCRATCH0(r10)
        mfcr r10
        std r12,HSTATE_SCRATCH0(r13)
        sldi r12,r10,32
        ori r12,r12,0xc00
#ifdef CONFIG_RELOCATABLE
        /*
         * Requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
         * outside the head section.
         */
        __LOAD_FAR_HANDLER(r10, kvmppc_interrupt)
        mtctr r10
        ld r10,PACA_EXGEN+EX_R10(r13)
        bctr
#else
        ld r10,PACA_EXGEN+EX_R10(r13)
        b kvmppc_interrupt
#endif
#endif


/**
 * Interrupt 0xd00 - Trace Interrupt.
 * This is a synchronous interrupt in response to instruction step or
 * breakpoint faults.
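 *
 * For example (added note), single stepping as used by ptrace is
 * implemented by returning to the traced context with MSR[SE]=1, which
 * causes 0xd00 to be taken after the next instruction completes.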
2088 */ 2089INT_DEFINE_BEGIN(single_step) 2090 IVEC=0xd00 2091#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2092 IKVM_REAL=1 2093#endif 2094INT_DEFINE_END(single_step) 2095 2096EXC_REAL_BEGIN(single_step, 0xd00, 0x100) 2097 GEN_INT_ENTRY single_step, virt=0 2098EXC_REAL_END(single_step, 0xd00, 0x100) 2099EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100) 2100 GEN_INT_ENTRY single_step, virt=1 2101EXC_VIRT_END(single_step, 0x4d00, 0x100) 2102EXC_COMMON_BEGIN(single_step_common) 2103 GEN_COMMON single_step 2104 addi r3,r1,STACK_FRAME_OVERHEAD 2105 bl single_step_exception 2106 b interrupt_return 2107 2108 GEN_KVM single_step 2109 2110 2111/** 2112 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI). 2113 * This is a synchronous interrupt in response to an MMU fault caused by a 2114 * guest data access. 2115 * 2116 * Handling: 2117 * This should always get routed to KVM. In radix MMU mode, this is caused 2118 * by a guest nested radix access that can't be performed due to the 2119 * partition scope page table. In hash mode, this can be caused by guests 2120 * running with translation disabled (virtual real mode) or with VPM enabled. 2121 * KVM will update the page table structures or disallow the access. 2122 */ 2123INT_DEFINE_BEGIN(h_data_storage) 2124 IVEC=0xe00 2125 IHSRR=1 2126 IDAR=1 2127 IDSISR=1 2128 IKVM_SKIP=1 2129 IKVM_REAL=1 2130 IKVM_VIRT=1 2131INT_DEFINE_END(h_data_storage) 2132 2133EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20) 2134 GEN_INT_ENTRY h_data_storage, virt=0, ool=1 2135EXC_REAL_END(h_data_storage, 0xe00, 0x20) 2136EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20) 2137 GEN_INT_ENTRY h_data_storage, virt=1, ool=1 2138EXC_VIRT_END(h_data_storage, 0x4e00, 0x20) 2139EXC_COMMON_BEGIN(h_data_storage_common) 2140 GEN_COMMON h_data_storage 2141 addi r3,r1,STACK_FRAME_OVERHEAD 2142BEGIN_MMU_FTR_SECTION 2143 ld r4,_DAR(r1) 2144 li r5,SIGSEGV 2145 bl bad_page_fault 2146MMU_FTR_SECTION_ELSE 2147 bl unknown_exception 2148ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX) 2149 b interrupt_return 2150 2151 GEN_KVM h_data_storage 2152 2153 2154/** 2155 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI). 2156 * This is a synchronous interrupt in response to an MMU fault caused by a 2157 * guest instruction fetch, similar to HDSI. 2158 */ 2159INT_DEFINE_BEGIN(h_instr_storage) 2160 IVEC=0xe20 2161 IHSRR=1 2162 IKVM_REAL=1 2163 IKVM_VIRT=1 2164INT_DEFINE_END(h_instr_storage) 2165 2166EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20) 2167 GEN_INT_ENTRY h_instr_storage, virt=0, ool=1 2168EXC_REAL_END(h_instr_storage, 0xe20, 0x20) 2169EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20) 2170 GEN_INT_ENTRY h_instr_storage, virt=1, ool=1 2171EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20) 2172EXC_COMMON_BEGIN(h_instr_storage_common) 2173 GEN_COMMON h_instr_storage 2174 addi r3,r1,STACK_FRAME_OVERHEAD 2175 bl unknown_exception 2176 b interrupt_return 2177 2178 GEN_KVM h_instr_storage 2179 2180 2181/** 2182 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt. 
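 * This is a synchronous interrupt in response to an instruction that the
 * hardware cannot execute directly (e.g., an illegal or unimplemented
 * instruction image), giving the hypervisor a chance to emulate it.
 * (Summary added for clarity; the instruction image is supplied in HEIR,
 * see the ISA for the precise conditions.)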
 */
INT_DEFINE_BEGIN(emulation_assist)
        IVEC=0xe40
        IHSRR=1
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(emulation_assist)

EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
        GEN_INT_ENTRY emulation_assist, virt=0, ool=1
EXC_REAL_END(emulation_assist, 0xe40, 0x20)
EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
        GEN_INT_ENTRY emulation_assist, virt=1, ool=1
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
EXC_COMMON_BEGIN(emulation_assist_common)
        GEN_COMMON emulation_assist
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl emulation_assist_interrupt
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
        b interrupt_return

        GEN_KVM emulation_assist


/**
 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
 * Exception. It is always taken in real mode but uses HSRR registers
 * unlike SRESET and MCE.
 *
 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * Handling:
 * This is a special case: it is handled similarly to machine checks, with an
 * initial real mode handler that is not soft-masked and attempts to fix the
 * problem, followed by a regular handler which is soft-maskable and reports
 * the problem.
 *
 * The emergency stack is used for the early real mode handler.
 *
 * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
 * either use soft-masking for the MCE, or use irq_work for the HMI.
 *
 * KVM:
 * Unlike MCE, this calls into KVM without calling the real mode handler
 * first.
 */
INT_DEFINE_BEGIN(hmi_exception_early)
        IVEC=0xe60
        IHSRR=1
        IREALMODE_COMMON=1
        ISTACK=0
        IRECONCILE=0
        IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
        IKVM_REAL=1
INT_DEFINE_END(hmi_exception_early)

INT_DEFINE_BEGIN(hmi_exception)
        IVEC=0xe60
        IHSRR=1
        IMASK=IRQS_DISABLED
        IKVM_REAL=1
INT_DEFINE_END(hmi_exception)

EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
        GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)

EXC_COMMON_BEGIN(hmi_exception_early_common)
        __GEN_REALMODE_COMMON_ENTRY hmi_exception_early

        mr r10,r1                       /* Save r1 */
        ld r1,PACAEMERGSP(r13)          /* Use emergency stack for realmode */
        subi r1,r1,INT_FRAME_SIZE       /* alloc stack frame */

        __GEN_COMMON_BODY hmi_exception_early

        addi r3,r1,STACK_FRAME_OVERHEAD
        bl hmi_exception_realmode
        cmpdi cr0,r3,0
        bne 1f

        EXCEPTION_RESTORE_REGS hsrr=1
        HRFI_TO_USER_OR_KERNEL

1:
        /*
         * Go to virtual mode and pull the HMI event information from
         * firmware.
         */
        EXCEPTION_RESTORE_REGS hsrr=1
        GEN_INT_ENTRY hmi_exception, virt=0

        GEN_KVM hmi_exception_early

EXC_COMMON_BEGIN(hmi_exception_common)
        GEN_COMMON hmi_exception
        FINISH_NAP
        RUNLATCH_ON
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl handle_hmi_exception
        b interrupt_return

        GEN_KVM hmi_exception


/**
 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
 * This is an asynchronous interrupt in response to a msgsnd doorbell.
 * Similar to the 0xa00 doorbell but for host rather than guest.
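 *
 * Illustrative trigger (added note): another thread executes
 *      msgsnd r3       # r3 encodes message type and target per the ISA
 * and 0xe80 is raised on the addressed thread(s) when it is enabled.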
 */
INT_DEFINE_BEGIN(h_doorbell)
        IVEC=0xe80
        IHSRR=1
        IMASK=IRQS_DISABLED
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(h_doorbell)

EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
        GEN_INT_ENTRY h_doorbell, virt=0, ool=1
EXC_REAL_END(h_doorbell, 0xe80, 0x20)
EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
        GEN_INT_ENTRY h_doorbell, virt=1, ool=1
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
        GEN_COMMON h_doorbell
        FINISH_NAP
        RUNLATCH_ON
        addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
        bl doorbell_exception
#else
        bl unknown_exception
#endif
        b interrupt_return

        GEN_KVM h_doorbell


/**
 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
 * This is an asynchronous interrupt in response to an "external exception".
 * Similar to 0x500 but for host only.
 */
INT_DEFINE_BEGIN(h_virt_irq)
        IVEC=0xea0
        IHSRR=1
        IMASK=IRQS_DISABLED
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(h_virt_irq)

EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
        GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
        GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
        GEN_COMMON h_virt_irq
        FINISH_NAP
        RUNLATCH_ON
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl do_IRQ
        b interrupt_return

        GEN_KVM h_virt_irq


EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)


/*
 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
 * This is an asynchronous interrupt in response to a PMU exception.
 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
 *
 * Handling:
 * This calls into the perf subsystem.
 *
 * Like the watchdog soft-nmi, it appears as an NMI interrupt to Linux, in
 * that it runs under local_irq_disable. However it may be soft-masked in
 * powerpc-specific code.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 */
INT_DEFINE_BEGIN(performance_monitor)
        IVEC=0xf00
        IMASK=IRQS_PMI_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(performance_monitor)

EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
        GEN_INT_ENTRY performance_monitor, virt=0, ool=1
EXC_REAL_END(performance_monitor, 0xf00, 0x20)
EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
        GEN_INT_ENTRY performance_monitor, virt=1, ool=1
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
        GEN_COMMON performance_monitor
        FINISH_NAP
        RUNLATCH_ON
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl performance_monitor_exception
        b interrupt_return

        GEN_KVM performance_monitor


/**
 * Interrupt 0xf20 - Vector Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing a vector (or altivec) instruction with MSR[VEC]=0.
 * Similar to FP unavailable.
2407 */ 2408INT_DEFINE_BEGIN(altivec_unavailable) 2409 IVEC=0xf20 2410 IRECONCILE=0 2411#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2412 IKVM_REAL=1 2413#endif 2414INT_DEFINE_END(altivec_unavailable) 2415 2416EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20) 2417 GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1 2418EXC_REAL_END(altivec_unavailable, 0xf20, 0x20) 2419EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20) 2420 GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1 2421EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20) 2422EXC_COMMON_BEGIN(altivec_unavailable_common) 2423 GEN_COMMON altivec_unavailable 2424#ifdef CONFIG_ALTIVEC 2425BEGIN_FTR_SECTION 2426 beq 1f 2427#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2428 BEGIN_FTR_SECTION_NESTED(69) 2429 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2430 * transaction), go do TM stuff 2431 */ 2432 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2433 bne- 2f 2434 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2435#endif 2436 bl load_up_altivec 2437 b fast_interrupt_return 2438#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 24392: /* User process was in a transaction */ 2440 RECONCILE_IRQ_STATE(r10, r11) 2441 addi r3,r1,STACK_FRAME_OVERHEAD 2442 bl altivec_unavailable_tm 2443 b interrupt_return 2444#endif 24451: 2446END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2447#endif 2448 RECONCILE_IRQ_STATE(r10, r11) 2449 addi r3,r1,STACK_FRAME_OVERHEAD 2450 bl altivec_unavailable_exception 2451 b interrupt_return 2452 2453 GEN_KVM altivec_unavailable 2454 2455 2456/** 2457 * Interrupt 0xf40 - VSX Unavailable Interrupt. 2458 * This is a synchronous interrupt in response to 2459 * executing a VSX instruction with MSR[VSX]=0. 2460 * Similar to FP unavailable. 2461 */ 2462INT_DEFINE_BEGIN(vsx_unavailable) 2463 IVEC=0xf40 2464 IRECONCILE=0 2465#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2466 IKVM_REAL=1 2467#endif 2468INT_DEFINE_END(vsx_unavailable) 2469 2470EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20) 2471 GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1 2472EXC_REAL_END(vsx_unavailable, 0xf40, 0x20) 2473EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20) 2474 GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1 2475EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20) 2476EXC_COMMON_BEGIN(vsx_unavailable_common) 2477 GEN_COMMON vsx_unavailable 2478#ifdef CONFIG_VSX 2479BEGIN_FTR_SECTION 2480 beq 1f 2481#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2482 BEGIN_FTR_SECTION_NESTED(69) 2483 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in 2484 * transaction), go do TM stuff 2485 */ 2486 rldicl. r0, r12, (64-MSR_TS_LG), (64-2) 2487 bne- 2f 2488 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 2489#endif 2490 b load_up_vsx 2491#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 24922: /* User process was in a transaction */ 2493 RECONCILE_IRQ_STATE(r10, r11) 2494 addi r3,r1,STACK_FRAME_OVERHEAD 2495 bl vsx_unavailable_tm 2496 b interrupt_return 2497#endif 24981: 2499END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2500#endif 2501 RECONCILE_IRQ_STATE(r10, r11) 2502 addi r3,r1,STACK_FRAME_OVERHEAD 2503 bl vsx_unavailable_exception 2504 b interrupt_return 2505 2506 GEN_KVM vsx_unavailable 2507 2508 2509/** 2510 * Interrupt 0xf60 - Facility Unavailable Interrupt. 2511 * This is a synchronous interrupt in response to 2512 * executing an instruction without access to the facility that can be 2513 * resolved by the OS (e.g., FSCR, MSR). 2514 * Similar to FP unavailable. 
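 *
 * For example (added note), with FSCR[DSCR]=0 a userspace
 *      mfspr r3,SPRN_DSCR
 * traps here; the FSCR status field identifies which facility trapped,
 * so the handler can emulate, enable, or signal as appropriate.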
 */
INT_DEFINE_BEGIN(facility_unavailable)
        IVEC=0xf60
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_REAL=1
#endif
INT_DEFINE_END(facility_unavailable)

EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
        GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
        GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
EXC_COMMON_BEGIN(facility_unavailable_common)
        GEN_COMMON facility_unavailable
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl facility_unavailable_exception
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
        b interrupt_return

        GEN_KVM facility_unavailable


/**
 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing an instruction without access to the facility that can only
 * be resolved in HV mode (e.g., HFSCR).
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(h_facility_unavailable)
        IVEC=0xf80
        IHSRR=1
        IKVM_REAL=1
        IKVM_VIRT=1
INT_DEFINE_END(h_facility_unavailable)

EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
        GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
        GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
EXC_COMMON_BEGIN(h_facility_unavailable_common)
        GEN_COMMON h_facility_unavailable
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl facility_unavailable_exception
        REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
        b interrupt_return

        GEN_KVM h_facility_unavailable


EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)

EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)

#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_system_error)
        IVEC=0x1200
        IHSRR=1
        IKVM_SKIP=1
        IKVM_REAL=1
INT_DEFINE_END(cbe_system_error)

EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
        GEN_INT_ENTRY cbe_system_error, virt=0
EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
EXC_COMMON_BEGIN(cbe_system_error_common)
        GEN_COMMON cbe_system_error
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl cbe_system_error_exception
        b interrupt_return

        GEN_KVM cbe_system_error

#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif


INT_DEFINE_BEGIN(instruction_breakpoint)
        IVEC=0x1300
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        IKVM_SKIP=1
        IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_breakpoint)

EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
        GEN_INT_ENTRY instruction_breakpoint, virt=0
EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
        GEN_INT_ENTRY instruction_breakpoint, virt=1
EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
EXC_COMMON_BEGIN(instruction_breakpoint_common)
        GEN_COMMON instruction_breakpoint
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl instruction_breakpoint_exception
        b interrupt_return

        GEN_KVM instruction_breakpoint


EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)

/**
 * Interrupt 0x1500 - Soft Patch Interrupt
 *
 * Handling:
 * This is an implementation specific interrupt which can be used for a
 * range of exceptions.
 *
 * This interrupt handler is unique in that it runs the denormal assist
 * code even for guests (and even in guest context) without going to KVM,
 * for speed. POWER9 does not raise denorm exceptions, so this special
 * case could eventually be phased out.
 */
INT_DEFINE_BEGIN(denorm_exception)
        IVEC=0x1500
        IHSRR=1
        IBRANCH_TO_COMMON=0
        IKVM_REAL=1
INT_DEFINE_END(denorm_exception)

EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
        GEN_INT_ENTRY denorm_exception, virt=0
#ifdef CONFIG_PPC_DENORMALISATION
        andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
        bne+ denorm_assist
#endif
        GEN_BRANCH_TO_COMMON denorm_exception, virt=0
EXC_REAL_END(denorm_exception, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
        GEN_INT_ENTRY denorm_exception, virt=1
        andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
        bne+ denorm_assist
        GEN_BRANCH_TO_COMMON denorm_exception, virt=1
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif

#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
        mfmsr r10
        ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
        xori r10,r10,(MSR_FE0|MSR_FE1)
        mtmsrd r10
        sync

        .Lreg=0
        .rept 32
        fmr .Lreg,.Lreg
        .Lreg=.Lreg+1
        .endr

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
        mfmsr r10
        oris r10,r10,MSR_VSX@h
        mtmsrd r10
        sync

        .Lreg=0
        .rept 32
        XVCPSGNDP(.Lreg,.Lreg,.Lreg)
        .Lreg=.Lreg+1
        .endr

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
        b denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
        .Lreg=32
        .rept 32
        XVCPSGNDP(.Lreg,.Lreg,.Lreg)
        .Lreg=.Lreg+1
        .endr

denorm_done:
        mfspr r11,SPRN_HSRR0
        subi r11,r11,4
        mtspr SPRN_HSRR0,r11
        mtcrf 0x80,r9
        ld r9,PACA_EXGEN+EX_R9(r13)
BEGIN_FTR_SECTION
        ld r10,PACA_EXGEN+EX_PPR(r13)
        mtspr SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
        ld r10,PACA_EXGEN+EX_CFAR(r13)
        mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        ld r10,PACA_EXGEN+EX_R10(r13)
        ld r11,PACA_EXGEN+EX_R11(r13)
        ld r12,PACA_EXGEN+EX_R12(r13)
        ld r13,PACA_EXGEN+EX_R13(r13)
        HRFI_TO_UNKNOWN
        b .
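
/*
 * Note (added for clarity): denorm_done winds HSRR0 back by 4 so that,
 * once the fmr/XVCPSGNDP pass above has re-normalised the source
 * registers, hrfid re-executes the instruction that took the soft patch
 * interrupt.
 */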
2741#endif 2742 2743EXC_COMMON_BEGIN(denorm_exception_common) 2744 GEN_COMMON denorm_exception 2745 addi r3,r1,STACK_FRAME_OVERHEAD 2746 bl unknown_exception 2747 b interrupt_return 2748 2749 GEN_KVM denorm_exception 2750 2751 2752#ifdef CONFIG_CBE_RAS 2753INT_DEFINE_BEGIN(cbe_maintenance) 2754 IVEC=0x1600 2755 IHSRR=1 2756 IKVM_SKIP=1 2757 IKVM_REAL=1 2758INT_DEFINE_END(cbe_maintenance) 2759 2760EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) 2761 GEN_INT_ENTRY cbe_maintenance, virt=0 2762EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) 2763EXC_VIRT_NONE(0x5600, 0x100) 2764EXC_COMMON_BEGIN(cbe_maintenance_common) 2765 GEN_COMMON cbe_maintenance 2766 addi r3,r1,STACK_FRAME_OVERHEAD 2767 bl cbe_maintenance_exception 2768 b interrupt_return 2769 2770 GEN_KVM cbe_maintenance 2771 2772#else /* CONFIG_CBE_RAS */ 2773EXC_REAL_NONE(0x1600, 0x100) 2774EXC_VIRT_NONE(0x5600, 0x100) 2775#endif 2776 2777 2778INT_DEFINE_BEGIN(altivec_assist) 2779 IVEC=0x1700 2780#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 2781 IKVM_REAL=1 2782#endif 2783INT_DEFINE_END(altivec_assist) 2784 2785EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100) 2786 GEN_INT_ENTRY altivec_assist, virt=0 2787EXC_REAL_END(altivec_assist, 0x1700, 0x100) 2788EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100) 2789 GEN_INT_ENTRY altivec_assist, virt=1 2790EXC_VIRT_END(altivec_assist, 0x5700, 0x100) 2791EXC_COMMON_BEGIN(altivec_assist_common) 2792 GEN_COMMON altivec_assist 2793 addi r3,r1,STACK_FRAME_OVERHEAD 2794#ifdef CONFIG_ALTIVEC 2795 bl altivec_assist_exception 2796 REST_NVGPRS(r1) /* instruction emulation may change GPRs */ 2797#else 2798 bl unknown_exception 2799#endif 2800 b interrupt_return 2801 2802 GEN_KVM altivec_assist 2803 2804 2805#ifdef CONFIG_CBE_RAS 2806INT_DEFINE_BEGIN(cbe_thermal) 2807 IVEC=0x1800 2808 IHSRR=1 2809 IKVM_SKIP=1 2810 IKVM_REAL=1 2811INT_DEFINE_END(cbe_thermal) 2812 2813EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) 2814 GEN_INT_ENTRY cbe_thermal, virt=0 2815EXC_REAL_END(cbe_thermal, 0x1800, 0x100) 2816EXC_VIRT_NONE(0x5800, 0x100) 2817EXC_COMMON_BEGIN(cbe_thermal_common) 2818 GEN_COMMON cbe_thermal 2819 addi r3,r1,STACK_FRAME_OVERHEAD 2820 bl cbe_thermal_exception 2821 b interrupt_return 2822 2823 GEN_KVM cbe_thermal 2824 2825#else /* CONFIG_CBE_RAS */ 2826EXC_REAL_NONE(0x1800, 0x100) 2827EXC_VIRT_NONE(0x5800, 0x100) 2828#endif 2829 2830 2831#ifdef CONFIG_PPC_WATCHDOG 2832 2833INT_DEFINE_BEGIN(soft_nmi) 2834 IVEC=0x900 2835 ISTACK=0 2836 IRECONCILE=0 /* Soft-NMI may fire under local_irq_disable */ 2837INT_DEFINE_END(soft_nmi) 2838 2839/* 2840 * Branch to soft_nmi_interrupt using the emergency stack. The emergency 2841 * stack is one that is usable by maskable interrupts so long as MSR_EE 2842 * remains off. It is used for recovery when something has corrupted the 2843 * normal kernel stack, for example. The "soft NMI" must not use the process 2844 * stack because we want irq disabled sections to avoid touching the stack 2845 * at all (other than PMU interrupts), so use the emergency stack for this, 2846 * and run it entirely with interrupts hard disabled. 
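 *
 * Because this runs NMI-like under local_irq_disable, the soft mask
 * state is saved across the call by hand (in RESULT(r1), see below) and
 * restored on exit rather than going through interrupt_return.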
 */
EXC_COMMON_BEGIN(soft_nmi_common)
        mfspr r11,SPRN_SRR0
        mr r10,r1
        ld r1,PACAEMERGSP(r13)
        subi r1,r1,INT_FRAME_SIZE
        __GEN_COMMON_BODY soft_nmi

        /*
         * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
         * system_reset_common)
         */
        li r10,IRQS_ALL_DISABLED
        stb r10,PACAIRQSOFTMASK(r13)
        lbz r10,PACAIRQHAPPENED(r13)
        std r10,RESULT(r1)
        ori r10,r10,PACA_IRQ_HARD_DIS
        stb r10,PACAIRQHAPPENED(r13)

        addi r3,r1,STACK_FRAME_OVERHEAD
        bl soft_nmi_interrupt

        /* Clear MSR_RI before setting SRR0 and SRR1. */
        li r9,0
        mtmsrd r9,1

        /*
         * Restore soft mask settings.
         */
        ld r10,RESULT(r1)
        stb r10,PACAIRQHAPPENED(r13)
        ld r10,SOFTE(r1)
        stb r10,PACAIRQSOFTMASK(r13)

        kuap_restore_amr r9, r10
        EXCEPTION_RESTORE_REGS hsrr=0
        RFI_TO_KERNEL

#endif /* CONFIG_PPC_WATCHDOG */

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was a HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
.macro MASKED_INTERRUPT hsrr=0
        .if \hsrr
masked_Hinterrupt:
        .else
masked_interrupt:
        .endif
        lbz r11,PACAIRQHAPPENED(r13)
        or r11,r11,r10
        stb r11,PACAIRQHAPPENED(r13)
        cmpwi r10,PACA_IRQ_DEC
        bne 1f
        lis r10,0x7fff
        ori r10,r10,0xffff
        mtspr SPRN_DEC,r10
#ifdef CONFIG_PPC_WATCHDOG
        b soft_nmi_common
#else
        b 2f
#endif
1:      andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
        beq 2f
        xori r12,r12,MSR_EE /* clear MSR_EE */
        .if \hsrr
        mtspr SPRN_HSRR1,r12
        .else
        mtspr SPRN_SRR1,r12
        .endif
        ori r11,r11,PACA_IRQ_HARD_DIS
        stb r11,PACAIRQHAPPENED(r13)
2:      /* done */
        ld r10,PACA_EXGEN+EX_CTR(r13)
        mtctr r10
        mtcrf 0x80,r9
        std r1,PACAR1(r13)
        ld r9,PACA_EXGEN+EX_R9(r13)
        ld r10,PACA_EXGEN+EX_R10(r13)
        ld r11,PACA_EXGEN+EX_R11(r13)
        ld r12,PACA_EXGEN+EX_R12(r13)
        ld r13,PACA_EXGEN+EX_R13(r13)
        /* May return to masked low address where r13 is not set up */
        .if \hsrr
        HRFI_TO_KERNEL
        .else
        RFI_TO_KERNEL
        .endif
        b .
.endm

TRAMP_REAL_BEGIN(stf_barrier_fallback)
        std r9,PACA_EXRFI+EX_R9(r13)
        std r10,PACA_EXRFI+EX_R10(r13)
        sync
        ld r9,PACA_EXRFI+EX_R9(r13)
        ld r10,PACA_EXRFI+EX_R10(r13)
        ori 31,31,0
        .rept 14
        b 1f
1:
        .endr
        blr

/* Clobbers r10, r11, ctr */
.macro L1D_DISPLACEMENT_FLUSH
        ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
        ld r11,PACA_L1D_FLUSH_SIZE(r13)
        srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
        mtctr r11
        DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

        /* order ld/st prior to dcbt stop all streams with flushing */
        sync

        /*
         * The load addresses are at staggered offsets within cachelines,
         * which suits some pipelines better (on others it should not
         * hurt).
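         *
         * Worked example (added note): with a 64kB flush size the srdi
         * above sets ctr = 64kB / (128B * 8) = 64, and each iteration of
         * the loop below displaces 8 lines (1kB), so the whole area is
         * walked once.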
         */
1:
        ld r11,(0x80 + 8)*0(r10)
        ld r11,(0x80 + 8)*1(r10)
        ld r11,(0x80 + 8)*2(r10)
        ld r11,(0x80 + 8)*3(r10)
        ld r11,(0x80 + 8)*4(r10)
        ld r11,(0x80 + 8)*5(r10)
        ld r11,(0x80 + 8)*6(r10)
        ld r11,(0x80 + 8)*7(r10)
        addi r10,r10,0x80*8
        bdnz 1b
.endm

TRAMP_REAL_BEGIN(entry_flush_fallback)
        std r9,PACA_EXRFI+EX_R9(r13)
        std r10,PACA_EXRFI+EX_R10(r13)
        std r11,PACA_EXRFI+EX_R11(r13)
        mfctr r9
        L1D_DISPLACEMENT_FLUSH
        mtctr r9
        ld r9,PACA_EXRFI+EX_R9(r13)
        ld r10,PACA_EXRFI+EX_R10(r13)
        ld r11,PACA_EXRFI+EX_R11(r13)
        blr

/*
 * The SCV entry flush happens with interrupts enabled, so it must disable
 * them to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common).
 * r10 (containing LR) does not need to be preserved here because scv entry
 * puts 0 in the pt_regs, CTR can be clobbered for the same reason.
 */
TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
        li r10,0
        mtmsrd r10,1
        lbz r10,PACAIRQHAPPENED(r13)
        ori r10,r10,PACA_IRQ_HARD_DIS
        stb r10,PACAIRQHAPPENED(r13)
        std r11,PACA_EXRFI+EX_R11(r13)
        L1D_DISPLACEMENT_FLUSH
        ld r11,PACA_EXRFI+EX_R11(r13)
        li r10,MSR_RI
        mtmsrd r10,1
        blr

TRAMP_REAL_BEGIN(rfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
        std r1,PACA_EXRFI+EX_R12(r13)
        ld r1,PACAKSAVE(r13)
        std r9,PACA_EXRFI+EX_R9(r13)
        std r10,PACA_EXRFI+EX_R10(r13)
        std r11,PACA_EXRFI+EX_R11(r13)
        mfctr r9
        L1D_DISPLACEMENT_FLUSH
        mtctr r9
        ld r9,PACA_EXRFI+EX_R9(r13)
        ld r10,PACA_EXRFI+EX_R10(r13)
        ld r11,PACA_EXRFI+EX_R11(r13)
        ld r1,PACA_EXRFI+EX_R12(r13)
        GET_SCRATCH0(r13);
        rfid

TRAMP_REAL_BEGIN(hrfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
        std r1,PACA_EXRFI+EX_R12(r13)
        ld r1,PACAKSAVE(r13)
        std r9,PACA_EXRFI+EX_R9(r13)
        std r10,PACA_EXRFI+EX_R10(r13)
        std r11,PACA_EXRFI+EX_R11(r13)
        mfctr r9
        L1D_DISPLACEMENT_FLUSH
        mtctr r9
        ld r9,PACA_EXRFI+EX_R9(r13)
        ld r10,PACA_EXRFI+EX_R10(r13)
        ld r11,PACA_EXRFI+EX_R11(r13)
        ld r1,PACA_EXRFI+EX_R12(r13)
        GET_SCRATCH0(r13);
        hrfid

TRAMP_REAL_BEGIN(rfscv_flush_fallback)
        /* system call volatile */
        mr r7,r13
        GET_PACA(r13);
        mr r8,r1
        ld r1,PACAKSAVE(r13)
        mfctr r9
        ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
        ld r11,PACA_L1D_FLUSH_SIZE(r13)
        srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
        mtctr r11
        DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

        /* order ld/st prior to dcbt stop all streams with flushing */
        sync

        /*
         * The load addresses are at staggered offsets within cachelines,
         * which suits some pipelines better (on others it should not
         * hurt).
         */
1:
        ld r11,(0x80 + 8)*0(r10)
        ld r11,(0x80 + 8)*1(r10)
        ld r11,(0x80 + 8)*2(r10)
        ld r11,(0x80 + 8)*3(r10)
        ld r11,(0x80 + 8)*4(r10)
        ld r11,(0x80 + 8)*5(r10)
        ld r11,(0x80 + 8)*6(r10)
        ld r11,(0x80 + 8)*7(r10)
        addi r10,r10,0x80*8
        bdnz 1b

        mtctr r9
        li r9,0
        li r10,0
        li r11,0
        mr r1,r8
        mr r13,r7
        RFSCV

USE_TEXT_SECTION()

_GLOBAL(do_uaccess_flush)
        UACCESS_FLUSH_FIXUP_SECTION
        nop
        nop
        nop
        blr
        L1D_DISPLACEMENT_FLUSH
        blr
_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
EXPORT_SYMBOL(do_uaccess_flush)


MASKED_INTERRUPT
MASKED_INTERRUPT hsrr=1

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvmppc_skip_interrupt:
        /*
         * Here all GPRs are unchanged from when the interrupt happened
         * except for r13, which is saved in SPRG_SCRATCH0.
         */
        mfspr r13, SPRN_SRR0
        addi r13, r13, 4
        mtspr SPRN_SRR0, r13
        GET_SCRATCH0(r13)
        RFI_TO_KERNEL
        b .

kvmppc_skip_Hinterrupt:
        /*
         * Here all GPRs are unchanged from when the interrupt happened
         * except for r13, which is saved in SPRG_SCRATCH0.
         */
        mfspr r13, SPRN_HSRR0
        addi r13, r13, 4
        mtspr SPRN_HSRR0, r13
        GET_SCRATCH0(r13)
        HRFI_TO_KERNEL
        b .
#endif

/*
 * Relocation-on interrupts: A subset of the interrupts can be delivered
 * with IR=1/DR=1, if AIL==2 and MSR[HV] won't be changed by delivering
 * it. Addresses are the same as the original interrupt addresses, but
 * offset by 0xc000000000004000.
 * It's impossible to receive interrupts below 0x300 via this mechanism.
 * KVM: None of these traps are from the guest; anything that escalated
 * to HV=1 from HV=0 is delivered via real mode handlers.
 */

/*
 * This uses the standard macro, since the original 0x300 vector
 * only has extra guff for STAB-based processors -- which never
 * come here.
 */

EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
        b __ppc64_runlatch_on

USE_FIXED_SECTION(virt_trampolines)
/*
 * All code below __end_interrupts is treated as soft-masked. If
 * any code runs here with MSR[EE]=1, it must then cope with pending
 * soft interrupt being raised (i.e., by ensuring it is replayed).
 *
 * The __end_interrupts marker must be past the out-of-line (OOL)
 * handlers, so that they are copied to real address 0x100 when running
 * a relocatable kernel. This ensures they can be reached from the short
 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
 * directly, without using LOAD_HANDLER().
 */
        .align 7
        .globl __end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts)

#ifdef CONFIG_PPC_970_NAP
/*
 * Called by exception entry code if _TLF_NAPPING was set, this clears
 * the NAPPING flag, and redirects the exception exit to
 * power4_idle_nap_return.
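 *
 * From the code below: the caller is expected to pass the thread's
 * local flags in r9, the _TLF_NAPPING mask in r10, and the thread_info
 * pointer in r11; the saved NIP in the stack frame is then redirected.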
3179 */ 3180 .globl power4_fixup_nap 3181EXC_COMMON_BEGIN(power4_fixup_nap) 3182 andc r9,r9,r10 3183 std r9,TI_LOCAL_FLAGS(r11) 3184 LOAD_REG_ADDR(r10, power4_idle_nap_return) 3185 std r10,_NIP(r1) 3186 blr 3187 3188power4_idle_nap_return: 3189 blr 3190#endif 3191 3192CLOSE_FIXED_SECTION(real_vectors); 3193CLOSE_FIXED_SECTION(real_trampolines); 3194CLOSE_FIXED_SECTION(virt_vectors); 3195CLOSE_FIXED_SECTION(virt_trampolines); 3196 3197USE_TEXT_SECTION() 3198 3199/* MSR[RI] should be clear because this uses SRR[01] */ 3200enable_machine_check: 3201 mflr r0 3202 bcl 20,31,$+4 32030: mflr r3 3204 addi r3,r3,(1f - 0b) 3205 mtspr SPRN_SRR0,r3 3206 mfmsr r3 3207 ori r3,r3,MSR_ME 3208 mtspr SPRN_SRR1,r3 3209 RFI_TO_KERNEL 32101: mtlr r0 3211 blr 3212 3213/* MSR[RI] should be clear because this uses SRR[01] */ 3214disable_machine_check: 3215 mflr r0 3216 bcl 20,31,$+4 32170: mflr r3 3218 addi r3,r3,(1f - 0b) 3219 mtspr SPRN_SRR0,r3 3220 mfmsr r3 3221 li r4,MSR_ME 3222 andc r3,r3,r4 3223 mtspr SPRN_SRR1,r3 3224 RFI_TO_KERNEL 32251: mtlr r0 3226 blr 3227 3228/* 3229 * Hash table stuff 3230 */ 3231 .balign IFETCH_ALIGN_BYTES 3232do_hash_page: 3233#ifdef CONFIG_PPC_BOOK3S_64 3234 lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h 3235 ori r0,r0,DSISR_BAD_FAULT_64S@l 3236 and. r0,r5,r0 /* weird error? */ 3237 bne- handle_page_fault /* if not, try to insert a HPTE */ 3238 3239 /* 3240 * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then 3241 * don't call hash_page, just fail the fault. This is required to 3242 * prevent re-entrancy problems in the hash code, namely perf 3243 * interrupts hitting while something holds H_PAGE_BUSY, and taking a 3244 * hash fault. See the comment in hash_preload(). 3245 */ 3246 ld r11, PACA_THREAD_INFO(r13) 3247 lwz r0,TI_PREEMPT(r11) 3248 andis. r0,r0,NMI_MASK@h 3249 bne 77f 3250 3251 /* 3252 * r3 contains the trap number 3253 * r4 contains the faulting address 3254 * r5 contains dsisr 3255 * r6 msr 3256 * 3257 * at return r3 = 0 for success, 1 for page fault, negative for error 3258 */ 3259 bl __hash_page /* build HPTE if possible */ 3260 cmpdi r3,0 /* see if __hash_page succeeded */ 3261 3262 /* Success */ 3263 beq interrupt_return /* Return from exception on success */ 3264 3265 /* Error */ 3266 blt- 13f 3267 3268 /* Reload DAR/DSISR into r4/r5 for the DABR check below */ 3269 ld r4,_DAR(r1) 3270 ld r5,_DSISR(r1) 3271#endif /* CONFIG_PPC_BOOK3S_64 */ 3272 3273/* Here we have a page fault that hash_page can't handle. */ 3274handle_page_fault: 327511: andis. r0,r5,DSISR_DABRMATCH@h 3276 bne- handle_dabr_fault 3277 addi r3,r1,STACK_FRAME_OVERHEAD 3278 bl do_page_fault 3279 cmpdi r3,0 3280 beq+ interrupt_return 3281 mr r5,r3 3282 addi r3,r1,STACK_FRAME_OVERHEAD 3283 ld r4,_DAR(r1) 3284 bl bad_page_fault 3285 b interrupt_return 3286 3287/* We have a data breakpoint exception - handle it */ 3288handle_dabr_fault: 3289 ld r4,_DAR(r1) 3290 ld r5,_DSISR(r1) 3291 addi r3,r1,STACK_FRAME_OVERHEAD 3292 bl do_break 3293 /* 3294 * do_break() may have changed the NV GPRS while handling a breakpoint. 3295 * If so, we need to restore them with their updated values. 
3296 */ 3297 REST_NVGPRS(r1) 3298 b interrupt_return 3299 3300 3301#ifdef CONFIG_PPC_BOOK3S_64 3302/* We have a page fault that hash_page could handle but HV refused 3303 * the PTE insertion 3304 */ 330513: mr r5,r3 3306 addi r3,r1,STACK_FRAME_OVERHEAD 3307 ld r4,_DAR(r1) 3308 bl low_hash_fault 3309 b interrupt_return 3310#endif 3311 3312/* 3313 * We come here as a result of a DSI at a point where we don't want 3314 * to call hash_page, such as when we are accessing memory (possibly 3315 * user memory) inside a PMU interrupt that occurred while interrupts 3316 * were soft-disabled. We want to invoke the exception handler for 3317 * the access, or panic if there isn't a handler. 3318 */ 331977: addi r3,r1,STACK_FRAME_OVERHEAD 3320 li r5,SIGSEGV 3321 bl bad_page_fault 3322 b interrupt_return 3323