/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/exception-64s.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _PTCR	GPR12

#define PSSCR_HV_TEMPLATE	PSSCR_ESL | PSSCR_EC | \
				PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
				PSSCR_MTL_MASK

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in the interrupt stack frame.
 */
save_sprs_to_stack:
	/*
	 * Note: all registers, i.e. per-core, per-subcore or per-thread,
	 * are saved here since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
	 * SDR1 here.
	 */
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	core_idle_lock_held
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE on POWER8
 *	   - Requested STOP state on POWER9
 *
 * Whether to check IRQ_HAPPENED in r4:
 *	0 - don't check
 *	1 - check
 *
 * Address to 'rfid' to in r5.
 */
_GLOBAL(pnv_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
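	 * For deep idle states, save_sprs_to_stack later reuses the unused
	 * GPR save slots of this same frame (see the _SDR1.._PTCR defines
	 * at the top of this file) to hold the SPRs that winkle and deep
	 * stop lose.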
	 */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* Hard disable interrupts */
	mfmsr	r9
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
	beq	1f
	cmpwi	cr0,r4,0
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/* We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6
	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r5
	mtspr	SPRN_SRR1, r7
	RFI_TO_KERNEL

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in the system     */
	/* reset interrupt vector in exceptions-64s.S.         */
	/* The reason is that another thread can switch the    */
	/* MMU to a guest context whenever this flag is set    */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,     */
	/* that would potentially cause this thread to start   */
	/* executing instructions from guest memory in         */
	/* hypervisor mode, leading to a host crash or data    */
	/* corruption, or worse.                               */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
	lwarx	r15,0,r14

	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held

	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If the result is zero (cr0 eq), the current thread is the last thread of
 * the core entering sleep. The last thread needs to execute the hardware bug
 * workaround code if required by the platform.
 * Make the workaround call unconditionally here. The branch below is patched
 * out during idle-state discovery if the platform does not require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

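	/* Publish the cleared thread bit; retry if the reservation was lost */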
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_rm_config_cpu_idle_state

	/* Clear Lock bit */
	li	r0,0
	lwsync
	stw	r0,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)

/*
 * r3 - requested stop state
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
 * Check if the requested state is a deep idle state.
 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	2f
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
2:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * the stack and enter stop.
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ(PPC_STOP)

_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr
	li	r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr	r4,r3
	li	r3,PNV_THREAD_NAP
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_sleep)
	li	r3,PNV_THREAD_SLEEP
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_winkle)
	li	r3,PNV_THREAD_WINKLE
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;


/*
 * r3 - requested stop state
 */
_GLOBAL(power9_idle_stop)
	LOAD_REG_IMMEDIATE(r4, PSSCR_HV_TEMPLATE)
	or	r4,r4,r3
	mtspr	SPRN_PSSCR, r4
	li	r4, 1
	LOAD_REG_ADDR(r5,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
/*
 * Called from the reset vector. Check whether we have woken up with
 * hypervisor state loss. If yes, restore hypervisor state and return
 * back to the reset vector.
373 * 374 * r13 - Contents of HSPRG0 375 * cr3 - set to gt if waking up with partial/complete hypervisor state loss 376 */ 377_GLOBAL(pnv_restore_hyp_resource) 378BEGIN_FTR_SECTION 379 ld r2,PACATOC(r13); 380 /* 381 * POWER ISA 3. Use PSSCR to determine if we 382 * are waking up from deep idle state 383 */ 384 LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) 385 ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) 386 387 mfspr r5,SPRN_PSSCR 388 /* 389 * 0-3 bits correspond to Power-Saving Level Status 390 * which indicates the idle state we are waking up from 391 */ 392 rldicl r5,r5,4,60 393 cmpd cr4,r5,r4 394 bge cr4,pnv_wakeup_tb_loss 395 /* 396 * Waking up without hypervisor state loss. Return to 397 * reset vector 398 */ 399 blr 400 401END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 402 403 /* 404 * POWER ISA 2.07 or less. 405 * Check if last bit of HSPGR0 is set. This indicates whether we are 406 * waking up from winkle. 407 */ 408 clrldi r5,r13,63 409 clrrdi r13,r13,1 410 411 /* Now that we are sure r13 is corrected, load TOC */ 412 ld r2,PACATOC(r13); 413 cmpwi cr4,r5,1 414 mtspr SPRN_HSPRG0,r13 415 416 lbz r0,PACA_THREAD_IDLE_STATE(r13) 417 cmpwi cr2,r0,PNV_THREAD_NAP 418 bgt cr2,pnv_wakeup_tb_loss /* Either sleep or Winkle */ 419 420 /* 421 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking 422 * up from nap. At this stage CR3 shouldn't contains 'gt' since that 423 * indicates we are waking with hypervisor state loss from nap. 424 */ 425 bgt cr3,. 426 427 blr /* Return back to System Reset vector from where 428 pnv_restore_hyp_resource was invoked */ 429 430/* 431 * Called if waking up from idle state which can cause either partial or 432 * complete hyp state loss. 433 * In POWER8, called if waking up from fastsleep or winkle 434 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state 435 * 436 * r13 - PACA 437 * cr3 - gt if waking up with partial/complete hypervisor state loss 438 * cr4 - gt or eq if waking up from complete hypervisor state loss. 439 */ 440_GLOBAL(pnv_wakeup_tb_loss) 441 ld r1,PACAR1(r13) 442 /* 443 * Before entering any idle state, the NVGPRs are saved in the stack. 444 * If there was a state loss, or PACA_NAPSTATELOST was set, then the 445 * NVGPRs are restored. If we are here, it is likely that state is lost, 446 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach 447 * here are the same as the test to restore NVGPRS: 448 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300, 449 * and SRR1 test for restoring NVGPRs. 450 * 451 * We are about to clobber NVGPRs now, so set NAPSTATELOST to 452 * guarantee they will always be restored. This might be tightened 453 * with careful reading of specs (particularly for ISA300) but this 454 * is already a slow wakeup path and it's simpler to be safe. 455 */ 456 li r0,1 457 stb r0,PACA_NAPSTATELOST(r13) 458 459 /* 460 * 461 * Save SRR1 and LR in NVGPRs as they might be clobbered in 462 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required 463 * to determine the wakeup reason if we branch to kvm_start_guest. LR 464 * is required to return back to reset vector after hypervisor state 465 * restore is complete. 466 */ 467 mflr r17 468 mfspr r16,SPRN_SRR1 469BEGIN_FTR_SECTION 470 CHECK_HMI_INTERRUPT 471END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 472 473 lbz r7,PACA_THREAD_MASK(r13) 474 ld r14,PACA_CORE_IDLE_STATE_PTR(r13) 475lwarx_loop2: 476 lwarx r15,0,r14 477 andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT 478 /* 479 * Lock bit is set in one of the 2 cases- 480 * a. 
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wakeup path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
	bnel	core_idle_lock_held

	cmpwi	cr2,r15,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in the core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync

BEGIN_FTR_SECTION
	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7	/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7	/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore.
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch instruction below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching is performed during the
	 * discovery of idle states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least partial
	 * hypervisor state loss, to determine if a timebase resync is needed.
	 */
	ble	cr3,clear_lock
	/* Time base re-sync */
	bl	opal_rm_resync_timebase;
	/*
	 * If waking up from sleep, per-core state is not lost, skip to
	 * clear_lock.
	 */
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per-core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
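	 *
	 * Otherwise (winkle, or a deep stop state) the SLB is lost as well:
	 * the bolted entries are reloaded below from the PACA SLB shadow,
	 * except on a radix MMU where no SLB restore is needed.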
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per-thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

hypervisor_state_restored:

	mtspr	SPRN_SRR1,r16
	mtlr	r17
	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_rm_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_loss)
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	RFI_TO_KERNEL

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_noloss)
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	RFI_TO_KERNEL