/*
 * This file contains sleep low-level functions for PowerBook G3.
 *    Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *    and Paul Mackerras (paulus@samba.org).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>

#define MAGIC	0x4c617273	/* 'Lars' */

/*
 * Structure for storing CPU registers on the stack.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_DBAT4	0x60
#define SL_IBAT4	0x68
#define SL_DBAT5	0x70
#define SL_IBAT5	0x78
#define SL_DBAT6	0x80
#define SL_IBAT6	0x88
#define SL_DBAT7	0x90
#define SL_IBAT7	0x98
#define SL_TB		0xa0
#define SL_R2		0xa8
#define SL_CR		0xac
#define SL_R12		0xb0	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)

	.section .text
	.align	5

#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) || \
	(defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32))

/* This gets called by via-pmu.c late during the sleep process.
 * The PMU has already been sent the sleep command and will shut us down
 * soon. We need to save all the required state and set up the wakeup
 * vector that will be called by the ROM on wakeup.
 */
_GLOBAL(low_sleep_handler)
#ifndef CONFIG_6xx
	blr
#else
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-SL_SIZE(r1)
	mfcr	r0
	stw	r0,SL_CR(r1)
	stw	r2,SL_R2(r1)
	stmw	r12,SL_R12(r1)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r1)
	mfsdr1	r4
	stw	r4,SL_SDR1(r1)

	/* Get a stable timebase and save it */
1:	mftbu	r4
	stw	r4,SL_TB(r1)
	mftb	r5
	stw	r5,SL_TB+4(r1)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r1)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r1)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r1)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r1)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r1)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r1)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r1)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r1)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r1)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r1)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r1)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r1)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r1)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r1)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r1)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r1)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r1)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r1)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r1)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r1)

BEGIN_MMU_FTR_SECTION
	mfspr	r4,SPRN_DBAT4U
	stw	r4,SL_DBAT4(r1)
	mfspr	r4,SPRN_DBAT4L
	stw	r4,SL_DBAT4+4(r1)
	mfspr	r4,SPRN_DBAT5U
	stw	r4,SL_DBAT5(r1)
	mfspr	r4,SPRN_DBAT5L
	stw	r4,SL_DBAT5+4(r1)
	mfspr	r4,SPRN_DBAT6U
	stw	r4,SL_DBAT6(r1)
	mfspr	r4,SPRN_DBAT6L
	stw	r4,SL_DBAT6+4(r1)
	mfspr	r4,SPRN_DBAT7U
	stw	r4,SL_DBAT7(r1)
	mfspr	r4,SPRN_DBAT7L
	stw	r4,SL_DBAT7+4(r1)
	mfspr	r4,SPRN_IBAT4U
	stw	r4,SL_IBAT4(r1)
	mfspr	r4,SPRN_IBAT4L
	stw	r4,SL_IBAT4+4(r1)
	mfspr	r4,SPRN_IBAT5U
	stw	r4,SL_IBAT5(r1)
	mfspr	r4,SPRN_IBAT5L
	stw	r4,SL_IBAT5+4(r1)
	mfspr	r4,SPRN_IBAT6U
	stw	r4,SL_IBAT6(r1)
	mfspr	r4,SPRN_IBAT6L
	stw	r4,SL_IBAT6+4(r1)
	mfspr	r4,SPRN_IBAT7U
	stw	r4,SL_IBAT7(r1)
	mfspr	r4,SPRN_IBAT7L
	stw	r4,SL_IBAT7+4(r1)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
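
	/* Note: the high BATs (DBAT4-7/IBAT4-7) only exist on CPUs that
	 * advertise MMU_FTR_USE_HIGH_BATS (e.g. the 745x-class G4s); on
	 * other CPUs the feature section above is patched out at boot,
	 * so only BAT0-3 are saved and later restored.
	 */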

	/* Back up various CPU configuration registers */
	bl	__save_cpu_setup

	/* The ROM can wake us up via 2 different vectors:
	 *  - On wallstreet & lombard, we must write a magic
	 *    value 'Lars' at address 4 and a pointer to a
	 *    memory location containing the PC to resume from
	 *    at address 0.
	 *  - On Core99, we must store the wakeup vector at
	 *    address 0x80 and optionally its parameters
	 *    at address 0x84. I've had some trouble with those
	 *    parameters, however, and no longer use them.
	 */
	lis	r5,grackle_wake_up@ha
	addi	r5,r5,grackle_wake_up@l
	tophys(r5,r5)
	stw	r5,SL_PC(r1)
	lis	r4,KERNELBASE@h
	tophys(r5,r1)
	addi	r5,r5,SL_PC
	lis	r6,MAGIC@ha
	addi	r6,r6,MAGIC@l
	stw	r5,0(r4)
	stw	r6,4(r4)
	/* Set up the wakeup vector and parameter at 0x80-0x84 for Core99 */
	lis	r3,core99_wake_up@ha
	addi	r3,r3,core99_wake_up@l
	tophys(r3,r3)
	stw	r3,0x80(r4)
	stw	r5,0x84(r4)
	/* Store a pointer to our backup storage into
	 * a kernel global
	 */
	lis	r3,sleep_storage@ha
	addi	r3,r3,sleep_storage@l
	stw	r5,0(r3)

	.globl	low_cpu_die
low_cpu_die:
	/* Flush & disable all caches */
	bl	flush_disable_caches

	/* Turn off data relocation. */
	mfmsr	r3		/* Read MSR into r3 */
	rlwinm	r3,r3,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r3
	isync

BEGIN_FTR_SECTION
	/* Flush any pending L2 data prefetches to work around HW bug */
	sync
	lis	r3,0xfff0
	lwz	r0,0(r3)	/* perform cache-inhibited load to ROM */
	sync			/* (caches are disabled at this point) */
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

/*
 * Set the HID0 and MSR for sleep.
 */
	mfspr	r2,SPRN_HID0
	rlwinm	r2,r2,0,10,7	/* clear doze, nap */
	oris	r2,r2,HID0_SLEEP@h
	sync
	isync
	mtspr	SPRN_HID0,r2
	sync

/* This loop puts us back to sleep in case we have a spurious
 * wakeup so that the host bridge properly stays asleep. The
 * CPU will be turned off, either after a known time (about 1
 * second) on wallstreet & lombard, or as soon as the CPU enters
 * SLEEP mode on core99.
 */
	mfmsr	r2
	oris	r2,r2,MSR_POW@h
1:	sync
	mtmsr	r2
	isync
	b	1b
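
/* We never fall through here: power is cut while spinning above, and on
 * wakeup the ROM enters core99_wake_up or grackle_wake_up below, with the
 * MMU off, through the vectors stored at 0x80/0x84 or 0x0/0x4 by
 * low_sleep_handler.
 */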

/*
 * Here is the resume code.
 */


/*
 * Core99 machines resume here
 * r4 has the physical address of SL_PC(sp) (unused)
 */
_GLOBAL(core99_wake_up)
	/* Make sure HID0 no longer contains any sleep bit and that data cache
	 * is disabled
	 */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,11,7		/* clear SLEEP, NAP, DOZE bits */
	rlwinm	r3,r3,0,18,15		/* clear DCE, ICE */
	mtspr	SPRN_HID0,r3
	sync
	isync

	/* sanitize MSR */
	mfmsr	r3
	ori	r3,r3,MSR_EE|MSR_IP
	xori	r3,r3,MSR_EE|MSR_IP
	sync
	isync
	mtmsr	r3
	sync
	isync

	/* Recover sleep storage */
	lis	r3,sleep_storage@ha
	addi	r3,r3,sleep_storage@l
	tophys(r3,r3)
	lwz	r1,0(r3)

	/* Pass thru to older resume code ... */
/*
 * Here is the resume code for older machines.
 * r1 has the physical address of SL_PC(sp).
 */

grackle_wake_up:

	/* Restore the kernel's segment registers before
	 * we do any r1 memory access as we are not sure they
	 * are in a sane state above the first 256Mb region
	 */
	li	r0,16		/* load up segment register values */
	mtctr	r0		/* for context 0 */
	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
	li	r4,0
3:	mtsrin	r3,r4
	addi	r3,r3,0x111	/* increment VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync

	subi	r1,r1,SL_PC

	/* Restore various CPU configuration registers */
	bl	__restore_cpu_setup

	/* Make sure all FPRs have been initialized */
	bl	reloc_offset
	bl	__init_fpu_registers

	/* Invalidate & enable L1 cache, we don't care about
	 * whatever the ROM may have tried to write to memory
	 */
	bl	__inval_enable_L1

	/* Restore the BATs, and SDR1.  Then we can turn on the MMU. */
	lwz	r4,SL_SDR1(r1)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r1)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r1)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r1)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r1)
	mtsprg	3,r4

	lwz	r4,SL_DBAT0(r1)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r1)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r1)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r1)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r1)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r1)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r1)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r1)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r1)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r1)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r1)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r1)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r1)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r1)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r1)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r1)
	mtibatl	3,r4

BEGIN_MMU_FTR_SECTION
	lwz	r4,SL_DBAT4(r1)
	mtspr	SPRN_DBAT4U,r4
	lwz	r4,SL_DBAT4+4(r1)
	mtspr	SPRN_DBAT4L,r4
	lwz	r4,SL_DBAT5(r1)
	mtspr	SPRN_DBAT5U,r4
	lwz	r4,SL_DBAT5+4(r1)
	mtspr	SPRN_DBAT5L,r4
	lwz	r4,SL_DBAT6(r1)
	mtspr	SPRN_DBAT6U,r4
	lwz	r4,SL_DBAT6+4(r1)
	mtspr	SPRN_DBAT6L,r4
	lwz	r4,SL_DBAT7(r1)
	mtspr	SPRN_DBAT7U,r4
	lwz	r4,SL_DBAT7+4(r1)
	mtspr	SPRN_DBAT7L,r4
	lwz	r4,SL_IBAT4(r1)
	mtspr	SPRN_IBAT4U,r4
	lwz	r4,SL_IBAT4+4(r1)
	mtspr	SPRN_IBAT4L,r4
	lwz	r4,SL_IBAT5(r1)
	mtspr	SPRN_IBAT5U,r4
	lwz	r4,SL_IBAT5+4(r1)
	mtspr	SPRN_IBAT5L,r4
	lwz	r4,SL_IBAT6(r1)
	mtspr	SPRN_IBAT6U,r4
	lwz	r4,SL_IBAT6+4(r1)
	mtspr	SPRN_IBAT6L,r4
	lwz	r4,SL_IBAT7(r1)
	mtspr	SPRN_IBAT7U,r4
	lwz	r4,SL_IBAT7+4(r1)
	mtspr	SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
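
	/* The tlbie loop below steps an effective address across enough
	 * pages to hit every TLB set, so no stale translations from
	 * before sleep survive once the MMU is turned back on.
	 */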

	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r1)
	bl	turn_on_mmu

	/* get back the stack pointer */
	tovirt(r1,r1)

	/* Restore TB */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r1)
	lwz	r4,SL_TB+4(r1)
	mttbu	r3
	mttbl	r4

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r1)
	mtcr	r0
	lwz	r2,SL_R2(r1)
	lmw	r12,SL_R12(r1)
	addi	r1,r1,SL_SIZE
	lwz	r0,4(r1)
	mtlr	r0
	blr

turn_on_mmu:
	mflr	r4
	tovirt(r4,r4)
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi

#endif /* CONFIG_6xx */

	.section .data
	.balign	L1_CACHE_BYTES
sleep_storage:
	.long 0
	.balign	L1_CACHE_BYTES, 0

#endif /* CONFIG_PM || CONFIG_CPU_FREQ_PMAC || (CONFIG_HOTPLUG_CPU && CONFIG_PPC32) */
	.section .text