/*
 * This file contains low level CPU setup functions.
 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>

/*
 * Per-CPU-family setup entry points.
 *
 * Each entry saves LR in r4 (volatile, but none of the helpers below
 * touch it) and chains the relevant init helpers before returning.
 * NOTE(review): callers appear to pass a struct cpu_spec pointer in r5
 * (see CPU_SPEC_FEATURES accesses in setup_750cx/setup_745x_specifics)
 * -- confirm against the caller in cputable/head code.
 */
_GLOBAL(__setup_cpu_603)
	mflr	r4
BEGIN_FTR_SECTION
	bl	__init_fpu_registers		/* only if an FPU is actually present */
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
	bl	setup_common_caches
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_604)
	mflr	r4
	bl	setup_common_caches
	bl	setup_604_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750)
	mflr	r4
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750cx)
	mflr	r4
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750cx
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750fx)
	mflr	r4
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750fx
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_7400)
	mflr	r4
	bl	__init_fpu_registers
	bl	setup_7400_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_7410)
	mflr	r4
	bl	__init_fpu_registers
	bl	setup_7410_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	/* 7410 only: clear L2CR2 */
	li	r3,0
	mtspr	SPRN_L2CR2,r3
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_745x)
	mflr	r4
	bl	setup_common_caches
	bl	setup_745x_specifics
	mtlr	r4
	blr

/* Enable caches for 603's, 604, 750 & 7400
 *
 * Sets ICE|DCE in HID0.  The I-cache is always invalidated (ICFI);
 * the D-cache is only invalidated (DCI) when it was previously
 * disabled, so an already-valid D-cache is not thrown away.
 * Clobbers r0, r8, r11, cr0.
 */
setup_common_caches:
	mfspr	r11,SPRN_HID0
	andi.	r0,r11,HID0_DCE			/* cr0 != 0 if D-cache already on */
	ori	r11,r11,HID0_ICE|HID0_DCE
	ori	r8,r11,HID0_ICFI
	bne	1f				/* don't invalidate the D-cache */
	ori	r8,r8,HID0_DCI			/* unless it wasn't enabled */
1:	sync
	mtspr	SPRN_HID0,r8			/* enable and invalidate caches */
	sync
	mtspr	SPRN_HID0,r11			/* enable caches */
	sync
	isync
	blr

/* 604, 604e, 604ev, ...
 * Enable superscalar execution & branch history table
 * Clobbers r8, r11.
 */
setup_604_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SIED|HID0_BHTE
	ori	r8,r11,HID0_BTCD
	sync
	mtspr	SPRN_HID0,r8			/* flush branch target address cache */
	sync					/* on 604e/604r */
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr

/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
 * errata we work around here.
 * Moto MPC710CE.pdf describes them, those are errata
 * #3, #4 and #5
 * Note that we assume the firmware didn't choose to
 * apply other workarounds (there are other ones documented
 * in the .pdf). It appears that Apple firmware only works
 * around #3 and with the same fix we use. We may want to
 * check if the CPU is using 60x bus mode in which case
 * the workaround for errata #4 is useless. Also, we may
 * want to explicitly clear HID0_NOPDST as this is not
 * needed once we have applied workaround #5 (though it's
 * not set by Apple's firmware at least).
 */
setup_7400_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31			/* keep PVR minor revision */
	cmpwi	0,r3,0x0207
	ble	1f				/* rev <= 2.7: apply workarounds */
	blr
setup_7410_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31			/* keep PVR minor revision */
	cmpwi	0,r3,0x0100
	bnelr					/* only rev 1.0 needs them */
1:
	mfspr	r11,SPRN_MSSSR0
	/* Errata #3: Set L1OPQ_SIZE to 0x10 */
	rlwinm	r11,r11,0,9,6			/* clear the field first */
	oris	r11,r11,0x0100
	/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?
 */
	oris	r11,r11,0x0002
	/* Errata #5: Set DRLT_SIZE to 0x01 */
	rlwinm	r11,r11,0,5,2			/* clear the field first */
	oris	r11,r11,0x0800
	sync
	mtspr	SPRN_MSSSR0,r11
	sync
	isync
	blr

/* 740/750/7400/7410
 * Enable Store Gathering (SGE), Address Broadcast (ABE),
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Clear Instruction cache throttling (ICTC)
 *
 * BTIC/DPM are conditionally backed out again via feature
 * sections when the CPU is flagged CPU_FTR_NO_BTIC/NO_DPM.
 * Clobbers r3, r11.
 */
setup_750_7400_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
	oris	r11,r11,HID0_DPM@h
BEGIN_FTR_SECTION
	xori	r11,r11,HID0_BTIC		/* undo BTIC just set above */
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
	li	r3,HID0_SPD
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0
	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr

/* 750cx specific
 * Looks like we have to disable NAP feature for some PLL settings...
 * (waiting for confirmation)
 *
 * HID1 bits 0-3 encode the PLL config; if it is 7, 9 or 11 we clear
 * CPU_FTR_CAN_NAP in the cpu_spec feature word.
 * NOTE(review): assumes r5 points at the current struct cpu_spec
 * (CPU_SPEC_FEATURES offset) -- caller contract, confirm.
 * Clobbers r6, r7, r10, cr0-cr2.
 */
setup_750cx:
	mfspr	r10, SPRN_HID1
	rlwinm	r10,r10,4,28,31			/* r10 = PLL config (HID1 bits 0-3) */
	cmpwi	cr0,r10,7
	cmpwi	cr1,r10,9
	cmpwi	cr2,r10,11
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq	/* cr0.eq = (7 || 9) */
	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq	/* cr0.eq = (7 || 9 || 11) */
	bnelr					/* other PLL settings: nothing to do */
	lwz	r6,CPU_SPEC_FEATURES(r5)
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7			/* clear CAN_NAP feature bit */
	stw	r6,CPU_SPEC_FEATURES(r5)
	blr

/* 750fx specific
 * (nothing to do at setup time)
 */
setup_750fx:
	blr

/* MPC 745x
 * Enable Store Gathering (SGE), Branch Folding (FOLD)
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Ensure our data cache instructions really operate.
 * Timebase has to be running or we wouldn't have made it here,
 * just ensure we don't disable it.
 * Clear Instruction cache throttling (ICTC)
 * Enable L2 HW prefetch
 */
setup_745x_specifics:
	/* We check for the presence of an L3 cache setup by
	 * the firmware. If any, we disable NAP capability as
	 * it's known to be bogus on rev 2.1 and earlier
	 */
BEGIN_FTR_SECTION
	mfspr	r11,SPRN_L3CR
	andis.	r11,r11,L3CR_L3E@h		/* L3 enabled by firmware? */
	beq	1f				/* no L3: keep NAP */
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
	lwz	r6,CPU_SPEC_FEATURES(r5)	/* NOTE(review): r5 = cpu_spec ptr, caller contract */
	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
	beq	1f				/* this CPU doesn't need the workaround */
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7			/* clear CAN_NAP feature bit */
	stw	r6,CPU_SPEC_FEATURES(r5)
1:
	mfspr	r11,SPRN_HID0

	/* All of the bits we have to set.....
	 */
	ori	r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
	ori	r11,r11,HID0_LRSTK | HID0_BTIC
	oris	r11,r11,HID0_DPM@h
BEGIN_FTR_SECTION
	xori	r11,r11,HID0_BTIC		/* undo BTIC just set above */
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)

	/* All of the bits we have to clear....
	 */
	li	r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0

	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync

	/* Enable L2 HW prefetch, if L2 is enabled
	 */
	mfspr	r3,SPRN_L2CR
	andis.	r3,r3,L2CR_L2E@h
	beqlr					/* L2 off: done */
	mfspr	r3,SPRN_MSSCR0
	ori	r3,r3,3				/* set low two MSSCR0 bits (HW prefetch) */
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
	blr

/*
 * Initialize the FPU registers.
 This is needed to work around an errata
 * in some 750 cpus where using a not yet initialized FPU register after
 * power on reset may hang the CPU
 *
 * Temporarily sets MSR:FP, loads all 32 FPRs from empty_zero_page,
 * then restores the original MSR.  Clobbers r9, r10, r11 and f0-f31.
 * NOTE(review): r9 = r3 + &empty_zero_page -- r3 appears to carry an
 * address offset supplied by the caller; confirm the caller contract.
 */
_GLOBAL(__init_fpu_registers)
	mfmsr	r10				/* save current MSR */
	ori	r11,r10,MSR_FP
	mtmsr	r11				/* enable FP */
	isync
	addis	r9,r3,empty_zero_page@ha
	addi	r9,r9,empty_zero_page@l
	REST_32FPRS(0,r9)			/* load f0-f31 with zeroes */
	sync
	mtmsr	r10				/* restore original MSR */
	isync
	blr


/* Definitions for the table used to save CPU states
 * (byte offsets into cpu_state_storage, one 32-bit word each)
 */
#define CS_HID0		0
#define CS_HID1		4
#define CS_HID2		8
#define CS_MSSCR0	12
#define CS_MSSSR0	16
#define CS_ICTRL	20
#define CS_LDSTCR	24
#define CS_LDSTDB	28
#define CS_SIZE		32

	.data
	.balign	L1_CACHE_BYTES
cpu_state_storage:				/* register-save area, cache-line aligned */
	.space	CS_SIZE
	.balign	L1_CACHE_BYTES,0
	.text

/* Called in normal context to backup CPU 0 state. This
 * does not include cache settings. This function is also
 * called for machine sleep. This does not include the MMU
 * setup, BATs, etc... but rather the "special" registers
 * like HID0, HID1, MSSCR0, etc...
 */
_GLOBAL(__save_cpu_setup)
	/* Some CR fields are volatile, we back it up all */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,cpu_state_storage@h
	ori	r5,r5,cpu_state_storage@l

	/* Save HID0 (common to all CONFIG_6xx cpus) */
	mfspr	r3,SPRN_HID0
	stw	r3,CS_HID0(r5)

	/* Now deal with CPU type dependent registers.
	 * PVR upper half identifies the CPU; the cror chain below folds
	 * the individual matches into: cr1.eq = 7400 || 7410,
	 * cr0.eq = any 74xx part, cr6.eq = 750FX.
	 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	cmplwi	cr7,r3,0x8004	/* 7448 */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr7+eq
	bne	1f				/* not a 74xx */
	/* Backup 74xx specific regs */
	mfspr	r4,SPRN_MSSCR0
	stw	r4,CS_MSSCR0(r5)
	mfspr	r4,SPRN_MSSSR0
	stw	r4,CS_MSSSR0(r5)
	beq	cr1,1f				/* 7400/7410 stop here */
	/* Backup 745x specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	mfspr	r4,SPRN_ICTRL
	stw	r4,CS_ICTRL(r5)
	mfspr	r4,SPRN_LDSTCR
	stw	r4,CS_LDSTCR(r5)
	mfspr	r4,SPRN_LDSTDB
	stw	r4,CS_LDSTDB(r5)
1:
	bne	cr6,1f				/* not a 750FX */
	/* Backup 750FX specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	/* If rev 2.x, backup HID2 */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00			/* keep major revision field */
	cmpwi	cr0,r3,0x0200
	bne	1f
	mfspr	r4,SPRN_HID2
	stw	r4,CS_HID2(r5)
1:
	mtcr	r7				/* restore caller's CR */
	blr

/* Called with no MMU context (typically MSR:IR/DR off) to
 * restore CPU state as backed up by the previous
 * function.
 This does not include cache setting
 */
_GLOBAL(__restore_cpu_setup)
	/* Some CR fields are volatile, we back it up all */
	mfcr	r7

	/* Get storage ptr.  MMU may be off, so use the physical address:
	 * only the high part needs the KERNELBASE adjustment; the low
	 * 16 bits of the symbol are unchanged by the subtraction
	 * (KERNELBASE is 64K aligned -- NOTE(review): relied upon here).
	 */
	lis	r5,(cpu_state_storage-KERNELBASE)@h
	ori	r5,r5,cpu_state_storage@l

	/* Restore HID0 */
	lwz	r3,CS_HID0(r5)
	sync
	isync
	mtspr	SPRN_HID0,r3
	sync
	isync

	/* Now deal with CPU type dependent registers.
	 * Same PVR decode as __save_cpu_setup: cr1.eq = 7400 || 7410,
	 * cr0.eq = any 74xx part, cr2.eq = 7410, cr6.eq = 750FX.
	 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	cmplwi	cr7,r3,0x8004	/* 7448 */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr7+eq
	bne	2f				/* not a 74xx */
	/* Restore 74xx specific regs */
	lwz	r4,CS_MSSCR0(r5)
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lwz	r4,CS_MSSSR0(r5)
	sync
	mtspr	SPRN_MSSSR0,r4
	sync
	isync
	bne	cr2,1f				/* not a 7410 */
	/* Clear 7410 L2CR2 */
	li	r4,0
	mtspr	SPRN_L2CR2,r4
1:	beq	cr1,2f				/* 7400/7410 stop here */
	/* Restore 745x specific registers */
	lwz	r4,CS_HID1(r5)
	sync
	mtspr	SPRN_HID1,r4
	isync
	sync
	lwz	r4,CS_ICTRL(r5)
	sync
	mtspr	SPRN_ICTRL,r4
	isync
	sync
	lwz	r4,CS_LDSTCR(r5)
	sync
	mtspr	SPRN_LDSTCR,r4
	isync
	sync
	lwz	r4,CS_LDSTDB(r5)
	sync
	mtspr	SPRN_LDSTDB,r4
	isync
	sync
2:	bne	cr6,1f				/* not a 750FX */
	/* Restore 750FX specific registers
	 * that is restore HID2 on rev 2.x and PLL config & switch
	 * to PLL 0 on all
	 */
	/* If rev 2.x, restore HID2 with low voltage bit cleared */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00			/* keep major revision field */
	cmpwi	cr0,r3,0x0200
	bne	4f
	lwz	r4,CS_HID2(r5)
	rlwinm	r4,r4,0,19,17			/* clear bit 18 (low voltage) */
	mtspr	SPRN_HID2,r4
	sync
4:
	lwz	r4,CS_HID1(r5)
	rlwinm	r5,r4,0,16,14			/* r5 = saved HID1 with bit 15 cleared
						 * (forces PLL 0 first; r5 no longer
						 * needed as storage ptr from here) */
	mtspr	SPRN_HID1,r5
	/* Wait for PLL to stabilize: busy-wait ~10000 timebase ticks */
	mftbl	r5
3:	mftbl	r6
	sub	r6,r6,r5
	cmplwi	cr0,r6,10000
	ble	3b
	/* Setup final PLL */
	mtspr	SPRN_HID1,r4			/* now the originally saved HID1 */
1:
	mtcr	r7				/* restore caller's CR */
	blr
