/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

	.text

/*
 * call_do_softirq(sp)
 *
 * Run __do_softirq() on the stack whose base is passed in r3:
 * save LR in the caller's frame, build an initial frame at the top of
 * the new stack (stdu leaves the back-chain word pointing at the old
 * r1), switch r1 over, call __do_softirq(), then unwind through the
 * back-chain and return.
 */
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)		/* save LR in caller's frame */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3			/* switch to the new stack */
	bl	__do_softirq
	ld	r1,0(r1)		/* back-chain -> original stack */
	ld	r0,16(r1)
	mtlr	r0
	blr

/*
 * call_do_irq(regs, sp)
 *
 * Same stack-switch dance as call_do_softirq, but the new stack base
 * is in r4 and r3 is left untouched so it is passed straight through
 * as the first argument of __do_irq().
 */
_GLOBAL(call_do_irq)
	mflr	r0
	std	r0,16(r1)		/* save LR in caller's frame */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4			/* switch to the new stack */
	bl	__do_irq
	ld	r1,0(r1)		/* back-chain -> original stack */
	ld	r0,16(r1)
	mtlr	r0
	blr

/*
 * __bswapdi2(value)
 *
 * Byte-reverse the 64-bit value in r3 and return it in r3
 * (libgcc helper; each 32-bit half is swapped with the usual
 * rlwinm/rlwimi sequence, then the halves are exchanged).
 */
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32		/* r8 = high word */
	rlwinm	r7,r3,8,0xffffffff	/* swap bytes of low word into r7 */
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff	/* swap bytes of high word into r9 */
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32		/* swapped low word becomes high half */
	or	r3,r7,r9
	blr


#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
/*
 * rmci_on / rmci_off
 *
 * Set / clear bit (0x100 << 32) in HID4, flushing the SLB around the
 * update.  NOTE(review): presumably this is the PPC970 real-mode
 * cache-inhibit (RM_CI) control, matching the "rmci" name -- confirm
 * against the 970 manual.  Only used for early BootX debug.
 */
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0		/* r3 = 0x100 << 32 */
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3		/* set the bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0		/* r3 = 0x100 << 32 */
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3		/* clear the bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

/*
 * Do an IO access in real mode
 */
/*
 * real_readb(addr)
 *
 * Byte load with data relocation (MSR_DR) turned off.  While MSR_DR
 * is off, bit (0x100 << 32) is set in HID4 for the duration of the
 * access, with SLB flushes around each HID4 update.
 * NOTE(review): presumably that HID4 bit makes real-mode accesses
 * cache-inhibited on PPC970 -- confirm against the CPU manual.
 */
_GLOBAL(real_readb)
	mfmsr	r7			/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with MSR_DR cleared */
	sync
	mtmsrd	r0			/* data translation off */
	sync
	isync
	mfspr	r6,SPRN_HID4		/* save HID4 */
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0		/* r5 = HID4 | (0x100 << 32) */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)		/* the actual access */
	sync
	mtspr	SPRN_HID4,r6		/* restore HID4 */
	isync
	slbia
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr

	/*
	 * real_writeb(val, addr) -- store byte r3 to address r4 under
	 * the same MSR_DR-off / HID4 sequence as real_readb above.
	 */
_GLOBAL(real_writeb)
	mfmsr	r7			/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with MSR_DR cleared */
	sync
	mtmsrd	r0			/* data translation off */
	sync
	isync
	mfspr	r6,SPRN_HID4		/* save HID4 */
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0		/* r5 = HID4 | (0x100 << 32) */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)		/* the actual access */
	sync
	mtspr	SPRN_HID4,r6		/* restore HID4 */
	isync
	slbia
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

/*
 * real_205_readb(addr)
 *
 * Byte load with MSR_DR off using the ISA 2.05 cache-inhibited
 * load (lbzcix), so no HID4 fiddling is needed.  RA=R0 means the
 * effective address is just (r3); the result is returned in r3.
 */
_GLOBAL(real_205_readb)
	mfmsr	r7			/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with MSR_DR cleared */
	sync
	mtmsrd	r0			/* data translation off */
	sync
	isync
	LBZCIX(R3,R0,R3)		/* cache-inhibited load from (r3) */
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr

/*
 * real_205_writeb(val, addr)
 *
 * Byte store of r3 to (r4) with MSR_DR off, using the ISA 2.05
 * cache-inhibited store (stbcix).
 */
_GLOBAL(real_205_writeb)
	mfmsr	r7			/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with MSR_DR cleared */
	sync
	mtmsrd	r0			/* data translation off */
	sync
	isync
	STBCIX(R3,R0,R4)		/* cache-inhibited store to (r4) */
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */


#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24 bits register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
208 */ 209_GLOBAL(scom970_read) 210 /* interrupts off */ 211 mfmsr r4 212 ori r0,r4,MSR_EE 213 xori r0,r0,MSR_EE 214 mtmsrd r0,1 215 216 /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits 217 * (including parity). On current CPUs they must be 0'd, 218 * and finally or in RW bit 219 */ 220 rlwinm r3,r3,8,0,15 221 ori r3,r3,0x8000 222 223 /* do the actual scom read */ 224 sync 225 mtspr SPRN_SCOMC,r3 226 isync 227 mfspr r3,SPRN_SCOMD 228 isync 229 mfspr r0,SPRN_SCOMC 230 isync 231 232 /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah 233 * that's the best we can do). Not implemented yet as we don't use 234 * the scom on any of the bogus CPUs yet, but may have to be done 235 * ultimately 236 */ 237 238 /* restore interrupts */ 239 mtmsrd r4,1 240 blr 241 242 243_GLOBAL(scom970_write) 244 /* interrupts off */ 245 mfmsr r5 246 ori r0,r5,MSR_EE 247 xori r0,r0,MSR_EE 248 mtmsrd r0,1 249 250 /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits 251 * (including parity). On current CPUs they must be 0'd. 252 */ 253 254 rlwinm r3,r3,8,0,15 255 256 sync 257 mtspr SPRN_SCOMD,r4 /* write data */ 258 isync 259 mtspr SPRN_SCOMC,r3 /* write command */ 260 isync 261 mfspr 3,SPRN_SCOMC 262 isync 263 264 /* restore interrupts */ 265 mtmsrd r5,1 266 blr 267#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */ 268 269/* kexec_wait(phys_cpu) 270 * 271 * wait for the flag to change, indicating this kernel is going away but 272 * the slave code for the next one is at addresses 0 to 100. 273 * 274 * This is used by all slaves, even those that did not find a matching 275 * paca in the secondary startup code. 276 * 277 * Physical (hardware) cpu id should be in r3. 
 */
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b	/* r5 = &kexec_flag (PC-relative) */

99:	HMT_LOW
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b			/* spin until the flag goes non-zero */
#ifdef CONFIG_PPC_BOOK3S_64
	/* jump to the new kernel's slave entry at absolute 0x60 via
	 * rfid, so the MSR change takes effect atomically with the
	 * branch */
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60			/* absolute branch to slave entry */
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real anyways
 */
kexec_flag:
	.long	0


#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC_BOOK3E
/*
 * BOOK3E has no real MMU mode, so we have to setup the initial TLB
 * for a core to identity map v:0 to p:0. This current implementation
 * assumes that 1G is enough for kexec.
 */
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1	/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15	/* MAS0 = TLBSEL(1) | ESEL(last entry, r10) */

/* Set up a temp identity mapping v:0 to p:0 and return to it.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define M_IF_NEEDED	MAS2_M	/* memory coherence needed on SMP/e500mc */
#else
#define M_IF_NEEDED	0
#endif
	mtspr	SPRN_MAS0,r9

	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9		/* valid, protected, 1GB page */

	LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9		/* effective address 0 */

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9		/* real address 0, supervisor RWX */
	li	r9,0
	mtspr	SPRN_MAS7,r9		/* upper real-address bits = 0 */

	tlbwe
	isync
	blr
#endif

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)	/* phys cpu id; stays live into kexec_wait */
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)	/* mark this paca no longer in use */
	SYNC

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E
	/* Create an identity mapping.
	 */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9	/* current MSR with RI cleared */
	andc	r10,r12,r10	/* ... and with DR|IR also cleared, for SRR1 */

	mtmsrd	r9,1		/* RI off: no recoverable interrupts from here */
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid			/* "return" to caller with the MMU off */
#endif

/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
	          copy_with_mmu_off)
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */

_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)	/* zero LR save word: end of chain on new stack */

BEGIN_FTR_SECTION
	/*
	 * This is the best time to turn AMR/IAMR off.
	 * key 0 is used in radix for supervisor<->user
	 * protection, but on hash key 0 is reserved
	 * ideally we want to enter with a clean state.
	 * NOTE, we rely on r0 being 0 from above.
	 */
	mtspr	SPRN_IAMR,r0
BEGIN_FTR_SECTION_NESTED(42)
	mtspr	SPRN_AMOR,r0
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
437 */ 438 std r31,-8(r1) 439 std r30,-16(r1) 440 std r29,-24(r1) 441 std r28,-32(r1) 442 std r27,-40(r1) 443 std r26,-48(r1) 444 std r25,-56(r1) 445 446 stdu r1,-STACK_FRAME_OVERHEAD-64(r1) 447 448 /* save args into preserved regs */ 449 mr r31,r3 /* newstack (both) */ 450 mr r30,r4 /* start (real) */ 451 mr r29,r5 /* image (virt) */ 452 mr r28,r6 /* control, unused */ 453 mr r27,r7 /* clear_all() fn desc */ 454 mr r26,r8 /* copy_with_mmu_off */ 455 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ 456 457 /* disable interrupts, we are overwriting kernel data next */ 458#ifdef CONFIG_PPC_BOOK3E 459 wrteei 0 460#else 461 mfmsr r3 462 rlwinm r3,r3,0,17,15 463 mtmsrd r3,1 464#endif 465 466 /* We need to turn the MMU off unless we are in hash mode 467 * under a hypervisor 468 */ 469 cmpdi r26,0 470 beq 1f 471 bl real_mode 4721: 473 /* copy dest pages, flush whole dest image */ 474 mr r3,r29 475 bl kexec_copy_flush /* (image) */ 476 477 /* turn off mmu now if not done earlier */ 478 cmpdi r26,0 479 bne 1f 480 bl real_mode 481 482 /* copy 0x100 bytes starting at start to 0 */ 4831: li r3,0 484 mr r4,r30 /* start, aka phys mem offset */ 485 li r5,0x100 486 li r6,0 487 bl copy_and_flush /* (dest, src, copy limit, start offset) */ 4881: /* assume normal blr return */ 489 490 /* release other cpus to the new kernel secondary start at 0x60 */ 491 mflr r5 492 li r6,1 493 stw r6,kexec_flag-1b(5) 494 495 cmpdi r27,0 496 beq 1f 497 498 /* clear out hardware hash page table and tlb */ 499#ifdef PPC64_ELF_ABI_v1 500 ld r12,0(r27) /* deref function descriptor */ 501#else 502 mr r12,r27 503#endif 504 mtctr r12 505 bctrl /* mmu_hash_ops.hpte_clear_all(void); */ 506 507/* 508 * kexec image calling is: 509 * the first 0x100 bytes of the entry point are copied to 0 510 * 511 * all slaves branch to slave = 0x60 (absolute) 512 * slave(phys_cpu_id); 513 * 514 * master goes to start = entry point 515 * start(phys_cpu_id, start, 0); 516 * 517 * 518 * a wrapper is needed to call 
existing kernels, here is an approximate 519 * description of one method: 520 * 521 * v2: (2.6.10) 522 * start will be near the boot_block (maybe 0x100 bytes before it?) 523 * it will have a 0x60, which will b to boot_block, where it will wait 524 * and 0 will store phys into struct boot-block and load r3 from there, 525 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again 526 * 527 * v1: (2.6.9) 528 * boot block will have all cpus scanning device tree to see if they 529 * are the boot cpu ????? 530 * other device tree differences (prop sizes, va vs pa, etc)... 531 */ 5321: mr r3,r25 # my phys cpu 533 mr r4,r30 # start, aka phys mem offset 534 mtlr 4 535 li r5,0 536 blr /* image->start(physid, image->start, 0); */ 537#endif /* CONFIG_KEXEC_CORE */ 538