/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 *    Copyright 2002-2004 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *    Copyright 2004 Freescale Semiconductor, Inc
 *      PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include "head_booke.h"

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 */
	.section .text.head, "ax"
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
/*
 * Save parameters we are passed
 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r25,0		/* phys kernel start (low) */
	li	r24,0		/* CPU number */
	li	r23,0		/* phys kernel start (high) */

/* We try to not make any assumptions about how the boot loader
 * set up or used the TLBs.  We invalidate all mappings from the
 * boot loader and load a single entry in TLB1[0] to map the
 * first 64M of kernel memory.  Any boot info passed from the
 * bootloader needs to live in this first 64M.
 *
 * Requirement on bootloader:
 *  - The page we're executing in needs to reside in TLB1 and
 *    have IPROT=1.  If not, an invalidate broadcast could
 *    evict the entry we're currently executing in.
 *
 *  r3 = Index of TLB1 entry we're executing in
 *  r4 = Current MSR[IS]
 *  r5 = Index of TLB1 temp mapping
 *
 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
 * if needed
 */

_ENTRY(__early_start)
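/* A note on the search protocol below, per the Book-E/e500 MAS register
 * interface: MAS6 carries the search keys (SPID in its upper half, SAS
 * in its low bit), and a tlbsx that hits fills MAS0-MAS3 with the
 * matching entry's index and contents (MAS1[V] set on a hit).  Since we
 * don't know which PID register the bootloader translated us under, the
 * code tries PID0, PID1 and PID2 in turn.
 */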
/* 1. Find the index of the entry we're executing in */
	bl	invstr			/* Find our address */
invstr:	mflr	r6			/* Make it accessible */
	mfmsr	r7
	rlwinm	r4,r7,27,31,31		/* extract MSR[IS] */
	mfspr	r7, SPRN_PID0
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6			/* search MSR[IS], SPID=PID0 */
#ifndef CONFIG_E200
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB
	mfspr	r7,SPRN_PID1
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6			/* search MSR[IS], SPID=PID1 */
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB
	mfspr	r7, SPRN_PID2
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6			/* Fall through, we had to match */
#endif
match_TLB:
	mfspr	r7,SPRN_MAS0
	rlwinm	r3,r7,16,20,31		/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1		/* Ensure IPROT is set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in */
	mfspr	r9,SPRN_TLB1CFG
	andi.	r9,r9,0xfff
	li	r6,0			/* Set Entry counter to 0 */
1:	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r6,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31		/* Clear MAS1 Valid and IPROT */
	cmpw	r3,r6
	beq	skpinv			/* Don't update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1			/* Increment */
	cmpw	r6,r9			/* Are we done? */
	bne	1b			/* If not, repeat */

	/* Invalidate TLB0 */
	li	r6,0x04
	tlbivax	0,r6
	TLBSYNC
	/* Invalidate TLB1 */
	li	r6,0x0c
	tlbivax	0,r6
	TLBSYNC

/* 3. Setup a temp mapping and jump to it */
	andi.	r5, r3, 0x1	/* Pick a non-zero entry other than the one we're in */
	addi	r5, r5, 0x1
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre

	/* grab and fixup the RPN */
	mfspr	r6,SPRN_MAS1	/* extract MAS1[SIZE] */
	rlwinm	r6,r6,25,27,30
	li	r8,-1
	addi	r6,r6,10
	slw	r6,r8,r6	/* convert to mask */

	bl	1f		/* Find our address */
1:	mflr	r7

	mfspr	r8,SPRN_MAS3
#ifdef CONFIG_PHYS_64BIT
	mfspr	r23,SPRN_MAS7
#endif
	and	r8,r6,r8
	subfic	r9,r6,-4096
	and	r9,r9,r7

	or	r25,r8,r9
	ori	r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR)

	/* Just modify the entry ID and EPN for the temp mapping */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	xori	r6,r4,1		/* Setup TMP mapping in the other Address space */
	slwi	r6,r6,12
	oris	r6,r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
	mtspr	SPRN_MAS1,r6
	mfspr	r6,SPRN_MAS2
	li	r7,0		/* temp EPN = 0 */
	rlwimi	r7,r6,0,20,31
	mtspr	SPRN_MAS2,r7
	mtspr	SPRN_MAS3,r8
	tlbwe

	xori	r6,r4,1
	slwi	r6,r6,5		/* setup new context with other address space */
	bl	1f		/* Find our address */
1:	mflr	r9
	rlwimi	r7,r9,0,20,31
	addi	r7,r7,24
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi

/* 4. Clear out PIDs & Search info */
	li	r6,0
	mtspr	SPRN_PID0,r6
#ifndef CONFIG_E200
	mtspr	SPRN_PID1,r6
	mtspr	SPRN_PID2,r6
#endif
	mtspr	SPRN_MAS6,r6
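	/* Note: we are running from the TID=0 temp mapping at this
	 * point, and with PID0-PID2 zeroed only TID=0 entries can
	 * match, which also covers the KERNELBASE entry written in
	 * step 6.  That makes it safe to drop the IPROT-protected
	 * entry the bootloader left us in.
	 */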
/* 5. Invalidate mapping we started in */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r6
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
	TLBSYNC

/* The mapping only needs to be cache-coherent on SMP */
#ifdef CONFIG_SMP
#define M_IF_SMP	MAS2_M
#else
#define M_IF_SMP	0
#endif

/* 6. Setup KERNELBASE mapping in TLB1[0] */
	lis	r6,0x1000	/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
	mtspr	SPRN_MAS0,r6
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
	mtspr	SPRN_MAS1,r6
	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@h
	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@l
	mtspr	SPRN_MAS2,r6
	mtspr	SPRN_MAS3,r8
	tlbwe

/* 7. Jump to KERNELBASE mapping */
	lis	r6,(KERNELBASE & ~0xfff)@h
	ori	r6,r6,(KERNELBASE & ~0xfff)@l
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	bl	1f		/* Find our address */
1:	mflr	r9
	rlwimi	r6,r9,0,20,31
	addi	r6,r6,(2f - 1b)
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi			/* start execution out of TLB1[0] entry */

/* 8. Clear out the temp mapping */
2:	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r8,SPRN_MAS1
	rlwinm	r8,r8,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r8
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
	TLBSYNC

	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, DebugDebug);
#if defined(CONFIG_E500) && !defined(CONFIG_PPC_E500MC)
	SET_IVOR(15, DebugCrit);
#endif
	SET_IVOR(32, SPEUnavailable);
	SET_IVOR(33, SPEFloatingPointData);
	SET_IVOR(34, SPEFloatingPointRound);
#ifndef CONFIG_E200
	SET_IVOR(35, PerformanceMonitor);
#endif
#ifdef CONFIG_PPC_E500MC
	SET_IVOR(36, Doorbell);
#endif

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/* Setup the defaults for TLB entries */
	li	r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
#ifdef CONFIG_E200
	oris	r2,r2,MAS4_TLBSELD(1)@h
#endif
	mtspr	SPRN_MAS4, r2

#if 0
	/* Enable DOZE */
	mfspr	r2,SPRN_HID0
	oris	r2,r2,HID0_DOZE@h
	mtspr	SPRN_HID0, r2
#endif
#ifdef CONFIG_E200
	/* enable dedicated debug exception handling resources (Debug APU) */
	mfspr	r2,SPRN_HID0
	ori	r2,r2,HID0_DAPUEN@l
	mtspr	SPRN_HID0,r2
#endif
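/* A note on the debug setup below: DBCR0[IDM] selects internal debug
 * mode, and Book-E DBSR status bits are write-one-to-clear, which is
 * why storing -1 wipes any events the bootloader left pending.
 */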
#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */
	lis	r2,DBCR0_IDM@h
	mtspr	SPRN_DBCR0,r2
	isync
	/* clear any residual debug events */
	li	r2,-1
	mtspr	SPRN_DBSR,r2
#endif

#ifdef CONFIG_SMP
	/* Check to see if we're the second processor, and jump
	 * to the secondary_start code if so
	 */
	mfspr	r24,SPRN_PIR
	cmpwi	r24,0
	bne	__secondary_start
#endif

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

#ifdef CONFIG_RELOCATABLE
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)
#ifdef CONFIG_PHYS_64BIT
	stw	r23,0(r3)
	stw	r25,4(r3)
#else
	stw	r25,0(r3)
#endif
#endif

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r10 -- EA of fault
 *   r11 -- PGDIR pointer
 *   r12 -- free
 *   label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 */
#ifdef CONFIG_PTE_64BIT
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif
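/* An illustrative walk-through of the 32-bit FIND_PTE flavor: for a
 * fault at EA 0xc1234567 with a 4K-aligned pgdir, the first rlwimi
 * deposits EA[0:9] * 4 (pgd index 0x304; each pgd entry maps 4M) into
 * the pgdir pointer; after the L1 entry is loaded and its low 12 bits
 * masked off, the second rlwimi deposits EA[10:19] * 4 (pte index
 * 0x234), so the final lwz fetches the Linux PTE itself.  The 64-bit
 * flavor is the same walk with 8-byte PTEs: 2048 pgd entries of 2M
 * each, 512 PTEs per table, flags taken from the low word at offset 4.
 */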
/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
#ifdef CONFIG_E200
	/* no RFMCI, MCSRRs on E200 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#else
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif

	/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
	bne	1f
	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
1:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
#ifdef CONFIG_E200
	/* E200 treats 'normal' floating point instructions as FP Unavail exception */
	EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
#else
	EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
#endif

	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
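	/* Both paths rejoin at 4: below with r11 holding the pgd
	 * pointer to walk.
	 */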
4:
	/* Mask of required permission bits.  Note that while we
	 * do copy ESR:ST to the _PAGE_RW position, since trying to
	 * write to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY.  We could, but it's a fairly rare event, so
	 * I'd rather take the overhead when it happens than add an
	 * instruction here.  We should measure whether the whole
	 * thing is worth it, as we could avoid loading SPRN_ESR
	 * entirely...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,11,29,29

	FIND_PTE
	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r11,r12		/* create false data dep */
	lwzx	r13,r11,r10		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif

	bne	2f			/* Bail if permission/valid mismatch */

	/* Jump to common tlb load */
	b	finish_tlb_load
2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)

4:
	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC

	FIND_PTE
	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r11,r12		/* create false data dep */
	lwzx	r13,r11,r10		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif

	bne	2f			/* Bail if permission mismatch */

	/* Jump to common TLB load point */
	b	finish_tlb_load
2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	InstructionStorage

#ifdef CONFIG_SPE
	/* SPE Unavailable */
	START_EXCEPTION(SPEUnavailable)
	NORMAL_EXCEPTION_PROLOG
	bne	load_up_spe
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
	EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);

	/* SPE Floating Point Round */
	EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
#else
	EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* Performance Monitor */
	EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)

#ifdef CONFIG_PPC_E500MC
	EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_STD)
#endif

	/* Debug Interrupt */
	DEBUG_DEBUG_EXCEPTION
#if defined(CONFIG_E500) && !defined(CONFIG_PPC_E500MC)
	DEBUG_CRIT_EXCEPTION
#endif

/*
 * Local functions
 */

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *   r10 - available to use
 *   r11 - TLB (info from Linux PTE)
 *   r12 - available to use
 *   r13 - upper bits of PTE (if PTE_64BIT) or available to use
 *   CR5 - results of addr >= PAGE_OFFSET
 *   MAS0, MAS1 - loaded with proper value when we get here
 *   MAS2, MAS3 - will need additional info from Linux PTE
 *   Upon exit, we reload everything and RFI.
 */
finish_tlb_load:
	/*
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * Many of these bits are software only.  Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */

	mfspr	r12, SPRN_MAS2
#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r11, 26, 24, 31	/* extract ...WIMGE from pte */
#else
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
#endif
#ifdef CONFIG_SMP
	ori	r12, r12, MAS2_M
#endif
	mtspr	SPRN_MAS2, r12

	li	r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
	rlwimi	r10, r11, 31, 29, 29	/* extract _PAGE_DIRTY into SW */
	and	r12, r11, r10
	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
	slwi	r10, r12, 1
	or	r10, r10, r12
	iseleq	r12, r12, r10

#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
	mtspr	SPRN_MAS3, r12
BEGIN_MMU_FTR_SECTION
	srwi	r10, r13, 8		/* grab RPN[8:31] */
	mtspr	SPRN_MAS7, r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#else
	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
	mtspr	SPRN_MAS3, r11
#endif
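	/* A note on the round-robin logic below: MAS0[NV] names the
	 * next victim entry; it is bumped on each miss and, once it
	 * reaches TLB1CFG[NENTRY], wrapped back to tlbcam_index, the
	 * first entry not pinned by the boot mapping.
	 */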
#ifdef CONFIG_E200
	/* Round robin TLB1 entries assignment */
	mfspr	r12, SPRN_MAS0

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r11, SPRN_TLB1CFG
	andi.	r11, r11, 0xfff

	/* Extract MAS0(NV) */
	andi.	r13, r12, 0xfff
	addi	r13, r13, 1
	cmpw	0, r13, r11
	addi	r12, r12, 1

	/* check if we need to wrap */
	blt	7f

	/* wrap back to first free tlbcam entry */
	lis	r13, tlbcam_index@ha
	lwz	r13, tlbcam_index@l(r13)
	rlwimi	r12, r13, 0, 20, 31
7:
	mtspr	SPRN_MAS0,r12
#endif /* CONFIG_E200 */

	tlbwe

	/* Done...restore registers and get out of here. */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	rfi				/* Force context change */

#ifdef CONFIG_SPE
/* Note that the SPE support is closely modeled after the AltiVec
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_spe:
/*
 * Disable SPE for the task which had SPE previously,
 * and save its SPE registers in its thread_struct.
 * Enables SPE for use in the kernel on return.
 * On SMP we know the SPE units are free, since we give them up on
 * every switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
/*
 * For SMP, we don't do lazy SPE switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_spe in switch_to.
 */
#ifndef CONFIG_SMP
	lis	r3,last_task_used_spe@ha
	lwz	r4,last_task_used_spe@l(r3)
	cmpi	0,r4,0
	beq	1f
	addi	r4,r4,THREAD		/* want THREAD of last_task_used_spe */
	SAVE_32EVRS(0,r10,r4)
	evxor	evr10, evr10, evr10	/* clear out evr10 */
	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
	li	r5,THREAD_ACC
	evstddx	evr10, r4, r5		/* save off accumulator */
	lwz	r5,PT_REGS(r4)
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_SPE@h
	andc	r4,r4,r10		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* !CONFIG_SMP */
	/* enable use of SPE after return */
	oris	r9,r9,MSR_SPE@h
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_ACC
	stw	r4,THREAD_USED_SPE(r5)
	evlddx	evr4,r10,r5
	evmra	evr4,evr4
	REST_32EVRS(0,r10,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	stw	r4,last_task_used_spe@l(r3)
#endif /* !CONFIG_SMP */
	/* restore registers and return */
2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	rfi
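/* Note that the exit path above unwinds the exception frame by hand
 * and returns with rfi directly instead of going through
 * ret_from_except: once the SPE state is live the fault is fully
 * handled, presumably to keep this common path as short as possible.
 */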
/*
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
KernelSPE:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_SPE@h
	stw	r3,_MSR(r1)	/* enable use of SPE after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"SPE used in kernel (task=%p, pc=%x) \n"
	.align	4,0

#endif /* CONFIG_SPE */

/*
 * Global functions
 */

/*
 * extern void loadcam_entry(unsigned int index)
 *
 * Load TLBCAM[index] entry into the L2 CAM MMU
 */
_GLOBAL(loadcam_entry)
	lis	r4,TLBCAM@ha
	addi	r4,r4,TLBCAM@l
	mulli	r5,r3,TLBCAM_SIZE
	add	r3,r5,r4
	lwz	r4,0(r3)
	mtspr	SPRN_MAS0,r4
	lwz	r4,4(r3)
	mtspr	SPRN_MAS1,r4
	lwz	r4,8(r3)
	mtspr	SPRN_MAS2,r4
	lwz	r4,12(r3)
	mtspr	SPRN_MAS3,r4
	tlbwe
	isync
	blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The e500 core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr

#ifdef CONFIG_SPE
/*
 * extern void giveup_spe(struct task_struct *prev)
 */
_GLOBAL(giveup_spe)
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32EVRS(0, r4, r3)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	li	r4,THREAD_ACC
	evstddx	evr6, r4, r3		/* save off accumulator */
	mfspr	r6,SPRN_SPEFSCR
	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_SPE@h
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_spe@ha
	stw	r5,last_task_used_spe@l(r4)
#endif /* !CONFIG_SMP */
	blr
#endif /* CONFIG_SPE */

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * Not all FSL Book-E cores have an FPU
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif

/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
_GLOBAL(abort)
	li	r13,0
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	isync
	mfmsr	r13
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mtmsr	r13
	isync
	mfspr	r13,SPRN_DBCR0
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
	isync

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync				/* Force context change */
	blr

_GLOBAL(flush_dcache_L1)
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 * log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
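	/* The shift below scales the loads-per-set estimate by the
	 * number of sets, giving the total number of displacement
	 * loads in r7.
	 */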
	slw	r7,r7,r6

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	lis	r4,KERNELBASE@h
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	lis	r4,KERNELBASE@h
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr

#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
	.globl __secondary_start
__secondary_start:
	lis	r3,__secondary_hold_acknowledge@h
	ori	r3,r3,__secondary_hold_acknowledge@l
	stw	r24,0(r3)

	li	r3,0
	mr	r4,r24		/* Why? */
	bl	call_setup_cpu

	lis	r3,tlbcam_index@ha
	lwz	r3,tlbcam_index@l(r3)
	mtctr	r3
	li	r26,0		/* r26 safe? */

	/* Load each CAM entry */
1:	mr	r3,r26
	bl	loadcam_entry
	addi	r26,r26,1
	bdnz	1b

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	lwz	r2,TI_TASK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* address of our thread_struct */
	mtspr	SPRN_SPRG3,r4

	/* Setup the defaults for TLB entries */
	li	r4,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
	mtspr	SPRN_MAS4,r4

	/* Jump to start_secondary */
	lis	r4,MSR_KERNEL@h
	ori	r4,r4,MSR_KERNEL@l
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	sync
	rfi
	sync

	.globl __secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
#endif

/*
 * We put a few things here that have to be page-aligned.  This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8