/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

#include "head_32.h"

#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
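
/*
 * Each BATS[] entry consumed by LOAD_BAT above is four words -- the
 * IBATnU, IBATnL, DBATnU and DBATnL values -- hence the (n*16) byte
 * offsets.  The upper (valid-bit) halves are zeroed first because the
 * 604e requires the valid bit to be cleared before a BAT is changed;
 * see the comment above clear_bats().
 */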

	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_book3s_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons; the kernel used to need to
	 * look like a COFF function entry for the pmac, but we're always
	 * started by some kind of bootloader now.
	 *  -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* prom_init() never returns.  We also hit this trap if we try to
	 * boot from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected. */
	trap

/*
 * Check for the BootX signature when supporting PowerMac, and branch to
 * the appropriate trampoline if it is present.
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
	bl	load_segment_registers
BEGIN_MMU_FTR_SECTION
	bl	reloc_offset
	bl	early_hash_table
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	bl	reloc_offset
	bl	init_idle_6xx


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * We now have the first 256MB of RAM mapped with the BATs (see
 * initial_bats).  prep needs the mmu to be turned on here, but pmac
 * already has it on.  This shouldn't bother the pmac since it just gets
 * turned on again as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR|MSR_RI
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	RFI				/* enables MMU */

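/*
 * (rfi loads the PC from SRR0 and the MSR from SRR1, so the two mtspr
 * pairs above atomically enable translation and branch to start_here
 * at its virtual address.)
 */
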
/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1

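/*
 * Hold protocol, as used above: each secondary stores a non-zero value
 * to __secondary_hold_acknowledge so the boot CPU knows it has arrived,
 * then spins reading physical address 0 until the boot CPU writes that
 * secondary's number there, at which point it jumps to __secondary_start
 * with its cpu # in r24.
 */
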
/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done.  */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp.  The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	DO_KVM  0x200
MachineCheck:
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	mr	r11, r1
	mfspr	r1, SPRN_SPRG_THREAD
	lwz	r1, RTAS_SP(r1)
	cmpwi	cr1, r1, 0
	bne	cr1, 7f
	mr	r1, r11
#else
	mfspr	r11, SPRN_SPRG_THREAD
	lwz	r11, RTAS_SP(r11)
	cmpwi	cr1, r11, 0
	bne	cr1, 7f
#endif
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1 for_rtas=1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
	beq	cr1, machine_check_tramp
	twi	31, 0, 0
#else
	b	machine_check_tramp
#endif

/* Data access exception. */
	. = 0x300
	DO_KVM  0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mfspr	r10, SPRN_SPRG_THREAD
BEGIN_MMU_FTR_SECTION
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
#ifdef CONFIG_PPC_KUAP
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
BEGIN_MMU_FTR_SECTION
#ifdef CONFIG_PPC_KUAP
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
FTR_SECTION_ELSE
	b	handle_page_fault_tramp_2
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */

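/*
 * In both variants above, the DSISR test picks the fast path: if any of
 * DSISR_BAD_FAULT_32S / DSISR_DABRMATCH (plus DSISR_PROTFAULT with
 * KUAP) is set, hash_page() cannot resolve the fault and we go straight
 * to handle_page_fault(); otherwise we first try to install a hash PTE.
 */
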
/* Instruction access exception. */
	. = 0x400
	DO_KVM  0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
1:	mr	r4,r12
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r4, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)


/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM  0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	alignment_exception_tramp

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM  0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)

/* System call */
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00

	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)

/*
 * The AltiVec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an AltiVec unavailable exception vector even if
 * not configured for AltiVec, so that you can't panic a
 * non-AltiVec kernel running on a machine with AltiVec just
 * by executing an AltiVec instruction.
 */
	. = 0xf00
	DO_KVM  0xf00
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM  0xf20
	b	AltiVecUnavailable

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux-style PTE (later becomes PPC hardware PTE)
 * r2:	ptr to linux-style PTE
 * r3:	scratch
 */
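/*
 * Sketch of the software walk below, in rough C (two-level 32-bit page
 * tables, 4kB pages; "pgdir" comes from SPRN_SPRG_PGDIR, or
 * swapper_pg_dir for kernel addresses, and "required" is the set of
 * access bits loaded into r1):
 *
 *	pmd = pgdir[ea >> 22];			// rlwimi r2,r3,12,20,29
 *	if (!(pmd & ~0xfff))
 *		goto InstructionAddressInvalid;	// no page table here
 *	pte = ((u32 *)(pmd & ~0xfff))[(ea >> 12) & 0x3ff];
 *	if (required & ~pte)
 *		goto InstructionAddressInvalid;	// access not permitted
 *
 * DataLoadTLBMiss and DataStoreTLBMiss below do the same walk with
 * different "required" bits and branch to DataAddressInvalid instead.
 */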
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
#endif
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
#endif
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
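	/* (clearing MSR[TGPR] switches r0-r3 back from the shadow set
	 * the 603 substitutes during TLB miss handling) */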
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux-style PTE (later becomes PPC hardware PTE)
 * r2:	ptr to linux-style PTE
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
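	/*
	 * The feature section above keeps a software LRU bit per TLB set:
	 * address bits 15:19 select one of 32 sets, the corresponding bit
	 * in SPRG_603_LRU is toggled, and its new value is copied into
	 * SRR1 bit 14 (the way-select hint, as we read it) so that tlbld
	 * replaces the least recently used way.  DataStoreTLBMiss uses an
	 * identical block.
	 */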
	tlbld	r3
	rfi
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux-style PTE (later becomes PPC hardware PTE)
 * r2:	ptr to linux-style PTE
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

#ifndef CONFIG_TAU_INT
#define TAUException	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)

	. = 0x3000

machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r4, _DAR(r11)
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	EXC_XFER_LITE(0x300, handle_page_fault)

#ifdef CONFIG_VMAP_STACK
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

.macro restore_regs_thread	thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm

hash_page_dsi:
	save_regs_thread	r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread	r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH0
	RFI

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	RFI

stack_overflow:
	vmap_stack_overflow_exception
#endif

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)


/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
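
/*
 * Roughly, copy_and_flush does the following (the -4/+4 biasing above
 * is because the word loop pre-increments the offset):
 *
 *	do {
 *		for (i = 0; i < L1_CACHE_BYTES / 4; i++) {
 *			offset += 4;
 *			*(u32 *)(dest + offset) = *(u32 *)(src + offset);
 *		}
 *		dcbst(dest + offset);	// push the line out to memory
 *		icbi(dest + offset);	// and discard it from the icache
 *	} while (offset < limit);
 */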

#ifdef CONFIG_SMP
	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
#endif /* CONFIG_SMP */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	lis	r6, early_hash@h
	addis	r3, r3, Hash@ha
	stw	r6, Hash@l(r3)
	blr
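
/*
 * SDR1 layout reminder (32-bit hash MMU): the high bits hold the
 * physical base of the hash table (HTABORG), the low bits HTABMASK;
 * the mask of 3 used above corresponds to a 256kB table (64kB << 2).
 */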

load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6

/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr
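
/*
 * Each of the 16 segment registers maps one 256MB slice of the address
 * space, hence the addis r4,r4,0x1000 stride above.  For context 0 the
 * VSIDs are simply 0, 0x111, 0x222, ... (the same per-segment 0x111
 * increment that switch_mmu_context uses below), with Nx/Ks set from
 * CONFIG_PPC_KUEP/KUAP on user segments and Kp = 1 on kernel segments.
 */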

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	RFI
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI

/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)
	cmpwi	cr0,r3,0
	blt-	4f
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context * 897 & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

	lwz	r4, MM_PGD(r4)
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, abatron_pteptrs@ha
	stw	r4, abatron_pteptrs@l + 0x4(r5)
#endif
	tophys(r4, r4)
	mtspr	SPRN_SPRG_PGDIR, r4
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
EXPORT_SYMBOL(switch_mmu_context)
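
/*
 * The multiply by 897 above is a skew, presumably chosen so that the
 * VSIDs of neighbouring contexts are spread across the hash space
 * rather than landing in adjacent buckets; the rlwinm keeps 20 bits of
 * the product shifted left by 4, and each segment then adds 0x111 as in
 * load_segment_registers.
 */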

/*
 * An undocumented "feature" of the 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear BAT3 and BAT4, so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_ENTRY(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)
	mfmsr	r6
	mflr	r7
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0

	.align	4
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	RFI
1:	bl	clear_bats
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7
	mtspr	SPRN_SRR1, r6
	RFI

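/*
 * flush_tlbs: tlbie selects a TLB congruence class from a few low
 * effective-address bits, so stepping a 4kB-spaced EA through the first
 * 4MB (1024 pages) is assumed to be enough to invalidate every class on
 * the CPUs this file supports.
 */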
flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI

/* We use one BAT to map up to 256M of RAM at PAGE_OFFSET */
initial_bats:
	lis	r11,PAGE_OFFSET@h
	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
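
/*
 * BAT encoding used above: the upper register holds BEPI (PAGE_OFFSET
 * here), the block length (BL_256M << 2) and 0x2 for Vs
 * (supervisor-valid); the lower register holds the physical base plus
 * the PP bits (2 = read/write) and, on SMP, M = 1 for coherence.
 */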

#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8, 0x002a
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2
	mtspr	SPRN_DBAT1U, r11

	blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded, rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8
