/*
 *  Low level TLB miss handlers for Book3E
 *
 *  Copyright (C) 2008-2009
 *      Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/pgtable.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
#include <asm/feature-fixups.h>

#ifdef CONFIG_PPC_64K_PAGES
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE+1)
#else
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE)
#endif
#define VPTE_PUD_SHIFT	(VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
#define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with a bolted linear mapping          *
 * No virtual page table, no nested TLB misses                        *
 *                                                                    *
 **********************************************************************/

/*
 * Note that, unlike non-bolted handlers, TLB_EXFRAME is not
 * modified by the TLB miss handlers themselves, since the TLB miss
 * handler code will not itself cause a recursive TLB miss.
 *
 * TLB_EXFRAME will be modified when crit/mc/debug exceptions are
 * entered/exited.
 */
/*
 * tlb_prolog_bolted: entry sequence for the bolted TLB miss handlers.
 * Saves the working registers into the TLB exception frame found via
 * SPRG_TLB_EXFRAME, then sets up:
 *   r12 = TLB exception frame, r13 = PACA, r14 = user pgdir,
 *   r16 = faulting address (read from \addr), CR saved in the frame.
 * \intnum is the interrupt number handed to DO_KVM for guest exits.
 */
.macro tlb_prolog_bolted intnum addr
	mtspr	SPRN_SPRG_GEN_SCRATCH,r12	/* stash r12 in scratch SPR */
	mfspr	r12,SPRN_SPRG_TLB_EXFRAME	/* r12 = TLB exception frame */
	std	r13,EX_TLB_R13(r12)
	std	r10,EX_TLB_R10(r12)
	mfspr	r13,SPRN_SPRG_PACA		/* r13 = PACA */

	mfcr	r10
	std	r11,EX_TLB_R11(r12)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	DO_KVM	\intnum, SPRN_SRR1
	std	r16,EX_TLB_R16(r12)
	mfspr	r16,\addr		/* get faulting address */
	std	r14,EX_TLB_R14(r12)
	ld	r14,PACAPGD(r13)	/* r14 = user pgdir */
	std	r15,EX_TLB_R15(r12)
	std	r10,EX_TLB_CR(r12)	/* save CR */
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	mfspr r11, SPRN_SRR1
	andi. r10,r11,MSR_PR
	beq 1f
	BTB_FLUSH(r10)			/* flush branch predictor when coming from user */
1:
END_BTB_FLUSH_SECTION
	std	r7,EX_TLB_R7(r12)
#endif
	TLB_MISS_PROLOG_STATS
.endm

/*
 * tlb_epilog_bolted: restore CR (via r14) and every register saved by
 * tlb_prolog_bolted from the exception frame (r12), then recover r12
 * itself from the scratch SPR. Mirror image of the prolog above.
 */
.macro tlb_epilog_bolted
	ld	r14,EX_TLB_CR(r12)	/* saved CR, restored below */
#ifdef CONFIG_PPC_FSL_BOOK3E
	ld	r7,EX_TLB_R7(r12)
#endif
	ld	r10,EX_TLB_R10(r12)
	ld	r11,EX_TLB_R11(r12)
	ld	r13,EX_TLB_R13(r12)
	mtcr	r14
	ld	r14,EX_TLB_R14(r12)
	ld	r15,EX_TLB_R15(r12)
	TLB_MISS_RESTORE_STATS
	ld	r16,EX_TLB_R16(r12)
	mfspr	r12,SPRN_SPRG_GEN_SCRATCH
.endm

/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_bolted)
	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR

	/* We need _PAGE_PRESENT and  _PAGE_ACCESSED set */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST   is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST in _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
	 *
	 * NOTE(review): the second rlwimi below rotates by 32-16 (i.e.
	 * >> 16), not 32-11 as the ">> 11" comment implies — verify
	 * against the ESR and _PAGE bit layouts for this platform.
	 *
	 * MAS1 is preset for all we need except for TID that needs to
	 * be cleared for kernel translations
	 */

	mfspr	r11,SPRN_ESR

	srdi	r15,r16,60		/* get region */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	dtlb_miss_fault_bolted	/* Bail if fault addr is invalid */

	rlwinm	r10,r11,32-19,27,27	/* ESR:ST -> _PAGE_BAP_SW position */
	rlwimi	r10,r11,32-16,19,19	/* ESR:ST -> _PAGE_DIRTY (see note) */
	cmpwi	r15,0			/* user vs kernel check */
	ori	r10,r10,_PAGE_PRESENT
	oris	r11,r10,_PAGE_ACCESSED@h	/* r11 = final permission mask */

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_bolted	/* non-zero region -> kernel path */

tlb_miss_common_bolted:
/*
 * This is the guts of the TLB miss handler for bolted-linear.
 * Software-walks the Linux page tables and writes a direct TLB entry.
 * We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = page table base
 * r13 = PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
	cmpldi	cr0,r14,0
	clrrdi	r15,r15,3		/* pgd offset, 8 bytes per entry */
	beq	tlb_miss_fault_bolted	/* No PGDIR, bail */

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,R16)
	ldx	r14,r14,r15		/* grab pgd entry */
	beq	tlb_miss_done_bolted	/* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
	ldx	r14,r14,r15		/* grab pgd entry */
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

#ifndef CONFIG_PPC_64K_PAGES
	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0		/* signed test: huge/bad entries >= 0 */
	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
	ldx	r14,r14,r15		/* grab pud entry */
#endif /* CONFIG_PPC_64K_PAGES */

	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab pmd entry */

	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab PTE, normal (!huge) page */

	/* Check if required permissions are met: any mask bit missing
	 * from the PTE leaves a non-zero result and CR0.eq clear
	 */
	andc.	r15,r11,r14
	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	bne-	tlb_miss_fault_bolted

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE need change if !base page size, not
	 *                 yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Needs to be done
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	clrldi	r15,r15,12		/* Clear crap at the top */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	mtspr	SPRN_MAS2,r11
	andi.	r11,r14,_PAGE_DIRTY
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
	mtspr	SPRN_MAS7_MAS3,r15
	tlbwe				/* write the TLB entry */

tlb_miss_done_bolted:
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	tlb_epilog_bolted
	rfi

/* Kernel-address paths: set up the kernel permission mask (itlb entry
 * point only), switch to the kernel pgdir, clear MAS1:TID and retry the
 * common walk for vmalloc; anything else falls into the fault paths.
 */
itlb_miss_kernel_bolted:
	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
tlb_miss_kernel_bolted:
	mfspr	r10,SPRN_MAS1
	ld	r14,PACA_KERNELPGD(r13)	/* walk the kernel page tables */
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	tlb_miss_common_bolted	/* vmalloc -> common walk */

tlb_miss_fault_bolted:
	/* We need to check if it was an instruction miss */
	andi.	r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
	bne	itlb_miss_fault_bolted
dtlb_miss_fault_bolted:
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_bolted:
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e

/* Instruction TLB miss: same structure as the data miss above, but the
 * faulting address comes from SRR0 and the permission mask requires
 * execute rather than read/write.
 */
	START_EXCEPTION(instruction_tlb_miss_bolted)
	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0

	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	srdi	r15,r16,60		/* get region */
	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne-	itlb_miss_fault_bolted	/* Bail if fault addr is invalid */

	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */

	/* We do the user/kernel test for the PID here along with the RW test
	 */

	cmpldi	cr0,r15,0			/* Check for user region */
	oris	r11,r11,_PAGE_ACCESSED@h
	beq	tlb_miss_common_bolted
	b	itlb_miss_kernel_bolted

#ifdef CONFIG_PPC_FSL_BOOK3E
/*
 * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
 *
 * Linear mapping is bolted: no virtual page table or nested TLB misses
 * Indirect entries in TLB1, hardware loads resulting direct entries
 *    into TLB0
 * No HES or NV hint on TLB1, so we need to do software round-robin
 * No tlbsrx. so we need a spinlock, and we have to deal
 *    with MAS-damage caused by tlbsx
 * 4K pages only
 */

	START_EXCEPTION(instruction_tlb_miss_e6500)
	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0

	ld	r11,PACA_TCD_PTR(r13)	/* r11 = per-core TLB data/lock */
	srdi.	r15,r16,60		/* get region */
	ori	r16,r16,1		/* low bit set tags "instruction miss" */

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_e6500	/* user/kernel test */

	b	tlb_miss_common_e6500

	START_EXCEPTION(data_tlb_miss_e6500)
	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR

	ld	r11,PACA_TCD_PTR(r13)	/* r11 = per-core TLB data/lock */
	srdi.	r15,r16,60		/* get region */
	rldicr	r16,r16,0,62		/* low bit clear tags "data miss" */

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_e6500	/* user vs kernel check */

/*
 * This is the guts of the TLB miss handler for e6500 and derivatives.
 * We are entered with:
 *
 * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
 * r15 = crap (free to use)
 * r14 = page table base
 * r13 = PACA
 * r11 = tlb_per_core ptr
 * r10 = crap (free to use)
 * r7  = esel_next
 */
tlb_miss_common_e6500:
	crmove	cr2*4+2,cr0*4+2		/* cr2.eq != 0 if kernel address */

BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
	/*
	 * Search if we already have an indirect entry for that virtual
	 * address, and if we do, bail out.
	 *
	 * MAS6:IND should be already set based on MAS4
	 *
	 * Take the per-core TLB lock first: threads share TLB1 and there
	 * is no tlbsrx., so updates must be serialized.  The lock byte
	 * holds (thread index + 1) so a recursive grab (mcheck/crit/etc.
	 * interrupting this handler) can be detected; in that case cr1.eq
	 * is left set and tlb_unlock_e6500 skips the unlock.
	 */
	lhz	r10,PACAPACAINDEX(r13)
	addi	r10,r10,1		/* lock token = thread index + 1 */
	crclr	cr1*4+eq	/* set cr1.eq = 0 for non-recursive */
1:	lbarx	r15,0,r11
	cmpdi	r15,0
	bne	2f
	stbcx.	r10,0,r11
	bne	1b
3:
	.subsection 1
2:	cmpd	cr1,r15,r10	/* recursive lock due to mcheck/crit/etc? */
	beq	cr1,3b		/* unlock will happen if cr1.eq = 0 */
10:	lbz	r15,0(r11)	/* spin without a reservation until free */
	cmpdi	r15,0
	bne	10b
	b	1b
	.previous
END_FTR_SECTION_IFSET(CPU_FTR_SMT)

	lbz	r7,TCD_ESEL_NEXT(r11)	/* r7 = next victim esel in TLB1 */

BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
	/*
	 * Erratum A-008139 says that we can't use tlbwe to change
	 * an indirect entry in any way (including replacing or
	 * invalidating) if the other thread could be in the process
	 * of a lookup.  The workaround is to invalidate the entry
	 * with tlbilx before overwriting.
	 */

	rlwinm	r10,r7,16,0xff0000	/* esel_next -> MAS0:ESEL */
	oris	r10,r10,MAS0_TLBSEL(1)@h
	mtspr	SPRN_MAS0,r10
	isync
	tlbre				/* read out the victim entry */
	mfspr	r15,SPRN_MAS1
	andis.	r15,r15,MAS1_VALID@h
	beq	5f			/* victim invalid: no tlbilx needed */

BEGIN_FTR_SECTION_NESTED(532)
	mfspr	r10,SPRN_MAS8
	rlwinm	r10,r10,0,0x80000fff  /* tgs,tlpid -> sgs,slpid */
	mtspr	SPRN_MAS5,r10
END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)

	mfspr	r10,SPRN_MAS1
	rlwinm	r15,r10,0,0x3fff0000  /* tid -> spid */
	rlwimi	r15,r10,20,0x00000003 /* ind,ts -> sind,sas */
	mfspr	r10,SPRN_MAS6		/* preserve caller's MAS6 */
	mtspr	SPRN_MAS6,r15

	mfspr	r15,SPRN_MAS2
	isync
	tlbilxva 0,r15			/* invalidate the victim entry */
	isync

	mtspr	SPRN_MAS6,r10		/* restore MAS6 */

5:
BEGIN_FTR_SECTION_NESTED(532)
	li	r10,0
	mtspr	SPRN_MAS8,r10
	mtspr	SPRN_MAS5,r10
END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)

	tlbsx	0,r16			/* look for an existing indirect entry */
	mfspr	r10,SPRN_MAS1
	andis.	r15,r10,MAS1_VALID@h
	bne	tlb_miss_done_e6500	/* already there, done */
FTR_SECTION_ELSE
	mfspr	r10,SPRN_MAS1
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SMT)

	oris	r10,r10,MAS1_VALID@h
	beq	cr2,4f			/* user address: keep TID */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
4:	mtspr	SPRN_MAS1,r10

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	tlb_miss_fault_e6500

	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
	cmpldi	cr0,r14,0
	clrrdi	r15,r15,3
	beq-	tlb_miss_fault_e6500 /* No PGDIR, bail */
	ldx	r14,r14,r15		/* grab pgd entry */

	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0		/* signed test: huge/bad entries >= 0 */
	bge	tlb_miss_huge_e6500	/* Bad pgd entry or hugepage; bail */
	ldx	r14,r14,r15		/* grab pud entry */

	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_huge_e6500
	ldx	r14,r14,r15		/* Grab pmd entry */

	mfspr	r10,SPRN_MAS0
	cmpdi	cr0,r14,0
	bge	tlb_miss_huge_e6500

	/* Now we build the MAS for a 2M indirect page:
	 *
	 * MAS 0   :	ESEL needs to be filled by software round-robin
	 * MAS 1   :	Fully set up
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is base ind page size always
	 *               - TID already cleared if necessary
	 * MAS 2   :	Default not 2M-aligned, need to be redone
	 * MAS 3+7 :	Needs to be done
	 */

	ori	r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
	mtspr	SPRN_MAS7_MAS3,r14

	clrrdi	r15,r16,21		/* make EA 2M-aligned */
	mtspr	SPRN_MAS2,r15

tlb_miss_huge_done_e6500:
	lbz	r16,TCD_ESEL_MAX(r11)
	lbz	r14,TCD_ESEL_FIRST(r11)
	rlwimi	r10,r7,16,0x00ff0000	/* insert esel_next into MAS0 */
	addi	r7,r7,1			/* increment esel_next */
	mtspr	SPRN_MAS0,r10
	cmpw	r7,r16
	iseleq	r7,r14,r7		/* if next == last use first */
	stb	r7,TCD_ESEL_NEXT(r11)	/* store round-robin state back */

	tlbwe

tlb_miss_done_e6500:
	/* Release the per-core TLB lock unless it was recursively grabbed
	 * (cr1.eq set by the lock sequence above).
	 */
	.macro	tlb_unlock_e6500
BEGIN_FTR_SECTION
	beq	cr1,1f		/* no unlock if lock was recursively grabbed */
	li	r15,0
	isync
	stb	r15,0(r11)
1:
END_FTR_SECTION_IFSET(CPU_FTR_SMT)
	.endm

	tlb_unlock_e6500
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	tlb_epilog_bolted
	rfi

tlb_miss_huge_e6500:
	beq	tlb_miss_fault_e6500	/* entry was 0 (empty) -> fault */
	li	r10,1
	andi.	r15,r14,HUGEPD_SHIFT_MASK@l /* r15 = psize */
	rldimi	r14,r10,63,0		/* Set PD_HUGE */
	xor	r14,r14,r15		/* Clear size bits */
	ldx	r14,0,r14		/* grab the PTE from the hugepd */

	/*
	 * Now we build the MAS for a huge page.
	 *
	 * MAS 0   :	ESEL needs to be filled by software round-robin
	 *		 - can be handled by indirect code
	 * MAS 1   :	Need to clear IND and set TSIZE
	 * MAS 2,3+7:	Needs to be redone similar to non-tablewalk handler
	 */

	subi	r15,r15,10		/* Convert psize to tsize */
	mfspr	r10,SPRN_MAS1
	rlwinm	r10,r10,0,~MAS1_IND	/* direct (non-indirect) entry */
	rlwimi	r10,r15,MAS1_TSIZE_SHIFT,MAS1_TSIZE_MASK
	mtspr	SPRN_MAS1,r10

	li	r10,-0x400
	sld	r15,r10,r15		/* Generate mask based on size */
	and	r10,r16,r15		/* EA aligned to the huge page size */
	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	rlwimi	r10,r14,32-19,27,31	/* Insert WIMGE */
	clrldi	r15,r15,PAGE_SHIFT	/* Clear crap at the top */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	mtspr	SPRN_MAS2,r10
	andi.	r10,r14,_PAGE_DIRTY
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	bne	1f
	li	r10,MAS3_SW|MAS3_UW
	andc	r15,r15,r10
1:
	mtspr	SPRN_MAS7_MAS3,r15

	mfspr	r10,SPRN_MAS0
	b	tlb_miss_huge_done_e6500	/* shared esel round-robin + tlbwe */

tlb_miss_kernel_e6500:
	ld	r14,PACA_KERNELPGD(r13)	/* walk the kernel page tables */
	cmpldi	cr1,r15,8		/* Check for vmalloc region */
	beq+	cr1,tlb_miss_common_e6500

	/* NOTE(review): falling through here reaches tlb_unlock_e6500
	 * without the lock sequence in tlb_miss_common_e6500 having run;
	 * the unlock is gated only on cr1.eq — confirm cr1 state makes
	 * this safe on SMT parts.
	 */
tlb_miss_fault_e6500:
	tlb_unlock_e6500
	/* We need to check if it was an instruction miss */
	andi.	r16,r16,1		/* low bit was set in the itlb prolog */
	bne	itlb_miss_fault_e6500
dtlb_miss_fault_e6500:
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_e6500:
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e
#endif /* CONFIG_PPC_FSL_BOOK3E */

/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with TLB reservation and HES support  *
 *                                                                    *
 **********************************************************************/


/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in normal
	 * fault case since that's the only interesting values here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* The page tables are mapped virtually linear. At this point, though,
	 * we don't know whether we are trying to fault in a first level
	 * virtual address or a virtual page table address. We can get that
	 * from bit 0x1 of the region ID which we have set for a page table
	 */
	andi.	r10,r15,0x1
	bne-	virt_page_table_tlb_miss

	std	r14,EX_TLB_ESR(r12);	/* save ESR */
	std	r16,EX_TLB_DEAR(r12);	/* save DEAR */

	 /* We need _PAGE_PRESENT and  _PAGE_ACCESSED set */
	li	r11,_PAGE_PRESENT
	oris	r11,r11,_PAGE_ACCESSED@h

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r15,0		/* Check for user region */

	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST   is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST in _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
	 *
	 * NOTE(review): the second rlwimi uses 32-16 (>> 16), not 32-11
	 * as ">> 11" implies — same apparent mismatch as in the bolted
	 * handler above; verify against the ESR/_PAGE bit layouts.
	 *
	 * MAS1 is preset for all we need except for TID that needs to
	 * be cleared for kernel translations
	 */
	rlwimi	r11,r14,32-19,27,27	/* ESR:ST -> _PAGE_BAP_SW position */
	rlwimi	r11,r14,32-16,19,19	/* ESR:ST -> _PAGE_DIRTY (see note) */
	beq	normal_tlb_miss
	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e

/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by writing a crazy value in ESR in our exception frame
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the non
	 * linear mapping case since we know the linear mapping case will
	 * not re-enter. We could indeed optimize and also not save SRR0/1
	 * in the linear mapping case but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h

	cmpldi	cr0,r15,0			/* Check for user region */
	std	r14,EX_TLB_ESR(r12)		/* write crazy -1 to frame */
	beq	normal_tlb_miss

	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* kernel base perm instead */
	oris	r11,r11,_PAGE_ACCESSED@h
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8			/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

/*
 * This is the guts of the first-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = region ID
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
normal_tlb_miss:
	/* So we first construct the page table address. We do that by
	 * shifting the bottom of the address (not the region ID) by
	 * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and
	 * or'ing the fourth high bit.
	 *
	 * NOTE: For 64K pages, we do things slightly differently in
	 * order to handle the weird page table format used by linux
	 */
	ori	r10,r15,0x1		/* region | 1 = virtual page table region */
#ifdef CONFIG_PPC_64K_PAGES
	/* For the top bits, 16 bytes per PTE */
	rldicl	r14,r16,64-(PAGE_SHIFT-4),PAGE_SHIFT-4+4
	/* Now create the bottom bits as 0 in position 0x8000 and
	 * the rest calculated for 8 bytes per PTE
	 */
	rldicl	r15,r16,64-(PAGE_SHIFT-3),64-15
	/* Insert the bottom bits in */
	rlwimi	r14,r15,0,16,31
#else
	rldicl	r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
#endif
	sldi	r15,r10,60
	clrrdi	r14,r14,3
	or	r10,r15,r14		/* r10 = virtual address of the PTE */

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,R16)
	ld	r14,0(r10)		/* may recurse into virt_page_table_tlb_miss */
	beq	normal_tlb_miss_done	/* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
	ld	r14,0(r10)
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

finish_normal_tlb_miss:
	/* Check if required permissions are met */
	andc.	r15,r11,r14
	bne-	normal_tlb_miss_access_fault

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE need change if !base page size, not
	 *                 yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Needs to be done
	 *
	 * TODO: mix up code below for better scheduling
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	mtspr	SPRN_MAS2,r11

	/* Check page size, if not standard, update MAS1 */
	rldicl	r11,r14,64-8,64-8	/* extract the PTE page-size field */
#ifdef CONFIG_PPC_64K_PAGES
	cmpldi	cr0,r11,BOOK3E_PAGESZ_64K
#else
	cmpldi	cr0,r11,BOOK3E_PAGESZ_4K
#endif
	beq-	1f			/* base page size: MAS1 preset is fine */
	mfspr	r11,SPRN_MAS1
	rlwimi	r11,r14,31,21,24	/* insert PTE size into MAS1:TSIZE */
	rlwinm	r11,r11,0,21,19		/* clear the bit above the insert */
	mtspr	SPRN_MAS1,r11
1:
	/* Move RPN in position */
	rldicr	r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	clrldi	r15,r11,12		/* Clear crap at the top */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	andi.	r11,r14,_PAGE_DIRTY
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
BEGIN_MMU_FTR_SECTION
	srdi	r16,r15,32
	mtspr	SPRN_MAS3,r15
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r15
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

normal_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	TLB_MISS_EPILOG_SUCCESS
	rfi

normal_tlb_miss_access_fault:
	/* We need to check if it was an instruction miss: only the itlb
	 * path puts an exec bit in the permission mask (r11)
	 */
	andi.	r10,r11,_PAGE_EXEC
	bne	1f
	ld	r14,EX_TLB_DEAR(r12)	/* restore the original fault */
	ld	r15,EX_TLB_ESR(r12)	/* DEAR/ESR for the DSI handler */
	mtspr	SPRN_DEAR,r14
	mtspr	SPRN_ESR,r15
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e


/*
 * This is the guts of the second-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = virtual page table faulting address
 * r15 = region (top 4 bits of address)
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * Note that this should only ever be called as a second level handler
 * with the current scheme when using SW load.
 * That means we can always get the original fault DEAR at
 * EX_TLB_DEAR-EX_TLB_SIZE(r12)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will restart the whole fault at level
 * 0 so we don't care too much about clobbers
 *
 * XXX That code was written back when we couldn't clobber r14. We can now,
 * so we could probably optimize things a bit
 */
virt_page_table_tlb_miss:
	/* Are we hitting a kernel page table ? */
	andi.	r10,r15,0x8

	/* The cool thing now is that r10 contains 0 for user and 8 for kernel,
	 * and we happen to have the swapper_pg_dir at offset 8 from the user
	 * pgdir in the PACA :-).
	 */
	add	r11,r10,r13		/* r11 = PACA biased to the right pgdir */

	/* If kernel, we need to clear MAS1 TID */
	beq	1f
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
1:
BEGIN_MMU_FTR_SECTION
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 */
	PPC_TLBSRX_DOT(0,R16)
	beq	virt_page_table_tlb_miss_done
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
	bne-	virt_page_table_tlb_miss_fault

	/* Get the PGD pointer */
	ld	r15,PACAPGD(r11)	/* user or kernel pgdir, see bias above */
	cmpldi	cr0,r15,0
	beq-	virt_page_table_tlb_miss_fault

	/* Get to PGD entry */
	rldicl	r11,r16,64-VPTE_PGD_SHIFT,64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0		/* signed test: huge/bad entries >= 0 */
	bge	virt_page_table_tlb_miss_fault

#ifndef CONFIG_PPC_64K_PAGES
	/* Get to PUD entry */
	rldicl	r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get to PMD entry */
	rldicl	r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

	/* Ok, we're all right, we can now create a kernel translation for
	 * a 4K or 64K page from r16 -> r15.
	 */
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is base page size always
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Needs to be done
	 *
	 * So we only do MAS 2 and 3 for now...
	 */
	clrldi	r11,r15,4		/* remove region ID from RPN */
	ori	r10,r11,1		/* Or-in SR */

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

BEGIN_MMU_FTR_SECTION
virt_page_table_tlb_miss_done:

	/* We have overridden MAS2:EPN but currently our primary TLB miss
	 * handler will always restore it so that should not be an issue,
	 * if we ever optimize the primary handler to not write MAS2 on
	 * some cases, we'll have to restore MAS2:EPN here based on the
	 * original fault's DEAR. If we do that we have to modify the
	 * ITLB miss handler to also store SRR0 in the exception frame
	 * as DEAR.
	 *
	 * However, one nasty thing we did is we cleared the reservation
	 * (well, potentially we did). We do a trick here thus if we
	 * are not a level 0 exception (we interrupted the TLB miss) we
	 * offset the return address by -4 in order to replay the tlbsrx
	 * instruction there
	 */
	subf	r10,r13,r12		/* frame offset within the PACA */
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
	bne-	1f			/* level 0: nothing to replay */
	ld	r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
	addi	r10,r11,-4		/* back up over the tlbsrx. */
	std	r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
1:
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
	/* Return to caller, normal case */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK);
	TLB_MISS_EPILOG_SUCCESS
	rfi

virt_page_table_tlb_miss_fault:
	/* If we fault here, things are a little bit tricky. We need to call
	 * either data or instruction store fault, and we need to retrieve
	 * the original fault address and ESR (for data).
	 *
	 * The thing is, we know that in normal circumstances, this is
	 * always called as a second level tlb miss for SW load or as a first
	 * level TLB miss for HW load, so we should be able to peek at the
	 * relevant information in the first exception frame in the PACA.
	 *
	 * However, we do need to double check that, because we may just hit
	 * a stray kernel pointer or a userland attack trying to hit those
	 * areas. If that is the case, we do a data fault. (We can't get here
	 * from an instruction tlb miss anyway).
	 *
	 * Note also that when going to a fault, we must unwind the previous
	 * level as well. Since we are doing that, we don't need to clear or
	 * restore the TLB reservation neither.
	 */
	subf	r10,r13,r12		/* which exception frame are we in? */
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
	bne-	virt_page_table_tlb_miss_whacko_fault

	/* We dig the original DEAR and ESR from slot 0 */
	ld	r15,EX_TLB_DEAR+PACA_EXTLB(r13)
	ld	r16,EX_TLB_ESR+PACA_EXTLB(r13)

	/* We check for the "special" ESR value for instruction faults */
	cmpdi	cr0,r16,-1		/* -1 was stored by the ITLB handler */
	beq	1f
	mtspr	SPRN_DEAR,r15
	mtspr	SPRN_ESR,r16
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

virt_page_table_tlb_miss_whacko_fault:
	/* The linear fault will restart everything so ESR and DEAR will
	 * not have been clobbered, let's just fault with what we have
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e


/**************************************************************
 *                                                            *
 * TLB miss handling for Book3E with hw page table support    *
 *                                                            *
 **************************************************************/


/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in normal
	 * fault case since that's the only interesting values here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r11,r16,60		/* get region */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0		/* Check for user region */
	ld	r15,PACAPGD(r13)	/* Load user pgdir */
	beq	htw_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e

1024
/* Instruction TLB miss (hardware page table walk variant) */
	START_EXCEPTION(instruction_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by keeping a crazy value for ESR in r14
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the non
	 * linear mapping case since we know the linear mapping case will
	 * not re-enter. We could indeed optimize and also not save SRR0/1
	 * in the linear mapping case but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r11,r16,60		/* get region (top nibble of the EA) */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0			/* Check for user region */
	ld	r15,PACAPGD(r13)		/* Load user pgdir */
	beq	htw_tlb_miss

	/* Kernel address: clear MAS1:TID so the entry is global, and use
	 * the kernel pgdir. Only the vmalloc region (0x8) is accepted.
	 *
	 * XXX replace the RMW cycles with immediate loads + writes
	 *
	 * NOTE(review): local label "1:" appears unreferenced within this
	 * view — possibly a leftover; confirm against the full file.
	 */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8			/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)		/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
1066
1067
/*
 * This is the guts of the TLB miss handler for hardware page table
 * walk misses. It is entered from the first-level data/instruction
 * miss handlers above for non-linear addresses, with:
 *
 * r16 = faulting virtual address (DEAR for data, SRR0 for instruction)
 * r15 = PGD pointer (user or kernel, selected by the caller)
 * r14 = ESR (data) or -1 (instruction)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * It installs an *indirect* TLB entry pointing at the PTE page, and
 * lets the hardware walker find the actual translation.
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will save/restore things for us
 */
htw_tlb_miss:
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 *
	 * MAS1:IND should be already set based on MAS4
	 */
	PPC_TLBSRX_DOT(0,R16)
	beq	htw_tlb_miss_done	/* tlbsrx. hit: entry already there */

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	htw_tlb_miss_fault	/* EA beyond page-table coverage */

	/* Get the PGD pointer */
	cmpldi	cr0,r15,0
	beq-	htw_tlb_miss_fault	/* no pgdir for this context */

	/* Get to PGD entry. The signed compare + bge faults on zero or any
	 * non-negative value; NOTE(review): this assumes a present entry
	 * always has the top bit set — confirm against the PTE format.
	 */
	rldicl	r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3		/* 8-byte align the entry offset */
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault

#ifndef CONFIG_PPC_64K_PAGES
	/* Get to PUD entry (folded into the PGD with 64K pages) */
	rldicl	r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get to PMD entry */
	rldicl	r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault

	/* Ok, we're all right, we can now create an indirect entry for
	 * a 1M or 256M page.
	 *
	 * The last trick is now that because we use "half" pages for
	 * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
	 * for an added LSB bit to the RPN. For 64K pages, there is no
	 * problem as we already use 32K arrays (half PTE pages), but for
	 * 4K page we need to extract a bit from the virtual address and
	 * insert it into the "PA52" bit of the RPN.
	 */
#ifndef CONFIG_PPC_64K_PAGES
	rlwimi	r15,r16,32-9,20,20	/* insert EA-derived bit into the RPN */
#endif
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is base ind page size always
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Needs to be done
	 */
#ifdef CONFIG_PPC_64K_PAGES
	ori	r10,r15,(BOOK3E_PAGESZ_64K << MAS3_SPSIZE_SHIFT)
#else
	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
#endif

	/* Write MAS7/MAS3: one paired mtspr when the core supports it,
	 * otherwise two separate writes (feature-fixup patched at boot).
	 */
BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe				/* commit the indirect entry */

htw_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK)
	TLB_MISS_EPILOG_SUCCESS
	rfi

htw_tlb_miss_fault:
	/* We need to check if it was an instruction miss. We know this
	 * though because r14 would contain -1
	 */
	cmpdi	cr0,r14,-1
	beq	1f
	mtspr	SPRN_DEAR,r16		/* data miss: restore DEAR/ESR for the handler */
	mtspr	SPRN_ESR,r14
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
1186
/*
 * This is the guts of "any" level TLB miss handler for kernel linear
 * mapping misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = ESR (data) or -1 (instruction)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * In addition we know that we will not re-enter, so in theory, we could
 * use a simpler epilog not restoring SRR0/1 etc.. but we'll do that later.
 *
 * We also need to be careful about MAS registers here & TLB reservation,
 * as we know we'll have clobbered them if we interrupt the main TLB miss
 * handlers in which case we probably want to do a full restart at level
 * 0 rather than saving / restoring the MAS.
 *
 * Note: If we care about performance of that core, we can easily shuffle
 *       a few things around
 */
tlb_load_linear:
	/* For now, we assume the linear mapping is contiguous and stops at
	 * linear_map_top. We also assume the size is a multiple of 1G, thus
	 * we only use 1G pages for now. That might have to be changed in a
	 * final implementation, especially when dealing with hypervisors
	 */
	ld	r11,PACATOC(r13)		/* kernel TOC -> &linear_map_top */
	ld	r11,linear_map_top@got(r11)
	ld	r10,0(r11)
	tovirt(10,10)
	cmpld	cr0,r16,r10
	bge	tlb_load_linear_fault		/* EA above the linear map limit */

	/* MAS1 need whole new setup. */
	li	r15,(BOOK3E_PAGESZ_1GB<<MAS1_TSIZE_SHIFT)
	oris	r15,r15,MAS1_VALID@h	/* MAS1 needs V and TSIZE */
	mtspr	SPRN_MAS1,r15

	/* Already somebody there ? */
	PPC_TLBSRX_DOT(0,R16)
	beq	tlb_load_linear_done

	/* Now we build the remaining MAS. MAS0 and 2 should be fine
	 * with their defaults, which leaves us with MAS 3 and 7. The
	 * mapping is linear, so we just take the address, clear the
	 * region bits, and or in the permission bits which are currently
	 * hard wired
	 */
	clrrdi	r10,r16,30		/* 1G page index */
	clrldi	r10,r10,4		/* clear region bits */
	ori	r10,r10,MAS3_SR|MAS3_SW|MAS3_SX

	/* Write MAS7/MAS3: paired mtspr when supported, else two writes
	 * (feature-fixup patched at boot).
	 */
BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe				/* commit the 1G linear entry */

tlb_load_linear_done:
	/* We use the "error" epilog for success as we do want to
	 * restore to the initial faulting context, whatever it was.
	 * We do that because we can't resume a fault within a TLB
	 * miss handler, due to MAS and TLB reservation being clobbered.
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_LINEAR)
	TLB_MISS_EPILOG_ERROR
	rfi

tlb_load_linear_fault:
	/* We keep the DEAR and ESR around, this shouldn't have happened */
	cmpdi	cr0,r14,-1		/* -1 marks an instruction miss */
	beq	1f
	TLB_MISS_EPILOG_ERROR_SPECIAL
	b	exc_data_storage_book3e
1:	TLB_MISS_EPILOG_ERROR_SPECIAL
	b	exc_instruction_storage_book3e
1271
1272
#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
/*
 * Atomically increment the 64-bit statistics counter at (r9) using a
 * load-reserve / store-conditional loop, retrying if the reservation
 * is lost. Clobbers r8 and cr0; returns via blr.
 */
.tlb_stat_inc:
1:	ldarx	r8,0,r9			/* load counter with reservation */
	addi	r8,r8,1
	stdcx.	r8,0,r9			/* store back; cr0.eq clear on failure */
	bne-	1b			/* lost reservation -> retry */
	blr
#endif
1281