/*
 *  Low level TLB miss handlers for Book3E
 *
 *  Copyright (C) 2008-2009
 *      Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/pgtable.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC_64K_PAGES
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE+1)
#else
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE)
#endif
#define VPTE_PUD_SHIFT	(VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
#define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
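/* Roughly speaking, the VPTE_*_SHIFT values give the bit position of each
 * page table level's index within the (PTE-entry sized) offset into the
 * virtually linear page table, and VPTE_INDEX_SIZE the total width of that
 * offset; they are used by the virtual page table miss handler below to
 * walk the real page tables when an access to the virtual page table
 * itself faults.
 */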

/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with a bolted linear mapping          *
 * No virtual page table, no nested TLB misses                        *
 *                                                                    *
 **********************************************************************/

.macro tlb_prolog_bolted addr
	mtspr	SPRN_SPRG_TLB_SCRATCH,r13
	mfspr	r13,SPRN_SPRG_PACA
	std	r10,PACA_EXTLB+EX_TLB_R10(r13)
	mfcr	r10
	std	r11,PACA_EXTLB+EX_TLB_R11(r13)
	std	r16,PACA_EXTLB+EX_TLB_R16(r13)
	mfspr	r16,\addr		/* get faulting address */
	std	r14,PACA_EXTLB+EX_TLB_R14(r13)
	ld	r14,PACAPGD(r13)
	std	r15,PACA_EXTLB+EX_TLB_R15(r13)
	std	r10,PACA_EXTLB+EX_TLB_CR(r13)
	TLB_MISS_PROLOG_STATS_BOLTED
.endm

.macro tlb_epilog_bolted
	ld	r14,PACA_EXTLB+EX_TLB_CR(r13)
	ld	r10,PACA_EXTLB+EX_TLB_R10(r13)
	ld	r11,PACA_EXTLB+EX_TLB_R11(r13)
	mtcr	r14
	ld	r14,PACA_EXTLB+EX_TLB_R14(r13)
	ld	r15,PACA_EXTLB+EX_TLB_R15(r13)
	TLB_MISS_RESTORE_STATS_BOLTED
	ld	r16,PACA_EXTLB+EX_TLB_R16(r13)
	mfspr	r13,SPRN_SPRG_TLB_SCRATCH
.endm

/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_bolted)
	tlb_prolog_bolted SPRN_DEAR

	/* We need _PAGE_PRESENT and _PAGE_ACCESSED set */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST into the _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000, so the shift is >> 11
	 *
	 * MAS1 is preset for all we need, except for the TID, which needs
	 * to be cleared for kernel translations
	 */
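	/* Spelling out the arithmetic above: ESR_ST is bit 2^23, so
	 * 2^23 >> 19 = 2^4 = 0x10 (_PAGE_BAP_SW) and
	 * 2^23 >> 11 = 2^12 = 0x1000 (_PAGE_DIRTY).
	 */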

	mfspr	r11,SPRN_ESR

	srdi	r15,r16,60		/* get region */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	dtlb_miss_fault_bolted	/* Bail if fault addr is invalid */

	rlwinm	r10,r11,32-19,27,27
	rlwimi	r10,r11,32-16,19,19
	cmpwi	r15,0			/* user vs kernel check */
	ori	r10,r10,_PAGE_PRESENT
	oris	r11,r10,_PAGE_ACCESSED@h

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_bolted

tlb_miss_common_bolted:
/*
 * This is the guts of the TLB miss handler for bolted-linear.
 * We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = page table base
 * r13 = PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
	cmpldi	cr0,r14,0
	clrrdi	r15,r15,3
	beq	tlb_miss_fault_bolted	/* No PGDIR, bail */

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,r16)
	ldx	r14,r14,r15		/* grab pgd entry */
	beq	normal_tlb_miss_done	/* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
	ldx	r14,r14,r15		/* grab pgd entry */
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

#ifndef CONFIG_PPC_64K_PAGES
	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
	ldx	r14,r14,r15		/* grab pud entry */
#endif /* CONFIG_PPC_64K_PAGES */

	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab pmd entry */

	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab PTE, normal (!huge) page */

	/* Check if required permissions are met */
	andc.	r15,r11,r14
	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	bne-	tlb_miss_fault_bolted

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE needs changing if !base page size;
	 *                 not yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Need to be done
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	clrldi	r15,r15,12		/* Clear crap at the top */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	mtspr	SPRN_MAS2,r11
	andi.	r11,r14,_PAGE_DIRTY
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
	mtspr	SPRN_MAS7_MAS3,r15
	tlbwe

	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	tlb_epilog_bolted
	rfi

itlb_miss_kernel_bolted:
	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
tlb_miss_kernel_bolted:
	mfspr	r10,SPRN_MAS1
	ld	r14,PACA_KERNELPGD(r13)
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	tlb_miss_common_bolted

tlb_miss_fault_bolted:
	/* We need to check if it was an instruction miss */
	andi.	r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
	bne	itlb_miss_fault_bolted
dtlb_miss_fault_bolted:
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_bolted:
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e

/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss_bolted)
	tlb_prolog_bolted SPRN_SRR0

	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	srdi	r15,r16,60		/* get region */
	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne-	itlb_miss_fault_bolted

	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */

	/* We do the user/kernel test for the PID here along with the RW test
	 */

	cmpldi	cr0,r15,0			/* Check for user region */
	oris	r11,r11,_PAGE_ACCESSED@h
	beq	tlb_miss_common_bolted
	b	itlb_miss_kernel_bolted

/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with TLB reservation and HES support  *
 *                                                                    *
 **********************************************************************/


/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in the normal
	 * fault case since that's the only interesting value here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case, but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* The page tables are mapped virtually linear. At this point, though,
	 * we don't know whether we are trying to fault in a first level
	 * virtual address or a virtual page table address. We can get that
	 * from bit 0x1 of the region ID, which we have set for page tables
	 */
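	/* (The virtual page table regions are formed by normal_tlb_miss
	 * below, which ORs 1 into the region ID before accessing the
	 * virtually linear PTE array.)
	 */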
	andi.	r10,r15,0x1
	bne-	virt_page_table_tlb_miss

	std	r14,EX_TLB_ESR(r12);	/* save ESR */
	std	r16,EX_TLB_DEAR(r12);	/* save DEAR */

	/* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
	li	r11,_PAGE_PRESENT
	oris	r11,r11,_PAGE_ACCESSED@h

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r15,0		/* Check for user region */

	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST into the _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000, so the shift is >> 11
	 *
	 * MAS1 is preset for all we need, except for the TID, which needs
	 * to be cleared for kernel translations
	 */
	rlwimi	r11,r14,32-19,27,27
	rlwimi	r11,r14,32-16,19,19
	beq	normal_tlb_miss
	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e

/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by writing a crazy value in ESR in our exception frame
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the
	 * non-linear mapping case since we know the linear mapping case
	 * will not re-enter. We could indeed optimize and also not save
	 * SRR0/1 in the linear mapping case, but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h

	cmpldi	cr0,r15,0			/* Check for user region */
	std	r14,EX_TLB_ESR(r12)		/* write crazy -1 to frame */
	beq	normal_tlb_miss

	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8			/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

/*
 * This is the guts of the first-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = region ID
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
normal_tlb_miss:
	/* So we first construct the page table address. We do that by
	 * shifting the bottom of the address (not the region ID) by
	 * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and
	 * or'ing in the fourth high bit.
	 *
	 * NOTE: For 64K pages, we do things slightly differently in
	 * order to handle the weird page table format used by Linux
	 */
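	/* For 4K pages the net effect of the code below is roughly:
	 *   r10 = ((region | 1) << 60) |
	 *         (((ea with region bits cleared) >> PAGE_SHIFT) << 3)
	 * i.e. the address of this EA's PTE within the virtually linear
	 * page table of its region.
	 */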
	ori	r10,r15,0x1
#ifdef CONFIG_PPC_64K_PAGES
	/* For the top bits, 16 bytes per PTE */
	rldicl	r14,r16,64-(PAGE_SHIFT-4),PAGE_SHIFT-4+4
	/* Now create the bottom bits as 0 in position 0x8000 and
	 * the rest calculated for 8 bytes per PTE
	 */
	rldicl	r15,r16,64-(PAGE_SHIFT-3),64-15
	/* Insert the bottom bits in */
	rlwimi	r14,r15,0,16,31
#else
	rldicl	r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
#endif
	sldi	r15,r10,60
	clrrdi	r14,r14,3
	or	r10,r15,r14

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,r16)
	ld	r14,0(r10)
	beq	normal_tlb_miss_done
MMU_FTR_SECTION_ELSE
	ld	r14,0(r10)
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

finish_normal_tlb_miss:
	/* Check if required permissions are met */
	andc.	r15,r11,r14
	bne-	normal_tlb_miss_access_fault

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE needs changing if !base page size;
	 *                 not yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Need to be done
	 *
	 * TODO: mix up code below for better scheduling
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	mtspr	SPRN_MAS2,r11

	/* Check page size, if not standard, update MAS1 */
	rldicl	r11,r14,64-8,64-8
#ifdef CONFIG_PPC_64K_PAGES
	cmpldi	cr0,r11,BOOK3E_PAGESZ_64K
#else
	cmpldi	cr0,r11,BOOK3E_PAGESZ_4K
#endif
	beq-	1f
	mfspr	r11,SPRN_MAS1
	rlwimi	r11,r14,31,21,24
	rlwinm	r11,r11,0,21,19
	mtspr	SPRN_MAS1,r11
1:
	/* Move RPN in position */
	rldicr	r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	clrldi	r15,r11,12		/* Clear crap at the top */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	andi.	r11,r14,_PAGE_DIRTY
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
BEGIN_MMU_FTR_SECTION
	srdi	r16,r15,32
	mtspr	SPRN_MAS3,r15
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r15
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

normal_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	TLB_MISS_EPILOG_SUCCESS
	rfi

normal_tlb_miss_access_fault:
	/* We need to check if it was an instruction miss */
	andi.	r10,r11,_PAGE_EXEC
	bne	1f
	ld	r14,EX_TLB_DEAR(r12)
	ld	r15,EX_TLB_ESR(r12)
	mtspr	SPRN_DEAR,r14
	mtspr	SPRN_ESR,r15
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e


/*
 * This is the guts of the second-level TLB miss handler for virtual
 * page table misses. We are entered with:
 *
 * r16 = virtual page table faulting address
 * r15 = region (top 4 bits of address)
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * Note that this should only ever be called as a second level handler
 * with the current scheme when using SW load.
 * That means we can always get the original fault DEAR at
 * EX_TLB_DEAR-EX_TLB_SIZE(r12)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will restart the whole fault at level
 * 0, so we don't care too much about clobbers
 *
 * XXX That code was written back when we couldn't clobber r14. We can now,
 * so we could probably optimize things a bit
 */
virt_page_table_tlb_miss:
	/* Are we hitting a kernel page table? */
	andi.	r10,r15,0x8

	/* The cool thing now is that r10 contains 0 for user and 8 for kernel,
	 * and we happen to have the swapper_pg_dir at offset 8 from the user
	 * pgdir in the PACA :-).
	 */
	add	r11,r10,r13
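	/* r11 now points either at the PACA itself (user, r10 = 0) or
	 * 8 bytes into it (kernel, r10 = 8), so the PACAPGD load below
	 * picks up either the user or the kernel pgd pointer.
	 */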

	/* If kernel, we need to clear MAS1 TID */
	beq	1f
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
1:
BEGIN_MMU_FTR_SECTION
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 */
	PPC_TLBSRX_DOT(0,r16)
	beq	virt_page_table_tlb_miss_done
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
	bne-	virt_page_table_tlb_miss_fault

	/* Get the PGD pointer */
	ld	r15,PACAPGD(r11)
	cmpldi	cr0,r15,0
	beq-	virt_page_table_tlb_miss_fault

	/* Get to PGD entry */
	rldicl	r11,r16,64-VPTE_PGD_SHIFT,64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

#ifndef CONFIG_PPC_64K_PAGES
	/* Get to PUD entry */
	rldicl	r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get to PMD entry */
	rldicl	r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

	/* Ok, we're all right, we can now create a kernel translation for
	 * a 4K or 64K page from r16 -> r15.
	 */
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is always the base page size
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Need to be done
	 *
	 * So we only do MAS 3 (and 7) for now...
	 */
	clrldi	r11,r15,4		/* remove region ID from RPN */
	ori	r10,r11,1		/* Or-in SR */

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

BEGIN_MMU_FTR_SECTION
virt_page_table_tlb_miss_done:

	/* We have overridden MAS2:EPN, but currently our primary TLB miss
	 * handler will always restore it, so that should not be an issue.
	 * If we ever optimize the primary handler to not write MAS2 in
	 * some cases, we'll have to restore MAS2:EPN here based on the
	 * original fault's DEAR. If we do that, we have to modify the
	 * ITLB miss handler to also store SRR0 in the exception frame
	 * as DEAR.
	 *
	 * However, one nasty thing we did is we cleared the reservation
	 * (well, potentially we did). We use a trick here: if we are not
	 * a level 0 exception (we interrupted the TLB miss), we offset
	 * the return address by -4 in order to replay the tlbsrx
	 * instruction there
	 */
	subf	r10,r13,r12
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
	bne-	1f
	ld	r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
	addi	r10,r11,-4
	std	r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
1:
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
	/* Return to caller, normal case */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK);
	TLB_MISS_EPILOG_SUCCESS
	rfi

virt_page_table_tlb_miss_fault:
	/* If we fault here, things are a little bit tricky. We need to call
	 * either the data or the instruction storage fault handler, and we
	 * need to retrieve the original fault address and ESR (for data).
	 *
	 * The thing is, we know that in normal circumstances, this is
	 * always called as a second level tlb miss for SW load or as a first
	 * level TLB miss for HW load, so we should be able to peek at the
	 * relevant information in the first exception frame in the PACA.
	 *
	 * However, we do need to double check that, because we may just hit
	 * a stray kernel pointer or a userland attack trying to hit those
	 * areas. If that is the case, we do a data fault. (We can't get here
	 * from an instruction tlb miss anyway).
	 *
	 * Note also that when going to a fault, we must unwind the previous
	 * level as well. Since we are doing that, we don't need to clear or
	 * restore the TLB reservation either.
	 */
	subf	r10,r13,r12
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
	bne-	virt_page_table_tlb_miss_whacko_fault

	/* We dig the original DEAR and ESR from slot 0 */
	ld	r15,EX_TLB_DEAR+PACA_EXTLB(r13)
	ld	r16,EX_TLB_ESR+PACA_EXTLB(r13)

	/* We check for the "special" ESR value for instruction faults */
	cmpdi	cr0,r16,-1
	beq	1f
	mtspr	SPRN_DEAR,r15
	mtspr	SPRN_ESR,r16
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

virt_page_table_tlb_miss_whacko_fault:
	/* The linear fault will restart everything, so ESR and DEAR will
	 * not have been clobbered; let's just fault with what we have
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e


/**************************************************************
 *                                                            *
 * TLB miss handling for Book3E with hw page table support    *
 *                                                            *
 **************************************************************/


/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in the normal
	 * fault case since that's the only interesting value here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case, but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r11,r16,60		/* get region */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0		/* Check for user region */
	ld	r15,PACAPGD(r13)	/* Load user pgdir */
	beq	htw_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e

/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by keeping a crazy value for ESR in r14
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the
	 * non-linear mapping case since we know the linear mapping case
	 * will not re-enter. We could indeed optimize and also not save
	 * SRR0/1 in the linear mapping case, but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r11,r16,60		/* get region */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0			/* Check for user region */
	ld	r15,PACAPGD(r13)		/* Load user pgdir */
	beq	htw_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8			/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)		/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e


/*
 * This is the guts of the TLB miss handler for page-table-backed misses
 * when using the HW page table walk: it creates an indirect TLB entry
 * covering the page table for the faulting address. We are entered with:
 *
 * r16 = faulting address
 * r15 = PGD pointer
 * r14 = ESR
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will save/restore things for us
 */
htw_tlb_miss:
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 *
	 * MAS1:IND should already be set based on MAS4
	 */
	PPC_TLBSRX_DOT(0,r16)
	beq	htw_tlb_miss_done

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	htw_tlb_miss_fault

	/* Get the PGD pointer */
	cmpldi	cr0,r15,0
	beq-	htw_tlb_miss_fault

	/* Get to PGD entry */
	rldicl	r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault

#ifndef CONFIG_PPC_64K_PAGES
	/* Get to PUD entry */
	rldicl	r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get to PMD entry */
	rldicl	r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault

	/* Ok, we're all right, we can now create an indirect entry for
	 * a 1M or 256M page.
	 *
	 * The last trick is that, because we use "half" pages for
	 * the HTW (1M IND is 2K and 256M IND is 32K), we need to account
	 * for an added LSB bit to the RPN. For 64K pages, there is no
	 * problem as we already use 32K arrays (half PTE pages), but for
	 * 4K pages we need to extract a bit from the virtual address and
	 * insert it into the "PA52" bit of the RPN.
	 */
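	/* Spelling out the sizes above: with 4K base pages a 1M indirect
	 * entry covers 1M/4K = 256 PTEs, i.e. 2K of PTEs; with 64K base
	 * pages a 256M indirect entry covers 256M/64K = 4096 PTEs, i.e.
	 * 32K of PTEs. Both are half of a base page.
	 */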
#ifndef CONFIG_PPC_64K_PAGES
	rlwimi	r15,r16,32-9,20,20
#endif
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is always the base ind page size
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Need to be done
	 */
#ifdef CONFIG_PPC_64K_PAGES
	ori	r10,r15,(BOOK3E_PAGESZ_64K << MAS3_SPSIZE_SHIFT)
#else
	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
#endif

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

htw_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK)
	TLB_MISS_EPILOG_SUCCESS
	rfi

htw_tlb_miss_fault:
	/* We need to check if it was an instruction miss. We can tell
	 * because, in that case, r14 would contain -1
	 */
	cmpdi	cr0,r14,-1
	beq	1f
	mtspr	SPRN_DEAR,r16
	mtspr	SPRN_ESR,r14
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

/*
 * This is the guts of the "any" level TLB miss handler for kernel linear
 * mapping misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = ESR (data) or -1 (instruction)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * In addition we know that we will not re-enter, so in theory, we could
 * use a simpler epilog not restoring SRR0/1 etc., but we'll do that later.
 *
 * We also need to be careful about the MAS registers and the TLB
 * reservation here, as we know we'll have clobbered them if we interrupt
 * the main TLB miss handlers, in which case we probably want to do a full
 * restart at level 0 rather than saving / restoring the MAS.
 *
 * Note: If we care about performance of that core, we can easily shuffle
 *       a few things around
 */
tlb_load_linear:
	/* For now, we assume the linear mapping is contiguous and stops at
	 * linear_map_top. We also assume the size is a multiple of 1G, thus
	 * we only use 1G pages for now. That might have to be changed in a
	 * final implementation, especially when dealing with hypervisors
	 */
	ld	r11,PACATOC(r13)
	ld	r11,linear_map_top@got(r11)
	ld	r10,0(r11)
	cmpld	cr0,r10,r16
	bge	tlb_load_linear_fault

	/* MAS1 needs a whole new setup. */
	li	r15,(BOOK3E_PAGESZ_1GB<<MAS1_TSIZE_SHIFT)
	oris	r15,r15,MAS1_VALID@h	/* MAS1 needs V and TSIZE */
	mtspr	SPRN_MAS1,r15

	/* Already somebody there? */
	PPC_TLBSRX_DOT(0,r16)
	beq	tlb_load_linear_done

	/* Now we build the remaining MAS. MAS0 and 2 should be fine
	 * with their defaults, which leaves us with MAS 3 and 7. The
	 * mapping is linear, so we just take the address, clear the
	 * region bits, and OR in the permission bits, which are
	 * currently hardwired
	 */
	clrrdi	r10,r16,30		/* 1G page index */
	clrldi	r10,r10,4		/* clear region bits */
	ori	r10,r10,MAS3_SR|MAS3_SW|MAS3_SX
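	/* For example (hypothetical address): for a fault at
	 * 0xc000000047001234 the two clears above yield
	 * 0x0000000040000000, the 1G-aligned physical block containing
	 * the fault, to which the permission bits are then OR'ed.
	 */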

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

tlb_load_linear_done:
	/* We use the "error" epilog for success as we do want to
	 * restore to the initial faulting context, whatever it was.
	 * We do that because we can't resume a fault within a TLB
	 * miss handler, due to MAS and TLB reservation being clobbered.
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_LINEAR)
	TLB_MISS_EPILOG_ERROR
	rfi

tlb_load_linear_fault:
	/* We keep the DEAR and ESR around; this shouldn't have happened */
	cmpdi	cr0,r14,-1
	beq	1f
	TLB_MISS_EPILOG_ERROR_SPECIAL
	b	exc_data_storage_book3e
1:	TLB_MISS_EPILOG_ERROR_SPECIAL
	b	exc_instruction_storage_book3e


#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
.tlb_stat_inc:
1:	ldarx	r8,0,r9
	addi	r8,r8,1
	stdcx.	r8,0,r9
	bne-	1b
	blr
#endif