/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * 	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
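/*
 * Note: this variant runs in real mode, reached straight from the SLB
 * miss handlers with relocation off, which is why - unlike the disabled
 * slb_allocate_user variant further down - it cannot walk the page
 * tables and works entirely from the PACA.
 */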
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
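	/*
	 * For reference, the EA layout assumed here: bits 60-63 select
	 * the region (0xc is the kernel linear mapping at PAGE_OFFSET,
	 * 0xd vmalloc/ioremap, 0xf vmemmap), and ea >> 28 gives the
	 * 256M ESID.
	 */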

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the protoVSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if hitting the linear mapping or some other kernel space */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
BEGIN_FTR_SECTION
	b	slb_finish_load
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T
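	/*
	 * The feature section above is resolved at boot: without
	 * CPU_FTR_1T_SEGMENT the enclosed branch to slb_finish_load is
	 * kept, otherwise it is patched out and we fall through to the
	 * 1T variant.  The same pattern recurs below.
	 */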

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check the virtual memmap region; the "li" below is patched at kernel boot */
	cmpldi	cr0,r9,0xf
	bne	1f
_GLOBAL(slb_miss_kernel_load_vmemmap)
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
	 * will be patched by the kernel at boot
	 */
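	/*
	 * Without CPU_FTR_CI_LARGE_PAGE, ioremap space must keep the 4K
	 * encoding even though vmalloc proper may use a larger page size,
	 * so the feature section below tells the two apart by the segment
	 * offset within the region.  CPUs with the feature skip the check
	 * and use the patched slb_miss_kernel_load_io value throughout.
	 */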
BEGIN_FTR_SECTION
	/* check whether this is in vmalloc or ioremap space */
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
6:
BEGIN_FTR_SECTION
	b	slb_finish_load
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:	/* user address: proto-VSID = context << USER_ESID_BITS | ESID.
	 * First check if the address is within the boundaries of the
	 * user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */

	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching, unfortunately, as processes might flip
	 * between 4k and 64k standard page sizes.
	 */
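	/*
	 * Roughly, in C (a sketch for orientation only; the field names
	 * follow the asm-offsets symbols used below, and the high-slice
	 * index arithmetic assumes SLICE_LOW_SHIFT = 28 and
	 * SLICE_HIGH_SHIFT = 40, i.e. 256M low slices, 1T high slices):
	 *
	 *	if (esid < 16)	// low slices cover the first 4G
	 *		psize = (paca->context.low_slices_psize
	 *			 >> (esid * 4)) & 0xf;
	 *	else		// one 4-bit nibble per 1T high slice
	 *		psize = (paca->context.high_slices_psize
	 *			 >> (((esid >> 12) & 0xf) * 4)) & 0xf;
	 *	r11 = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 */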
#ifdef CONFIG_PPC_MM_SLICES
	cmpldi	r10,16

	/* Get the slice index * 4 in r11 and matching slice size mask in r9 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	sldi	r11,r10,2
	blt	5f
	ld	r9,PACAHIGHSLICEPSIZE(r13)
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
	andi.	r11,r11,0x3c

5:	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

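	/*
	 * Proto-VSID for a user address is (context << USER_ESID_BITS) | ESID,
	 * formed by the rldimi below.  With 1T segments the compare against
	 * ESID 0x1000 (the first segment at 1T) has to sit before the rldimi,
	 * which overwrites the high bits of r10 that the compare examines.
	 */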
	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	rldimi	r10,r9,USER_ESID_BITS,0
BEGIN_FTR_SECTION
	bge	slb_finish_load_1T
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load
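	/*
	 * A proto-VSID of 0 scrambles to VSID 0, which is reserved as a
	 * bad VSID (see the comment near the top), so the bogus address
	 * can never find a page in the resulting segment.
	 */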

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given user EA.
 * 	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the page tables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9,256M)
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
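	/*
	 * ASM_VSID_SCRAMBLE is, in effect,
	 *	vsid = (proto_vsid * VSID_MULTIPLIER) % VSID_MODULUS
	 * with VSID_MODULUS = 2^36 - 1 for 256M segments; this is also why
	 * the proto-VSID 0xfffffffff noted earlier lands on VSID 0 (it is
	 * equal to the modulus).  The rldimi above then merges
	 * vsid << SLB_VSID_SHIFT with the flags to form the VSID dword
	 * for slbmte.
	 */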

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r3,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r3
	beq	3f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

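	/*
	 * Round-robin victim selection: bump paca->stab_rr, wrapping back
	 * to SLB_NUM_BOLTED once it reaches the SLB size (the cmpldi below
	 * is patched at boot with the real entry count), so the bolted
	 * entries in slots 0..SLB_NUM_BOLTED-1 are never evicted.
	 */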
7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
_GLOBAL(slb_compare_rr_to_size)
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r10 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
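	/* Pin the pointer past SLB_CACHE_ENTRIES so the context-switch
	 * code can tell the cache overflowed and must flush the whole
	 * SLB rather than just the cached entries.
	 */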
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 * We assume legacy iSeries will never have 1T segments.
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
 */
slb_finish_load_1T:
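	/*
	 * The proto-VSID was formed with 256M (28-bit) granularity; the
	 * shift right by SID_SHIFT_1T - SID_SHIFT (40 - 28 = 12) below
	 * drops the low twelve ESID bits to convert it to 1T granularity.
	 * For user addresses this also leaves the context field in the
	 * right place, USER_ESID_BITS_1T being twelve less than
	 * USER_ESID_BITS.
	 */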
	srdi	r10,r10,40-28		/* get 1T ESID */
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b