#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12

/* Location of cpu0's segment table */
#define STAB0_PAGE	0x6
#define STAB0_OFFSET	(STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* !__ASSEMBLY__ */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25
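
/*
 * Illustrative sketch (not part of this header's API): slb.c builds
 * the VSID dword of an SLB entry from these constants roughly as
 *
 *	vsid_data = (vsid << SLB_VSID_SHIFT) | flags |
 *		    ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
 *
 * where flags is e.g. SLB_VSID_KERNEL | sllp, and SLB_VSID_SHIFT_1T
 * replaces SLB_VSID_SHIFT for 1T segments (cf. mk_vsid_data()).
 */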

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux     */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */

#ifndef __ASSEMBLY__

struct hash_pte {
	unsigned long v;
	unsigned long r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : the "PAGE_SHIFT" value for that page size
 *    sllp  : a bit mask with the SLB L || LP value, suitable for
 *            OR'ing directly into a slbmte "vsid" value
 *    penc  : the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def {
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
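
/*
 * For illustration only: the 64K entry in the default page size table
 * (see mmu_psize_defaults_gp in arch/powerpc/mm/hash_utils_64.c) looks
 * roughly like this; the authoritative values come from the firmware
 * device tree:
 *
 *	[MMU_PAGE_64K] = {
 *		.shift	= 16,
 *		.sllp	= SLB_VSID_L | SLB_VSID_LP_01,
 *		.penc	= 1,
 *		.avpnm	= 0,
 *		.tlbiel	= 1,
 *	},
 */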

#endif /* __ASSEMBLY__ */

/*
 * The kernel uses the constants below to index into the page size array.
 * Using fixed constants for this purpose gives better performance in
 * the low level hash refill handlers.
 *
 * An unsupported page size has its "shift" field set to 0.
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable.
 */

#define MMU_PAGE_4K		0	/* 4K */
#define MMU_PAGE_64K		1	/* 64K */
#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
#define MMU_PAGE_1M		3	/* 1M */
#define MMU_PAGE_16M		4	/* 16M */
#define MMU_PAGE_16G		5	/* 16G */
#define MMU_PAGE_COUNT		6

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1


#ifndef __ASSEMBLY__

/*
 * The current system page and segment sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The page size indexes of the huge pages for use by hugetlbfs
 */
extern unsigned int mmu_huge_psizes[MMU_PAGE_COUNT];

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long va, int psize,
					  int ssize)
{
	unsigned long v;
	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}
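
/*
 * A sketch of how the two encode helpers above are typically combined
 * by an HPTE insertion routine (cf. native_hpte_insert()); vflags and
 * rflags are caller-supplied validity/permission bits:
 *
 *	hptep->v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
 *	hptep->r = hpte_encode_r(pa, psize) | rflags;
 */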

/*
 * Build a VA given VSID, EA and segment size
 */
static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
				   int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return (vsid << 28) | (ea & 0xfffffffUL);
	return (vsid << 40) | (ea & 0xffffffffffUL);
}

/*
 * This hashes a virtual address
 */

static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
				     int ssize)
{
	unsigned long hash, vsid;

	if (ssize == MMU_SEGSIZE_256M) {
		hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
	} else {
		vsid = va >> 40;
		hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
	}
	return hash & 0x7fffffffffUL;
}
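
/*
 * Illustrative only: callers such as the native hash handlers turn the
 * result of hpt_hash() into a pointer to a PTE group roughly like this
 * (with hash = ~hash first when probing the secondary group):
 *
 *	hash  = hpt_hash(va, shift, ssize);
 *	hpteg = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *	hptep = htab_address + hpteg;
 */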

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local, int ssize);
struct mm_struct;
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
			  unsigned long ea, unsigned long vsid, int local,
			  unsigned long trap);

extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
extern void add_gpage(unsigned long addr, unsigned long page_size,
			  unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void htab_initialize(void);
extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

extern void slb_vmalloc_update(void);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *	(context << USER_ESID_BITS) | (esid & ((1 << USER_ESID_BITS) - 1))
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER is a prime (see the defines below)
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 * 	- We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 * 	- We allow for USER_ESID_BITS significant bits of ESID and
 * CONTEXT_BITS bits of context for user addresses, i.e. 16T (44 bits)
 * of address space for up to 512K contexts (although the page table
 * structure and context allocation will need changes to take
 * advantage of this).
 *
 * 	- The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
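
/*
 * Worked sketch of the divide-free reduction used below: since
 * 2^VSID_BITS == 1 (mod 2^VSID_BITS - 1), any product x satisfies
 *
 *	x % (2^VSID_BITS - 1)
 *		== ((x >> VSID_BITS) + (x & (2^VSID_BITS - 1)))
 *		   % (2^VSID_BITS - 1)
 *
 * One fold leaves a value only slightly wider than VSID_BITS bits, so
 * a second, cheaper fold (the rt+1 step in ASM_VSID_SCRAMBLE and in
 * vsid_scramble() below) completes the reduction.
 */
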
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER_256M	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS_256M		36
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		24
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define CONTEXT_BITS		19
#define USER_ESID_BITS		16
#define USER_ESID_BITS_1T	4

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 * 	- rt and rx must be different registers
 * 	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 * 	  bits may contain other garbage, so you may need to mask the
 * 	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^VSID_BITS-1), and lies between 0 and	\
	 * 2^VSID_BITS-1+2^28-1.  In particular, if rt >=		\
	 * 2^VSID_BITS-1, then rt+1 has the 2^VSID_BITS bit set.  So,	\
	 * if rt+1 has that bit clear, rt already has the answer we	\
	 * want; if it doesn't, the answer is the low VSID_BITS bits	\
	 * of rt+1.  So in all cases the answer is the low VSID_BITS	\
	 * bits of (rt + ((rt+1) >> VSID_BITS)) */			\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx


#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	u64 high_slices_psize;  /* 4 bits per slice for now */
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
} mm_context_t;

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */

/* This is only valid for addresses >= PAGE_OFFSET */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble(ea >> SID_SHIFT, 256M);
	return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
}

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << USER_ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << USER_ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}
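
/*
 * Putting the helpers together: the hash fault path resolves a user
 * effective address to a virtual address roughly as follows (a sketch,
 * cf. hash_page() in arch/powerpc/mm/hash_utils_64.c):
 *
 *	ssize = user_segment_size(ea);
 *	vsid  = get_vsid(mm->context.id, ea, ssize);
 *	va    = hpt_va(ea, vsid, ssize);
 */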

/*
 * This is only used on legacy iSeries in lparmap.c,
 * hence the 256MB segment assumption.
 */
#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER_256M) %	\
				 VSID_MODULUS_256M)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */