#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE, which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/pgtable-ppc64.h>
#include <asm/bug.h>

/*
 * Segment table
 */

#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12

/* Location of cpu0's segment table */
#define STAB0_PAGE	0x8
#define STAB0_OFFSET	(STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* !__ASSEMBLY__ */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */

#ifndef __ASSEMBLY__

struct hash_pte {
	unsigned long v;
	unsigned long r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}
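
/*
 * Example (a sketch, assuming mmu_psize_defs[] has already been
 * populated with the standard 4K definition, i.e.
 * mmu_psize_defs[MMU_PAGE_4K].shift == 12):
 *
 *	shift_to_mmu_psize(12) == MMU_PAGE_4K
 *	shift_to_mmu_psize(15) == -1	(no such page size)
 */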

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encode the page number shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash from a VA we can ignore the page size bits of the VA,
 * and for HPTE encoding we ignore up to 23 bits of the VA. So ignoring
 * the lower 12 bits ensures we work in all cases, including 4K page size.
 */
#define VPN_SHIFT	12
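
/*
 * Worked example (a sketch; the numbers follow directly from the
 * comment above): the full VA is (VSID || segment offset). We keep
 *
 *	vpn = va >> VPN_SHIFT;
 *
 * For 4K pages (shift == 12) the VPN is exactly the virtual page
 * number. For 64K pages (shift == 16) the low 4 bits of the VPN fall
 * inside the page and are ignored by hpt_hash() below.
 */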

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
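
/*
 * Example values (straightforward arithmetic on the macro above):
 * LP_MASK(0) == 0xff000 covers all LP_BITS bits of the LP field,
 * while e.g. LP_MASK(4) == 0x0f000 keeps only the low 4 LP bits.
 */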

#ifndef __ASSEMBLY__

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE.  The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
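
/*
 * Illustrative only: penc values are determined at boot, so the value
 * below is hypothetical. For a 16M actual page (shift == 24) whose
 * base/actual pair happened to have penc == 1, this would yield:
 *
 *	hpte_encode_r(pa, psize, MMU_PAGE_16M)
 *		== (pa & ~0xffffffUL) | (1 << LP_SHIFT)
 */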

/*
 * Build a VPN_SHIFT-bit shifted VA (i.e. the VPN) given VSID, EA and
 * segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
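
/*
 * Worked example (a sketch with made-up input values): for a 256M
 * segment, s_shift == SID_SHIFT == 28, so mask == 0xffff and
 *
 *	hpt_vpn(0x12345000, 0xabc, MMU_SEGSIZE_256M)
 *		== (0xabc << 16) | ((0x12345000 >> 12) & 0xffff)
 *		== 0xabc0000 | 0x2345
 *		== 0xabc2345
 */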

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
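
/*
 * Worked example (continuing the hypothetical vpn == 0xabc2345 from
 * the hpt_vpn() sketch above, with 4K pages so shift == 12):
 *
 *	hash = (0xabc2345 >> 16) ^ ((0xabc2345 & 0xffff) >> 0)
 *	     = 0xabc ^ 0x2345
 *	     = 0x29f9
 *
 * i.e. for 256M segments the hash is simply VSID XOR the page index
 * within the segment.
 */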

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize);
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5);
 * for kernel space, we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for SLB entries mapping
 * bad addresses. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in VSID 0
 * because of the modulo operation in the vsid scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)
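
/*
 * Why the mod-(2^n - 1) trick works (a worked miniature, not kernel
 * code): since 2^n == 1 (mod 2^n - 1), the high bits of a product can
 * simply be folded back onto the low bits. With n == 4 (modulus 15):
 *
 *	x = 0x37 (55);  55 % 15 == 10
 *	(x >> 4) + (x & 0xf) == 3 + 7 == 10
 *
 * One such folding step (plus the carry fix-up done by both
 * vsid_scramble() and ASM_VSID_SCRAMBLE below) is all the modulo costs.
 */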


#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 * 	- rt and rx must be different registers
 * 	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 * 	  bits may contain other garbage, so you may need to mask the
 * 	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx
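
/*
 * Typical invocation (a sketch, assuming the proto-VSID has been
 * placed in r3 and r9 is free to clobber, as in the SLB miss path):
 *
 *	ASM_VSID_SCRAMBLE(r3,r9,256M)
 *
 * Afterwards the low VSID_BITS_256M bits of r3 hold the VSID.
 */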

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE  (PGTABLE_RANGE >> 41)
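
/*
 * Size check (simple arithmetic, assuming the 64TB PGTABLE_RANGE
 * mentioned above): 2^46 / 2^40 = 64 slices, at 4 bits (half a byte)
 * each, gives 2^46 >> 41 == 32 bytes.
 */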

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[2];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)
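
/*
 * Sanity check of the comment above (assuming 64K pages, i.e.
 * PAGE_SHIFT == 16, the usual configuration when sub-page protection
 * is used): SBP_L1_BITS == 14, so one protection page holds 2^14
 * 4-byte words, each covering 64K: 2^14 * 2^16 == 1GB per page.
 * SBP_L2_BITS == 13, so a page of pointers covers 2^13 * 1GB == 8TB.
 */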

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;
struct spinlock;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#ifdef CONFIG_PPC_ICSWX
	struct spinlock *cop_lockp; /* guard acop and cop_pid */
	unsigned long acop;	/* mask of enabled coprocessor types */
	unsigned int cop_pid;	/* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
	/* for 4K PTE fragment support */
	void *pte_frag;
#endif
} mm_context_t;


#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that.
	 */
	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map addresses as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 contexts from the available range.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
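
/*
 * Worked example (plain arithmetic on the constants above): for
 * ea == 0xd000000000000000, ea >> 60 == 0xd, so
 *
 *	context = (2^19 - 5) + (0xd - 0xc) + 1 = 0x7fffb + 2 = 0x7fffd
 *
 * which matches the 0x7fffd slot in the mapping table above.
 */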
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */