#ifndef _ASM_POWERPC_PGTABLE_64K_H
#define _ASM_POWERPC_PGTABLE_64K_H

#include <asm-generic/pgtable-nopud.h>


#define PTE_INDEX_SIZE  12
#define PMD_INDEX_SIZE  12
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE  4

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(real_pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (each 4-byte
 * word protects 64k).  For the 3-level tree, each page of pointers
 * then protects 8TB.
 */
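/*
 * Worked numbers for the arithmetic above (a sketch, assuming the 64K
 * base page size this header is built for, i.e. PAGE_SHIFT == 16):
 * one 64K page holds 2^16 / 4 = 2^14 protection words, each covering
 * a 64K page, so one leaf page protects 2^14 * 2^16 bytes = 1GB; one
 * 64K page holds 2^16 / 8 = 2^13 pointers, each covering 1GB, so one
 * page of pointers protects 2^13 GB = 8TB.
 */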
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[2];
	unsigned int *low_prot[4];
};

#undef PGD_TABLE_SIZE
#define PGD_TABLE_SIZE		((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
				 sizeof(struct subpage_prot_table))

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)
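/*
 * Consistency check (illustrative, with PAGE_SHIFT == 16):
 * SBP_L2_SHIFT = 16 + 14 = 30, so one leaf page spans 2^30 = 1GB, and
 * SBP_L3_SHIFT = 30 + 13 = 43, so one page of pointers spans
 * 2^43 = 8TB, matching the comment above.
 */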

extern void subpage_prot_free(pgd_t *pgd);

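/*
 * The subpage_prot_table sits immediately after the PTRS_PER_PGD pgd
 * entries, which is why PGD_TABLE_SIZE is enlarged above.
 */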
static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
{
	return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
}
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#endif	/* __ASSEMBLY__ */

/* With 64k base page size, hugepage PTEs go at the PTE level */
#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
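/*
 * Worked values (illustrative, PAGE_SHIFT == 16): PMD_SHIFT is
 * 16 + 12 = 28, so each PMD entry maps 256MB; PGDIR_SHIFT is
 * 28 + 12 = 40, so each PGD entry maps 1TB.
 */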

/* Additional PTE bits (don't change without checking asm in hash_low.S) */
#define __HAVE_ARCH_PTE_SPECIAL
#define _PAGE_SPECIAL	0x00000400 /* software: special page */
#define _PAGE_HPTE_SUB	0x0ffff000 /* combo only: sub pages HPTE bits */
#define _PAGE_HPTE_SUB0	0x08000000 /* combo only: first sub page */
#define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
#define _PAGE_4K_PFN	0x20000000 /* PFN is for a single 4k page */

/* For 64K pages, we don't have a separate _PAGE_HASHPTE bit. Instead,
 * we define it as the whole sub-bits mask. The C code will only
 * test this, so a multi-bit mask will work. For combo pages, this
 * is equivalent since, effectively, the old _PAGE_HASHPTE was an OR
 * of all the sub bits. For real 64k pages, the assembly now sets
 * _PAGE_HPTE_SUB0 in addition to the HIDX bits, which overlap
 * that mask. This is fine as long as the HIDX bits are never set on
 * a PTE that isn't hashed, which is the case today.
 *
 * One wrinkle: the huge page code does its hashing in C, so we need
 * to tell it which bit to use.
 */
#define _PAGE_HASHPTE	_PAGE_HPTE_SUB
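/*
 * Layout check (illustrative): _PAGE_HPTE_SUB is 0x0ffff000, i.e.
 * bits 12-27, one bit per 4k subpage of a 64K page (64K / 4K = 16
 * subpages).  _PAGE_HPTE_SUB0 is the top bit of that range, and
 * __rpte_sub_valid() below shifts it right by the subpage index.
 */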
/* Note the full page bits must be in the same location as for normal
 * 4k pages as the same assembly will be used to insert 64K pages
 * whether the kernel has CONFIG_PPC_64K_PAGES or not
 */
#define _PAGE_F_SECOND  0x00008000 /* full page: hidx bits */
#define _PAGE_F_GIX     0x00007000 /* full page: hidx bits */

/* PTE flags to preserve for HPTE identification */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO)

/* Shift to put page number into pte.
 *
 * That gives us a max RPN of 34 bits, which means a max of 50 bits
 * of addressable physical space, or 46 bits for the special 4k PFNs.
 */
#define PTE_RPN_SHIFT	(30)
#define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
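/*
 * Arithmetic check (illustrative): the RPN gets 64 - 30 = 34 bits;
 * with 64K pages (shift 16) that addresses 34 + 16 = 50 bits of
 * physical space, and with _PAGE_4K_PFN (shift 12) 34 + 12 = 46 bits.
 */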

/* _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
                         _PAGE_ACCESSED | _PAGE_SPECIAL)

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		0x1ff
/* Bits to mask out from a PGD/PUD to get to the PMD page */
#define PUD_MASKED_BITS		0x1ff

/* Manipulate "rpte" values */
#define __real_pte(e,p) 	((real_pte_t) { \
	(e), pte_val(*((p) + PTRS_PER_PTE)) })
#define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
        (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_sub_valid(rpte, index) \
	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
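/*
 * How these fit together: __real_pte() pairs a PTE with the word
 * sitting PTRS_PER_PTE entries later, where the per-subpage hidx
 * values are kept 4 bits apiece.  __rpte_to_hidx() pulls out the
 * 4-bit hidx for subpage "index" of a combo page, or bits 12-15 of
 * the PTE itself (_PAGE_F_GIX | _PAGE_F_SECOND) for a real 64K page.
 */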


/* Trick: we set __end to va + 64k, which happens to work for
 * a 16M page as well, since we want only one iteration there
 */
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	    \
        do {                                                                \
                unsigned long __end = va + PAGE_SIZE;                       \
                unsigned __split = (psize == MMU_PAGE_4K ||                 \
				    psize == MMU_PAGE_64K_AP);              \
                shift = mmu_psize_defs[psize].shift;                        \
		for (index = 0; va < __end; index++, va += (1L << shift)) { \
		        if (!__split || __rpte_sub_valid(rpte, index)) do { \

#define pte_iterate_hashed_end() } while(0); } } while(0)
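/*
 * Typical shape of a caller (an illustrative sketch; the loop body is
 * not part of this header):
 *
 *	pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *		... invalidate or update the HPTE for this subpage ...
 *	} pte_iterate_hashed_end();
 *
 * For a non-combo 64K (or larger) page the loop runs exactly once;
 * for a combo page it visits only the subpages whose _PAGE_HPTE_SUB
 * bit says a hash PTE exists.
 */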

#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & _PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

#define remap_4k_pfn(vma, addr, pfn, prot)				\
	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,		\
			__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
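/*
 * Sketch of use from a driver's mmap() handler (illustrative only;
 * "foo_mmap" and "pfn" are hypothetical):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_4k_pfn(vma, vma->vm_start, pfn,
 *				    vma->vm_page_prot);
 *	}
 *
 * _PAGE_4K_PFN tells the hash code that the PFN refers to a single
 * 4k physical page rather than a full 64K one.
 */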

#endif /* _ASM_POWERPC_PGTABLE_64K_H */