/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
#endif

extern int temp_tlb_entry;

/*
 * - add_temporary_entry() adds a temporary TLB entry.  We use TLB entries
 *	starting at the top and working down.  This is for populating the
 *	TLB before trap_init() puts the TLB miss handler in place.  It
 *	should be used only for entries matching the actual page tables,
 *	to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);
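
/*
 * Illustrative sketch only (not part of the original header): early board
 * code could pre-load a mapping before trap_init() roughly like
 *
 *	add_temporary_entry(entrylo0, entrylo1, vaddr & (PAGE_MASK << 1),
 *			    PM_4K);
 *
 * where entrylo0/entrylo1 are the EntryLo values for the even/odd page of
 * the pair, vaddr selects the EntryHi VPN2 field and PM_4K (from
 * <asm/mipsregs.h>) is the PageMask for 4 KiB pages.  The exact EntryLo
 * encoding (PFN, cache attribute, D/V/G bits) is CPU dependent.
 */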

/*
 * Basically we have the same two-level (which is the logical three-level
 * Linux page table layout folded) page tables as the i386.  Some day
 * when we have proper page coloring support we can have a 1% quicker
 * tlb refill handling mechanism; for now it is a bit slower, but it
 * works even with the cache aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a top-level (PGD) page table entry can map */
#define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
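
/*
 * Worked example (illustrative, not part of the original header): with the
 * common 4 KiB page size (PAGE_SHIFT == 12), PTE_ORDER == 0 and 4-byte
 * software PTEs (PTE_T_LOG2 == 2), PGDIR_SHIFT evaluates to
 * 2 * 12 + 0 - 2 = 22, so each PGD entry covers a 4 MiB region and
 * PGDIR_MASK is 0xffc00000.  Larger pages or 8-byte PTEs shift these
 * values accordingly.
 */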

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER 1
#define PTE_ORDER 0

#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL
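
/*
 * Worked example (illustrative, not part of the original header): staying
 * with 4 KiB pages and 4-byte pgd/pte entries (PGD_T_LOG2 == PTE_T_LOG2 == 2),
 * USER_PTRS_PER_PGD is 0x80000000 / 0x400000 = 512, PTRS_PER_PGD is 1024,
 * PTRS_PER_PTE is 4096 / 4 = 1024 and __PGD_ORDER is 32 - 36 + 2 + 2 = 0,
 * i.e. the whole page directory fits in a single page.
 */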

#define VMALLOC_START MAP_BASE

#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
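
/*
 * Note (illustrative, not part of the original header): a pmd entry torn
 * down with pmd_clear() is not zero, it points back at invalid_pte_table,
 * so the matching emptiness check afterwards is pmd_none(), e.g.
 *
 *	pmd_clear(pmdp);
 *	BUG_ON(!pmd_none(*pmdp));
 */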

#if defined(CONFIG_XPA)

#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
		(pgprot_val(prot) & ~_PFNX_MASK);
	pte.pte_high = (pfn << _PFN_SHIFT) |
		(pgprot_val(prot) & ~_PFN_MASK);
	return pte;
}
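
/*
 * Note (illustrative, not part of the original header): with XPA the frame
 * number of a >32-bit physical address no longer fits in one word, so
 * pfn_pte() stores the low part of the PFN in pte_high and the extended
 * high bits in the low bits of pte_low; pte_pfn() reassembles the two
 * halves.
 */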

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
	pte.pte_low = pgprot_val(prot);

	return pte;
}

#else

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define __pgd_offset(address) pgd_index(address)
#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
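
/*
 * Illustrative walk only (a sketch, not part of the original header): with
 * the pud/pmd levels folded away by <asm-generic/pgtable-nopmd.h>, resolving
 * a kernel virtual address down to its pte with the helpers above looks
 * roughly like
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * where the pud/pmd steps are effectively no-ops on this two-level layout.
 */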

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x) (((x).val >> 10) & 0x1f)
#define __swp_offset(x) ((x).val >> 15)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

#else

#if defined(CONFIG_XPA)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x) (((x).val >> 4) & 0x1f)
#define __swp_offset(x) ((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x) (((x).val >> 2) & 0x1f)
#define __swp_offset(x) ((x).val >> 7)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })

#else
/*
 * Constraints:
 *	_PAGE_PRESENT at bit 0
 *	_PAGE_MODIFIED at bit 4
 *	_PAGE_GLOBAL at bit 6
 *	_PAGE_VALID at bit 7
 */
#define __swp_type(x) (((x).val >> 8) & 0x1f)
#define __swp_offset(x) ((x).val >> 13)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
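
/*
 * Worked example (illustrative, not part of the original header): a swap
 * pte here keeps bits 0-7 clear, so _PAGE_PRESENT, _PAGE_MODIFIED,
 * _PAGE_GLOBAL and _PAGE_VALID all read as zero.  __swp_entry(3, 0x1234)
 * packs the type into bits 8-12 and the offset from bit 13 up, giving
 * 0x2468300, and __swp_type()/__swp_offset() recover 3 and 0x1234 from it.
 */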

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */

#endif /* _ASM_PGTABLE_32_H */