/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_H
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 non-hashed page table.
 */

#include <linux/sizes.h>

#include <asm/nohash/64/pgtable-4k.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>

#define FIRST_USER_ADDRESS	0UL

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE	(PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
				 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE		(ASM_CONST(1) << PGTABLE_EADDR_SIZE)

#define PMD_CACHE_INDEX		PMD_INDEX_SIZE
#define PUD_CACHE_INDEX		PUD_INDEX_SIZE

/*
 * Define the address range of the kernel non-linear virtual area
 */
#define KERN_VIRT_START		ASM_CONST(0x8000000000000000)
#define KERN_VIRT_SIZE		ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START		KERN_VIRT_START
#define VMALLOC_SIZE		(KERN_VIRT_SIZE >> 2)
#define VMALLOC_END		(VMALLOC_START + VMALLOC_SIZE)
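
/*
 * With the constants above this works out to a 4TB vmalloc window:
 * 0x8000000000000000 - 0x8000040000000000.
 */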

/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_START	(ioremap_bot)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
#define FIXADDR_SIZE	SZ_32M
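
/*
 * With the constants above the IO space works out to:
 *   KERN_IO_START = 0x8000080000000000 (ISA_IO_BASE, 64K of ISA space)
 *   ISA_IO_END    = 0x8000080000010000 (start of the PHB IO spaces)
 *   PHB_IO_END    = 0x8000080080000000 (start of the ioremap space)
 */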


/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define USER_REGION_ID		(0UL)
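
/*
 * For example, REGION_ID(VMALLOC_START) is 0x8 (the top nibble of
 * KERN_VIRT_START), while user addresses all have region ID 0.
 */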

/*
 * Defines the address of the vmemmap area, in its own region
 * after the vmalloc space on Book3E
 */
#define VMEMMAP_BASE		VMALLOC_END
#define VMEMMAP_END		KERN_IO_START
#define vmemmap			((struct page *)VMEMMAP_BASE)


/*
 * Include the PTE bits definitions
 */
#include <asm/nohash/pte-book3e.h>

#define _PAGE_SAO	0

#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

#define H_PAGE_4K_PFN	0

#ifndef __ASSEMBLY__
/* pte_clear moved to later in this file */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)	(!pmd_none(pmd))
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
extern struct page *pmd_page(pmd_t pmd);

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)

extern struct page *pud_page(pud_t pud);

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define p4d_write(p4d)		pte_write(p4d_pte(p4d))

static inline void p4d_set(p4d_t *p4dp, unsigned long val)
{
	*p4dp = __p4d(val);
}
/* Atomic PTE updates */
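/*
 * Clear the bits in @clr and set the bits in @set, returning the old PTE
 * value. The update itself is a plain store here; the non-huge case asserts
 * that the caller holds the PTE page lock.
 */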
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set,
				       int huge)
{
	unsigned long old = pte_val(*ptep);
	*ptep = __pte((old & ~clr) | set);

	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	return old;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

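/* Only clear the accessed bit when it is actually set, to save a PTE update. */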
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if (!pte_young(*ptep))
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}


/* Set the dirty and/or accessed bits atomically in a linux PTE */
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old | bits);

	flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
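/*
 * A swap PTE holds the swap type in SWP_TYPE_BITS bits starting at
 * _PAGE_BIT_SWAP_TYPE, and the swap offset in the RPN field above
 * PTE_RPN_SHIFT (both come from the PTE bits header included above).
 */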
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	} while (0)

#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		((x).val >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					((type) << _PAGE_BIT_SWAP_TYPE) \
					| ((offset) << PTE_RPN_SHIFT) })

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		__pte((x).val)

int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);
extern int __meminit vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys);
extern void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */