/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}
#endif
#ifndef pte_read
static inline int pte_read(pte_t pte)		{ return 1; }
#endif
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
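/*
 * _PTE_NONE_MASK covers platform bits that may legitimately remain set in an
 * otherwise empty PTE, so mask them off before testing for none.
 */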
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h. On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !pte_user(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * Don't just check for any non-zero bits in _PAGE_USER, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_USER. Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_user
static inline bool pte_user(pte_t pte)
{
	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
}
#endif

/*
 * We only find page table entries in the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read is permitted for any present user PTE (pte_read() is
	 * always true here unless a platform overrides it); a write
	 * additionally requires the PTE to be writable.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
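/* The PFN occupies the bits from PTE_RPN_SHIFT upwards; everything below is protection and status bits. */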
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}

/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

#ifndef pte_mkprivileged
static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}
#endif

#ifndef pte_mkuser
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}
#endif

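/*
 * pte_modify() keeps the bits covered by _PAGE_CHG_MASK (typically the PFN
 * plus dirty/accessed/special tracking) and takes everything else from the
 * new protection.
 */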
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* Insert a PTE: the top-level function is out of line and uses an inline
 * low level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/* The 32-bit case with 64-bit PTEs can just store the two halves,
	 * as long as it does so in the right order with a barrier in
	 * between. The percpu case also falls back to the simple update.
	 */
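	/*
	 * The low word at ptep + 4 holds the flags, including _PAGE_PRESENT,
	 * and is stored last, so a concurrent walker can never see a present
	 * PTE whose upper half has not been written yet.
	 */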
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			eieio\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
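	/*
	 * With 16k pages on the 8xx, the hardware tablewalk still operates
	 * at 4k granularity, so the value is replicated into all four
	 * sub-entries backing the page.
	 */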
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

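/*
 * Each helper below first clears all cache-control bits, then sets the
 * combination for the requested caching mode.
 */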
#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

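/*
 * Platforms without a write-through bit fall back to a fully uncached
 * mapping, which is the conservative choice.
 */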
#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
#else
	/* We clear the top bit to indicate hugepd */
	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

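/*
 * On nohash, huge pages are represented by hugepd directory entries rather
 * than leaf entries at the PMD/PUD/PGD level, so these all return false.
 */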
static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int pgd_huge(pgd_t pgd)
{
	return 0;
}
#define pgd_huge		pgd_huge

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */