/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h: Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#define PMD_SHIFT		18
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)

#define PGDIR_SHIFT		24
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr)	(((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
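
/* Example (illustrative values): with PMD_SHIFT == 18, PMD_SIZE is 0x40000
 * (256 KiB), so PMD_ALIGN() rounds an address up to the next 256 KiB
 * boundary:
 *
 *	PMD_ALIGN(0x40001) == 0x80000
 *	PMD_ALIGN(0x40000) == 0x40000	// already aligned, unchanged
 *
 * PGDIR_ALIGN() does the same at 16 MiB (1 << PGDIR_SHIFT) granularity.
 */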

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#define PTRS_PER_PTE	64
#define PTRS_PER_PMD	64
#define PTRS_PER_PGD	256
#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized). */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction. This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}
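
/* Usage sketch (illustrative only): srmmu_swap() atomically stores the new
 * value and returns the previous one:
 *
 *	unsigned long old, word = 1;
 *	old = srmmu_swap(&word, 2);	// now word == 2, old == 1
 *
 * set_pte() below builds on this so a hardware ref/mod-bit update racing
 * with a software PTE store cannot be lost.
 */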

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long __pmd_page(pmd_t pmd)
{
	unsigned long v;

	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();

	v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}
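
/* Note on the "<< 4" (values follow the encoding used in this file): a
 * page table descriptor stores the physical address of the next-level
 * table shifted right by four bits, leaving the low bits for the SRMMU
 * entry-type field. A table at physical 0x12345000 is thus stored as
 * 0x01234500 plus the type bits; decoding masks with SRMMU_PTD_PMASK and
 * shifts back:
 *
 *	(pmd_val(pmd) & SRMMU_PTD_PMASK) << 4	// == 0x12345000
 */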

static inline pmd_t *pud_pgtable(pud_t pud)
{
	if (srmmu_device_memory(pud_val(pud))) {
		return (pmd_t *)~0;
	} else {
		unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
		return (pmd_t *)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}

static inline int pud_none(pud_t pud)
{
	return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
	return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pte((pte_t *)pudp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}
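
/* Usage sketch (illustrative only): the pte_mk*() helpers are pure
 * functions returning a new pte rather than modifying one in place, so
 * they compose naturally:
 *
 *	pte_t pte = *ptep;
 *	pte = pte_mkdirty(pte_mkyoung(pte));	// sets SRMMU_DIRTY | SRMMU_REF
 *	set_pte(ptep, pte);			// one atomic store via swap
 */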

#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false. This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
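
/* Worked example (illustrative values): like the page table descriptors
 * above, a PTE stores the physical address shifted right by four bits,
 * leaving the low bits for protection and type flags. A page at physical
 * 0x12345000 is therefore encoded as:
 *
 *	mk_pte_phys(0x12345000, PAGE_KERNEL)
 *		== __pte(0x01234500 | pgprot_val(PAGE_KERNEL))
 *
 * mk_pte_io() additionally puts the 4-bit I/O space number in bits 31-28.
 */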

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}
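
/* Usage sketch (illustrative only): code mapping device registers can
 * strip the cacheable bit from an existing protection first, e.g.
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *
 * which simply clears SRMMU_CACHE so accesses bypass the cache.
 */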

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

/* Only used by the huge vmap code; should never be called. */
#define pud_page(pud)		NULL

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT		0x1
#define FAULT_CODE_WRITE	0x2
#define FAULT_CODE_USER		0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
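
/* Round-trip sketch (illustrative only): the helpers above are mutually
 * inverse for any in-range pair, i.e.
 *
 *	swp_entry_t e = __swp_entry(type, offset);
 *	// __swp_type(e) == type, __swp_offset(e) == offset
 *
 * The SRMMU_SWP_* shift/mask constants come from asm/pgtsrmmu.h and keep
 * the entry-type bits clear, so a swapped-out entry never looks present.
 */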

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

static inline unsigned long
__get_phys(unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte(addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace(unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return (srmmu_get_pte(addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

/*
 * For both sparc32 and sparc64, the pfn in io_remap_pfn_range() carries
 * <iospace> in its high 4 bits. These macros/functions put it there or
 * get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	((pfn) | ((space) << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		((pfn) >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			((pfn) & 0x0fffffffUL)
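
/* Worked example (illustrative values): on 32-bit SPARC, BITS_PER_LONG is
 * 32, so the iospace lives in bits 31-28 of the pfn:
 *
 *	unsigned long p = MK_IOSPACE_PFN(0x2, 0x00345678);
 *	// p == 0x20345678
 *	// GET_IOSPACE(p) == 0x2, GET_PFN(p) == 0x00345678
 */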

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
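
/* Usage sketch (illustrative, hypothetical driver code; "space" and
 * "phys_addr" are stand-ins): a device mmap() handler would combine the
 * helpers above like so:
 *
 *	unsigned long pfn = MK_IOSPACE_PFN(space, phys_addr >> PAGE_SHIFT);
 *
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  pgprot_noncached(vma->vm_page_prot));
 */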

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	__changed; \
})
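
/* Note: ptep_set_access_flags() is a GNU statement expression that
 * evaluates to 1 when the PTE actually changed (in which case the TLB
 * entry was flushed too) and to 0 otherwise, letting callers skip
 * redundant flushes.
 */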

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START		_AC(0xfe600000,UL)
#define VMALLOC_END		_AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#define pmd_pgtable(pmd)	((pgtable_t)__pmd_page(pmd))

#endif /* !(_SPARC_PGTABLE_H) */