/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation

#ifndef _ASMNDS32_PGTABLE_H
#define _ASMNDS32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/nds32.h>
#ifndef __ASSEMBLY__
#include <asm/fixmap.h>
#include <nds32_intrinsic.h>
#endif

#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024
#define PTRS_PER_PTE 1024
#endif

#ifdef CONFIG_ANDES_PAGE_SIZE_8KB
#define PGDIR_SHIFT 24
#define PTRS_PER_PGD 256
#define PTRS_PER_PTE 2048
#endif

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at. This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS 0x8000

#ifdef CONFIG_HIGHMEM
#define CONSISTENT_BASE ((PKMAP_BASE) - (SZ_2M))
#define CONSISTENT_END (PKMAP_BASE)
#else
#define CONSISTENT_BASE (FIXADDR_START - SZ_2M)
#define CONSISTENT_END (FIXADDR_START)
#endif
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

#ifdef CONFIG_HIGHMEM
#ifndef __ASSEMBLY__
#include <asm/highmem.h>
#endif
#endif

#define VMALLOC_RESERVE SZ_128M
#define VMALLOC_END (CONSISTENT_BASE - PAGE_SIZE)
#define VMALLOC_START ((VMALLOC_END) - VMALLOC_RESERVE)
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define MAXMEM __pa(VMALLOC_START)
#define MAXMEM_PFN PFN_DOWN(MAXMEM)
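
/*
 * Layout sketch (illustrative, derived from the macros above, for the
 * CONFIG_HIGHMEM=n case):
 *
 *	CONSISTENT_END  = FIXADDR_START
 *	CONSISTENT_BASE = FIXADDR_START - SZ_2M       (2MB consistent window)
 *	VMALLOC_END     = CONSISTENT_BASE - PAGE_SIZE
 *	VMALLOC_START   = VMALLOC_END - SZ_128M       (128MB vmalloc area)
 *
 * Physical memory above __pa(VMALLOC_START) cannot be linearly mapped,
 * which is what MAXMEM/MAXMEM_PFN express. A sketch of testing a
 * hypothetical kernel virtual address "vaddr" against the window:
 *
 *	if (vaddr >= CONSISTENT_BASE && vaddr < CONSISTENT_END)
 *		idx = CONSISTENT_OFFSET(vaddr);	// page index in window
 */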

#define FIRST_USER_PGD_NR 0
#define USER_PTRS_PER_PGD ((TASK_SIZE / PGDIR_SIZE) + FIRST_USER_PGD_NR)

/* L2 PTE */
#define _PAGE_V (1UL << 0)

#define _PAGE_M_XKRW (0UL << 1)
#define _PAGE_M_UR_KR (1UL << 1)
#define _PAGE_M_UR_KRW (2UL << 1)
#define _PAGE_M_URW_KRW (3UL << 1)
#define _PAGE_M_KR (5UL << 1)
#define _PAGE_M_KRW (7UL << 1)

#define _PAGE_D (1UL << 4)
#define _PAGE_E (1UL << 5)
#define _PAGE_A (1UL << 6)
#define _PAGE_G (1UL << 7)

#define _PAGE_C_DEV (0UL << 8)
#define _PAGE_C_DEV_WB (1UL << 8)
#define _PAGE_C_MEM (2UL << 8)
#define _PAGE_C_MEM_SHRD_WB (4UL << 8)
#define _PAGE_C_MEM_SHRD_WT (5UL << 8)
#define _PAGE_C_MEM_WB (6UL << 8)
#define _PAGE_C_MEM_WT (7UL << 8)

#define _PAGE_L (1UL << 11)

#define _HAVE_PAGE_L (_PAGE_L)
#define _PAGE_FILE (1UL << 1)
#define _PAGE_YOUNG 0
#define _PAGE_M_MASK _PAGE_M_KRW
#define _PAGE_C_MASK _PAGE_C_MEM_WT
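
/*
 * Example (sketch): decoding the access-mode and cacheability fields
 * of a raw PTE value with the masks above; "val" is hypothetical:
 *
 *	unsigned long mode  = val & _PAGE_M_MASK;   // one of _PAGE_M_*
 *	unsigned long cache = val & _PAGE_C_MASK;   // one of _PAGE_C_*
 *	bool user_can_write = (mode == _PAGE_M_URW_KRW);
 */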

#ifdef CONFIG_SMP
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define _PAGE_CACHE_SHRD _PAGE_C_MEM_SHRD_WT
#else
#define _PAGE_CACHE_SHRD _PAGE_C_MEM_SHRD_WB
#endif
#else
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define _PAGE_CACHE_SHRD _PAGE_C_MEM_WT
#else
#define _PAGE_CACHE_SHRD _PAGE_C_MEM_WB
#endif
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define _PAGE_CACHE _PAGE_C_MEM_WT
#else
#define _PAGE_CACHE _PAGE_C_MEM_WB
#endif

#define _PAGE_IOREMAP \
	(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
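
/*
 * Example (sketch): _PAGE_IOREMAP gives device mappings a valid,
 * kernel read/write, dirty, global entry with the uncached device
 * cacheability class (_PAGE_C_DEV), so ioremap()ed registers are
 * never served from the data cache:
 *
 *	pgprot_t prot = __pgprot(_PAGE_IOREMAP);	// illustrative only
 */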

/*
 * Level 1 descriptor (PMD)
 */
#define PMD_TYPE_TABLE 0

#ifndef __ASSEMBLY__

#define _PAGE_USER_TABLE PMD_TYPE_TABLE
#define _PAGE_KERNEL_TABLE PMD_TYPE_TABLE

#define PAGE_EXEC __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_E)
#define PAGE_NONE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_A)
#define PAGE_READ __pgprot(_PAGE_V | _PAGE_M_UR_KR)
#define PAGE_RDWR __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D)
#define PAGE_COPY __pgprot(_PAGE_V | _PAGE_M_UR_KR)

#define PAGE_UXKRWX_V1 __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_UXKRWX_V2 __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_URXKRWX_V2 __pgprot(_PAGE_V | _PAGE_M_UR_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE)
#define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_SHARED __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD)
#define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
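
/*
 * Example (sketch): picking a protection for a fixed kernel mapping;
 * PAGE_KERNEL is for normal cached RAM, PAGE_DEVICE for I/O. "page"
 * is hypothetical:
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 */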
#endif /* !__ASSEMBLY__ */

/* xwr */
#define __P000 (PAGE_NONE | _PAGE_CACHE_SHRD)
#define __P001 (PAGE_READ | _PAGE_CACHE_SHRD)
#define __P010 (PAGE_COPY | _PAGE_CACHE_SHRD)
#define __P011 (PAGE_COPY | _PAGE_CACHE_SHRD)
#define __P100 (PAGE_EXEC | _PAGE_CACHE_SHRD)
#define __P101 (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD)
#define __P110 (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD)
#define __P111 (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD)

#define __S000 (PAGE_NONE | _PAGE_CACHE_SHRD)
#define __S001 (PAGE_READ | _PAGE_CACHE_SHRD)
#define __S010 (PAGE_RDWR | _PAGE_CACHE_SHRD)
#define __S011 (PAGE_RDWR | _PAGE_CACHE_SHRD)
#define __S100 (PAGE_EXEC | _PAGE_CACHE_SHRD)
#define __S101 (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD)
#define __S110 (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD)
#define __S111 (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD)
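
/*
 * The __P/__S tables above are indexed by the mmap protection bits in
 * xwr order: __Pxwr for private mappings, __Sxwr for shared ones. For
 * example, PROT_READ|PROT_WRITE selects __P011 (PAGE_COPY, i.e.
 * copy-on-write) for a private mapping but __S011 (PAGE_RDWR, really
 * writable) for a shared one; a sketch of the generic lookup:
 *
 *	prot = shared ? __S011 : __P011;	// "shared" is hypothetical
 */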

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
extern void paging_init(void);
#define ZERO_PAGE(vaddr) (empty_zero_page)

#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
 * Set a level 1 translation table entry, and clean it out of
 * any caches such that the MMUs can load it correctly.
 */
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
#if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	__asm__ volatile ("\n\tcctl %0, L1D_VA_WB" : : "r"(pmdp) : "memory");
	__nds32__msync_all();
	__nds32__dsb();
#endif
}

/*
 * Set a PTE and flush it out
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
#if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	__asm__ volatile ("\n\tcctl %0, L1D_VA_WB" : : "r"(ptep) : "memory");
	__nds32__msync_all();
	__nds32__dsb();
#endif
}
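
/*
 * Example (sketch): generic code installs a PTE through set_pte_at(),
 * which here is just set_pte() plus the cache writeback above; "mm",
 * "addr", "ptep" and "page" are hypothetical:
 *
 *	pte_t pte = mk_pte(page, PAGE_SHARED);
 *	set_pte_at(mm, addr, ptep, pte);
 */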

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */

/*
 * pte_write: this page is writeable for user mode
 * pte_read: this page is readable for user mode
 * pte_kernel_write: this page is writeable for kernel mode
 *
 * We don't have pte_kernel_read because the kernel can always read.
 */

#define pte_present(pte) (pte_val(pte) & _PAGE_V)
#define pte_write(pte) ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW)
#define pte_read(pte) (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KR) || \
		       ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \
		       ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW))
#define pte_kernel_write(pte) (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW) || \
			       ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \
			       ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_KRW) || \
			       (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_XKRW) && pte_exec(pte)))
#define pte_exec(pte) (pte_val(pte) & _PAGE_E)
#define pte_dirty(pte) (pte_val(pte) & _PAGE_D)
#define pte_young(pte) (pte_val(pte) & _PAGE_YOUNG)
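
/*
 * Note that _PAGE_YOUNG is defined as 0, so pte_young() is always
 * false and pte_mkold()/pte_mkyoung() below are no-ops: this PTE
 * format has no accessed bit. Usage sketch ("pte" is hypothetical):
 *
 *	if (pte_present(pte) && pte_write(pte))
 *		do_write();	// user mode may write via this mapping
 */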

/*
 * The following only works if pte_present() is not true.
 */
#define pte_file(pte) (pte_val(pte) & _PAGE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 2)
#define pgoff_to_pte(x) __pte(((x) << 2) | _PAGE_FILE)

#define PTE_FILE_MAX_BITS 29
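
/*
 * Sketch of the nonlinear file pte layout: bit 0 (_PAGE_V) stays clear
 * so the entry is not present, bit 1 (_PAGE_FILE) marks it as a file
 * entry, and the page offset lives in the bits above. The encoding
 * round-trips for any offset within PTE_FILE_MAX_BITS ("pgoff" is
 * hypothetical):
 *
 *	pte_t pte = pgoff_to_pte(pgoff);
 *	BUG_ON(pte_to_pgoff(pte) != pgoff);
 */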

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK;
	pte_val(pte) = pte_val(pte) | _PAGE_M_UR_KR;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK;
	pte_val(pte) = pte_val(pte) | _PAGE_M_URW_KRW;
	return pte;
}

PTE_BIT_FUNC(exprotect, &= ~_PAGE_E);
PTE_BIT_FUNC(mkexec, |= _PAGE_E);
PTE_BIT_FUNC(mkclean, &= ~_PAGE_D);
PTE_BIT_FUNC(mkdirty, |= _PAGE_D);
PTE_BIT_FUNC(mkold, &= ~_PAGE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= _PAGE_YOUNG);
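
/*
 * Each PTE_BIT_FUNC() line above expands to a small accessor, e.g.
 * PTE_BIT_FUNC(mkdirty, |= _PAGE_D) becomes:
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{
 *		pte_val(pte) |= _PAGE_D;
 *		return pte;
 *	}
 *
 * pte_wrprotect() and pte_mkwrite() are open-coded instead because the
 * access mode is a multi-bit field that must be replaced, not OR'd in.
 */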

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_C_MASK) | _PAGE_C_DEV)
#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_C_MASK) | _PAGE_C_DEV_WB)

#define pmd_none(pmd) (pmd_val(pmd) & 0x1)
#define pmd_present(pmd) (!pmd_none(pmd))
#define pmd_bad(pmd) pmd_none(pmd)

#define copy_pmd(pmdpd,pmdps) set_pmd((pmdpd), *(pmdps))
#define pmd_clear(pmdp) set_pmd((pmdp), __pmd(1))

static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
{
	unsigned long ptr = (unsigned long)ptep;
	pmd_t pmd;

	/*
	 * The pmd must be loaded with the physical
	 * address of the PTE table
	 */
	pmd_val(pmd) = __virt_to_phys(ptr) | prot;
	return pmd;
}
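
/*
 * Example (sketch): hooking a freshly allocated PTE table into a pmd;
 * "pmdp" and "ptep" are hypothetical. Note the inverted convention
 * above: PMD_TYPE_TABLE is 0 and pmd_clear() stores 1, so pmd_none()
 * tests for bit 0 being set rather than the entry being zero.
 *
 *	set_pmd(pmdp, __mk_pmd(ptep, _PAGE_KERNEL_TABLE));
 */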

#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))

/*
 * Convert a page count to megabytes.
 */
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_present(pgd) (1)
#define pgd_clear(pgdp) do { } while (0)

#define page_pte_prot(page,prot) mk_pte(page, prot)
#define page_pte(page) mk_pte(page, __pgprot(0))

/*
 * Two-level hardware table walk:
 *
 *	L1PTE = $mr1 + ((virt >> PMD_SHIFT) << 2);
 *	L2PTE = (((virt >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) << 2);
 *	PPN   = (phys & 0xfffff000);
 */
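
/*
 * Worked example (sketch, 4KB pages, so PMD_SHIFT == 22): for
 * virt == 0x00403000,
 *
 *	L1 index = 0x00403000 >> 22          = 1   (entry at $mr1 + 4)
 *	L2 index = (0x00403000 >> 12) & 1023 = 3   (entry at offset 3 << 2)
 *	PPN      = phys & 0xfffff000               (the 4KB frame base)
 */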

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = 0xfff;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
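
/*
 * Example (sketch): pte_modify() keeps the PFN (bits above 11) and
 * swaps in the low 12 protection bits, which is what mprotect() needs
 * when it rewrites a range; "pte" is hypothetical:
 *
 *	pte = pte_modify(pte, PAGE_READ);	// drop user write permission
 */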

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4KB-page machines.
 */
#define __swp_type(x) (((x).val >> 2) & 0x7f)
#define __swp_offset(x) ((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
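
/*
 * Sketch of the swap entry layout implied above: bit 0 stays clear
 * (not present), bits [8:2] hold the swap type (7 bits) and bits
 * [31:9] the swap offset (23 bits), hence 2^23 pages * 4KB = 32GB of
 * addressable swap; "type" and "offset" are hypothetical:
 *
 *	swp_entry_t e = __swp_entry(type, offset);
 *	BUG_ON(__swp_type(e) != type || __swp_offset(e) != offset);
 */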

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical address `phys' of size `size' with page protection `prot'
 * into virtual address `from'
 */

#endif /* !__ASSEMBLY__ */

#endif /* _ASMNDS32_PGTABLE_H */