/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm/page.h>

#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#endif

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * We simulate an x86-style page table for the Linux mm code.
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory. For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error. Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* This is for the serialization of PxTLB broadcasts. At least on the
 * N-class systems, only one PxTLB inter-processor broadcast can be active
 * at any one time on the Merced bus. */
extern spinlock_t pa_tlb_flush_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes	(0)
#endif

#define purge_tlb_start(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_save(flags);	\
	} while (0)
#define purge_tlb_end(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_restore(flags); \
	} while (0)

/* Purge data and instruction TLB entries. The TLB purge instructions
 * are slow on SMP machines since the purge must be broadcast to all CPUs.
 */

static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}

extern void __update_cache(pte_t pte);

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) \
	do { \
		*(pteptr) = (pteval); \
		mb(); \
	} while(0)
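/* Note: the mb() in set_pte() above presumably orders the PTE store ahead
 * of any following TLB purge, so a CPU that observes the purge also
 * observes the new PTE value rather than a stale one. */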

#define set_pte_at(mm, addr, pteptr, pteval) \
	do { \
		if (pte_present(pteval) && \
		    pte_user(pteval)) \
			__update_cache(pteval); \
		*(pteptr) = (pteval); \
		purge_tlb_entries(mm, addr); \
	} while (0)

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_ORDER	1
#define PGD_ORDER	0
#else
#define PGD_ORDER	1
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower directory,
 * because PTE_SHIFT is used lower down to mean the shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE	(1UL << BITS_PER_PTE)
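/* Illustrative arithmetic, assuming 4k pages (PAGE_SHIFT == 12) and
 * 8-byte PTEs (BITS_PER_PTE_ENTRY == 3; the real values come from
 * asm/page.h): BITS_PER_PTE == 12 - 3 == 9, so PTRS_PER_PTE == 512 and
 * one page of PTEs maps 512 * 4k == 2MB of virtual space. */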

/* Definitions for 2nd level */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD	(1UL << BITS_PER_PMD)
#else
#define BITS_PER_PMD	0
#endif

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD	(1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD	PTRS_PER_PGD
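/* Continuing the illustrative 4k example above, and additionally assuming
 * 8-byte pmd/pgd entries (BITS_PER_PMD_ENTRY == BITS_PER_PGD_ENTRY == 3):
 * PMD_SHIFT == 12 + 9 == 21, so PMD_SIZE == 2MB; with PMD_ORDER == 1,
 * BITS_PER_PMD == 12 + 1 - 3 == 10 and PTRS_PER_PMD == 1024, giving
 * PGDIR_SHIFT == 12 + 9 + 10 == 31 and PGDIR_SIZE == 2GB per pgd entry. */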

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)	/* all initial PTEs fit into one page */
#endif
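/* With the illustrative values above (PMD_SHIFT == 21), the 64-bit
 * KERNEL_INITIAL_ORDER of 26 would give
 * PT_INITIAL == 1 << (26 - 21) == 32 initial page-table pages. */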

/*
 * pgd entries used up by user/kernel:
 */

#define FIRST_USER_ADDRESS	0UL

/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (one example: bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT	   31	/* (0x001) read access allowed */
#define _PAGE_WRITE_BIT	   30	/* (0x002) write access allowed */
#define _PAGE_EXEC_BIT	   29	/* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28	/* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT	   27	/* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT	   26	/* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT  25	/* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24	/* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23	/* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22	/* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT	   21	/* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT	   20	/* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32-bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - (x))

/* This defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT	xlate_pabit(_PAGE_USER_BIT)
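/* For example, xlate_pabit() converts PA-RISC bit numbering (bit 31 is
 * the least significant) into a shift count: _PAGE_READ below is
 * 1 << (31 - 31) == 0x001, matching the hex values in the comments above,
 * and PTE_SHIFT == 31 - 20 == 11. */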

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT	12

#define _PAGE_READ	(1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE	(1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW	(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC	(1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY	(1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB	(1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY	(1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP	(1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE	(1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED	(1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT	(1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE	(1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER	(1 << xlate_pabit(_PAGE_USER_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT   31
#define _PxD_VALID_BIT	   30

#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK     (0xf)
#define PxD_FLAG_SHIFT    (4)
#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
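/* Sketch of the encoding: a pgd/pmd entry for a page-aligned table at
 * physical address phys presumably looks like
 *	(phys >> PxD_VALUE_SHIFT) | PxD_FLAG_PRESENT | PxD_FLAG_VALID
 * and pmd_address() below undoes it. With PxD_VALUE_SHIFT == 12 - 4 == 8,
 * a 32-bit entry can address 32 + 8 == 40 bits of physical space,
 * matching the 40/42/44-bit comment above. */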

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, I don't know if that's correct
   or not. The stack is mapped this way, though, so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY	PAGE_EXECREAD
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)

/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

/*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  __P000 /* copy on write */
#define __P011  __P001 /* copy on write */
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  __P100 /* copy on write */
#define __P111  __P101 /* copy on write */

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)	(pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_user(x)	(pte_val(x) & _PAGE_USER)
#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pud_flag(x)	(pud_val(x) & PxD_FLAG_MASK)
#define pud_address(x)	((unsigned long)(pud_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd)
{
	set_pmd(pmd, __pmd(0));
}

#if CONFIG_PGTABLE_LEVELS == 3
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_address(pud)))
#define pud_page(pud)	virt_to_page((void *)pud_page_vaddr(pud))

/* For 64 bit we have three level tables */

#define pud_none(x)	(!pud_val(x))
#define pud_bad(x)	(!(pud_flag(x) & PxD_FLAG_VALID))
#define pud_present(x)	(pud_flag(x) & PxD_FLAG_PRESENT)
static inline void pud_clear(pud_t *pud)
{
	set_pud(pud, __pud(0));
}
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }

/*
 * Huge pte definitions.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)   (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
			 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)   (0)
#define pte_mkhuge(pte) (pte)
#endif
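/* Note: pte_mkhuge() above is a no-op whenever parisc_requires_coherency()
 * is true, so huge-page PTEs are effectively disabled on those machines. */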

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
									\
	__pte;								\
})
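/* __mk_pte() takes a physical address; it is equivalent to
 * pfn_pte(addr >> PAGE_SHIFT, pgprot) below, since the protection bits
 * defined in this file all sit below PFN_PTE_SHIFT, so the pfn field and
 * the protection bits never overlap (and + coincides with |). */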

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

/* Permanent address of a page. On parisc we don't have highmem. */

#define pte_pfn(x)	(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)	(pfn_to_page(pte_pfn(pte)))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) __va(pmd_address(pmd)));
}

#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

/* Find an entry in the second-level page table.. */

extern void paging_init(void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

#define update_mmu_cache(vma, addr, ptep) __update_cache(*ptep)

/* Encode and de-code a swap entry */

#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			( (((x).val >> 6) &  0x7) | \
					  (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | \
					    ((offset & 0x7) << 6) | \
					    ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
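/* Worked example of the swap encoding: the type lives in bits 0-4, the
 * low three offset bits in bits 6-8, and the remaining offset bits from
 * bit 11 up; bit 9 (== _PAGE_PRESENT) is never set, so a swap entry is
 * never pte_present(). E.g. __swp_entry(5, 0x23) yields
 *	5 | (0x3 << 6) | (0x20 << 8) == 0x20c5
 * and __swp_type()/__swp_offset() recover 5 and 0x23 from it. */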
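/* ptep_test_and_clear_young() below does a cheap first test, then re-tests
 * a copied value before writing back pte_mkold(); the second check
 * presumably narrows the window against a concurrent update, since the
 * read-modify-write is not done atomically. */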
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	if (!pte_young(*ptep))
		return 0;

	pte = *ptep;
	if (!pte_young(pte)) {
		return 0;
	}
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
}

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	set_pte_at(mm, addr, ptep, __pte(0));

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#endif /* !__ASSEMBLY__ */

/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif

#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME

#endif /* _PARISC_PGTABLE_H */