1 /* MN10300 Page table definitions
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public Licence
8  * as published by the Free Software Foundation; either version
9  * 2 of the Licence, or (at your option) any later version.
10  */
11 #ifndef _ASM_PAGE_H
12 #define _ASM_PAGE_H
13 
14 /* PAGE_SHIFT determines the page size */
15 #define PAGE_SHIFT	12	/* 2^12 = 4KiB pages */
16 
17 #ifndef __ASSEMBLY__
/* C version: 1UL makes PAGE_SIZE/PAGE_MASK unsigned long so address
 * arithmetic does not truncate or sign-extend */
18 #define PAGE_SIZE	(1UL << PAGE_SHIFT)
19 #define PAGE_MASK	(~(PAGE_SIZE - 1))
#else
/* Assembler version: the leading unary '+' tells the assembler the operand
 * is an immediate value, not a memory address */
21 #define PAGE_SIZE	+(1 << PAGE_SHIFT)	/* unary plus marks an
22 						 * immediate val not an addr */
23 #define PAGE_MASK	+(~(PAGE_SIZE - 1))
24 #endif
25 
26 #ifdef __KERNEL__
27 #ifndef __ASSEMBLY__
28 
/* Zero / copy one whole page, given kernel virtual addresses */
29 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
30 #define copy_page(to, from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)
31 
/* The user-page variants discard the extra vaddr/page hints and fall
 * through to the plain versions (presumably no cache-colouring work is
 * needed on this arch — the hints are simply unused here) */
32 #define clear_user_page(addr, vaddr, page)	clear_page(addr)
33 #define copy_user_page(vto, vfrom, vaddr, to)	copy_page(vto, vfrom)
34 
35 /*
36  * These are used to make use of C type-checking..
37  * Each raw unsigned long is wrapped in a distinct one-member struct so
38  * the compiler rejects accidental mixing of the different table levels.
37  */
38 typedef struct { unsigned long pte; } pte_t;	/* page table entry */
39 typedef struct { unsigned long pgd; } pgd_t;	/* page directory entry */
40 typedef struct { unsigned long pgprot; } pgprot_t;	/* page protection bits */
/* a page-table page is referred to by its struct page */
41 typedef struct page *pgtable_t;
42 
43 #define PTE_MASK	PAGE_MASK
44 #define HPAGE_SHIFT	22	/* 2^22 = 4MiB huge pages */
45 
46 #ifdef CONFIG_HUGETLB_PAGE
47 #define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
48 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
/* order = number of base-page shifts in one huge page */
49 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
50 #endif
51 
/* Extract the raw value from a typed page-table word... */
52 #define pte_val(x)	((x).pte)
53 #define pgd_val(x)	((x).pgd)
54 #define pgprot_val(x)	((x).pgprot)
55 
/* ...and wrap a raw value back into the typed form */
56 #define __pte(x)	((pte_t) { (x) })
57 #define __pgd(x)	((pgd_t) { (x) })
58 #define __pgprot(x)	((pgprot_t) { (x) })
59 
60 #include <asm-generic/pgtable-nopmd.h>
61 
62 #endif /* !__ASSEMBLY__ */
63 
64 /*
65  * This handles the memory map.. We could make this a config
66  * option, but too many people screw it up, and too few need
67  * it.
68  *
69  * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
70  * a virtual address space of one gigabyte, which limits the
71  * amount of physical memory you can use to about 950MB.
72  */
73 
74 #ifndef __ASSEMBLY__
75 
76 /* Pure 2^n version of get_order */
77 static inline int get_order(unsigned long size) __attribute__((const));
get_order(unsigned long size)78 static inline int get_order(unsigned long size)
79 {
80 	int order;
81 
82 	size = (size - 1) >> (PAGE_SHIFT - 1);
83 	order = -1;
84 	do {
85 		size >>= 1;
86 		order++;
87 	} while (size);
88 	return order;
89 }
90 
91 #endif /* __ASSEMBLY__ */
92 
93 #include <asm/page_offset.h>
94 
95 #define __PAGE_OFFSET		(PAGE_OFFSET_RAW)	/* supplied by <asm/page_offset.h> */
96 #define PAGE_OFFSET		((unsigned long) __PAGE_OFFSET)
97 
98 /*
99  * main RAM and kernel working space are coincident at 0x90000000, but to make
100  * life more interesting, there's also an uncached virtual shadow at 0xb0000000
101  * - these mappings are fixed in the MMU
102  */
/* pfn of the first page of kernel RAM; mem_map[] describes pages starting
 * there, so mem_map indexing below is displaced by this amount */
103 #define __pfn_disp		(CONFIG_KERNEL_RAM_BASE_ADDRESS >> PAGE_SHIFT)
104 
/* physical and kernel virtual addresses coincide here, so the conversions
 * are plain casts */
105 #define __pa(x)			((unsigned long)(x))
106 #define __va(x)			((void *)(unsigned long)(x))
107 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
108 #define pfn_to_page(pfn)	(mem_map + ((pfn) - __pfn_disp))
109 #define page_to_pfn(page)	((unsigned long)((page) - mem_map) + __pfn_disp)
110 
/* True if pfn has a mem_map entry.  The statement expression evaluates
 * 'pfn' exactly once; the unsigned subtraction makes pfns below __pfn_disp
 * wrap to huge values and so fail the < max_mapnr test too */
111 #define pfn_valid(pfn)					\
112 ({							\
113 	unsigned long __pfn = (pfn) - __pfn_disp;	\
114 	__pfn < max_mapnr;				\
115 })
116 
117 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
118 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
119 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
120 
/* Default protections for data/bss/heap mappings: always readable and
 * writable; executable only when the task's personality sets
 * READ_IMPLIES_EXEC */
121 #define VM_DATA_DEFAULT_FLAGS \
122 	(VM_READ | VM_WRITE | \
123 	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
124 		 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
125 
126 #endif /* __KERNEL__ */
127 
128 #endif /* _ASM_PAGE_H */
129