#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<31)
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)

/* Maximum amount of memory we can handle in a domain in pages */
#define MAX_DOMAIN_PAGES						\
    ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
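/*
 * Worked example (illustrative; the real value depends on the .config):
 * with CONFIG_XEN_MAX_DOMAIN_MEMORY=8 (GB) and PAGE_SIZE=4096, this is
 * 8 * 2^30 / 2^12 = 2^21 = 2097152 pages.
 */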

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);

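/*
 * Translate a pseudo-physical frame number into the machine frame
 * number backing it.  Auto-translated guests see one identity-mapped
 * space, so the PFN comes back unchanged; otherwise the p2m entry is
 * looked up and the FOREIGN_FRAME marker bit masked off.
 */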
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
}

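/*
 * Return non-zero if the PFN is backed by a machine frame, i.e. its
 * p2m entry is not INVALID_P2M_ENTRY (trivially true for
 * auto-translated guests).
 */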
static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
}

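/*
 * Translate a machine frame number back to a pseudo-physical one via
 * the Xen-maintained machine_to_phys_mapping table.  The table access
 * may fault; see the comment at the __get_user() below.
 */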
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

#if 0
	if (unlikely((mfn >> machine_to_phys_order) != 0))
		return max_mapnr;
#endif

	pfn = 0;
	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	__get_user(pfn, &machine_to_phys_mapping[mfn]);

	return pfn;
}

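/*
 * Address-level variants of the frame-number conversions above: the
 * frame part is translated and the intra-page offset carried across
 * unchanged.
 */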
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
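
/*
 * Illustrative only: for a local page with a valid p2m entry the two
 * conversions are inverses, and the offset survives the round trip:
 *
 *	xpaddr_t p = XPADDR(0x1234);
 *	machine_to_phys(phys_to_machine(p)).paddr == 0x1234
 *
 * This does not hold for foreign or I/O frames; see below.
 */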

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN)) == MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
	extern unsigned long max_mapnr;
	unsigned long pfn = mfn_to_pfn(mfn);
	if ((pfn < max_mapnr)
	    && !xen_feature(XENFEAT_auto_translated_physmap)
	    && (get_phys_to_machine(pfn) != mfn))
		return max_mapnr; /* force !pfn_valid() */
	/* XXX fixme; not true with sparsemem */
	return pfn;
}
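
/*
 * Illustrative only (not lifted from any real caller): since foreign
 * and special MFNs are forced to an out-of-range PFN, a pfn_valid()
 * check on the result tells local frames apart:
 *
 *	unsigned long pfn = mfn_to_local_pfn(mfn);
 *	if (pfn_valid(pfn))
 *		... the frame belongs to this domain ...
 */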

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_mfn(v)		(pfn_to_mfn(PFN_DOWN(__pa(v))))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
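
/*
 * Illustrative only: hypercall arguments usually want machine frame
 * numbers for directly mapped kernel memory, e.g. (hypothetical buffer):
 *
 *	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	unsigned long mfn = virt_to_mfn(buf);
 *
 * These macros assume 'v' is in the direct mapping, i.e. that __pa()
 * is valid for it; use arbitrary_virt_to_machine() (declared at the
 * end of this header) for anything else.
 */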
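/*
 * Under Xen PV a PTE holds a machine frame number; pte_mfn() extracts
 * that raw frame field, without the mfn-to-pfn translation that
 * pte_pfn() goes through.
 */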
static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
}

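/*
 * Build a PTE from a machine frame number; the counterpart of
 * pfn_pte() minus the pfn-to-mfn conversion.  massage_pgprot()
 * filters the protection bits against the supported-PTE mask.
 */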
static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			massage_pgprot(pgprot);

	return pte;
}

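/*
 * "_ma" (machine address) accessors: read and construct page-table
 * entries as raw machine values, bypassing the pfn<->mfn translation
 * done by the regular pte_val()/__pte() paravirt hooks.
 */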
static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}

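/*
 * Raw-value accessors for the higher page-table levels; with a folded
 * PUD the value lives in the containing pgd.
 */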
#define pmd_val_ma(v)	((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v)	((v).pgd.pgd)
#else
#define pud_val_ma(v)	((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) } )

#define pgd_val_ma(x)	((x).pgd)

xmaddr_t arbitrary_virt_to_machine(void *address);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#endif /* _ASM_X86_XEN_PAGE_H */