/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2012-2017 Citrix

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* Contributed by Andrew Cooper <andrew.cooper3@citrix.com>
   and Ian Campbell <ian.campbell@citrix.com> */

#ifndef __VKI_XEN_X86_H
#define __VKI_XEN_X86_H

#if defined(__i386__)
#define ___DEFINE_VKI_XEN_GUEST_HANDLE(name, type)                      \
    typedef struct { type *p; }                                         \
        __vki_xen_guest_handle_ ## name;                                \
    typedef struct { union { type *p; vki_xen_uint64_aligned_t q; }; }  \
        __vki_xen_guest_handle_64_ ## name
#define vki_xen_uint64_aligned_t vki_uint64_t __attribute__((aligned(8)))
#define __VKI_XEN_GUEST_HANDLE_64(name) __vki_xen_guest_handle_64_ ## name
#define VKI_XEN_GUEST_HANDLE_64(name) __VKI_XEN_GUEST_HANDLE_64(name)
#else
#define ___DEFINE_VKI_XEN_GUEST_HANDLE(name, type) \
    typedef struct { type *p; } __vki_xen_guest_handle_ ## name
#define vki_xen_uint64_aligned_t vki_uint64_t
#define VKI_XEN_GUEST_HANDLE_64(name) VKI_XEN_GUEST_HANDLE(name)
#endif

/* Handle-defining macros are used unconditionally below, so they are
   shared by both architecture branches above. */
#define __DEFINE_VKI_XEN_GUEST_HANDLE(name, type) \
    ___DEFINE_VKI_XEN_GUEST_HANDLE(name, type);   \
    ___DEFINE_VKI_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_VKI_XEN_GUEST_HANDLE(name)   __DEFINE_VKI_XEN_GUEST_HANDLE(name, name)

#define __VKI_XEN_GUEST_HANDLE(name)  __vki_xen_guest_handle_ ## name
#define VKI_XEN_GUEST_HANDLE(name)    __VKI_XEN_GUEST_HANDLE(name)
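
/* For illustration: on a 64-bit build, DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_pfn_t)
   would expand to roughly

       typedef struct { vki_xen_pfn_t *p; }
           __vki_xen_guest_handle_vki_xen_pfn_t;
       typedef struct { const vki_xen_pfn_t *p; }
           __vki_xen_guest_handle_const_vki_xen_pfn_t;

   and VKI_XEN_GUEST_HANDLE(vki_xen_pfn_t) then names the first of these.
   On i386 the 64-bit variant (__vki_xen_guest_handle_64_*) is defined as
   well, padded to 8 bytes via the aligned union. */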

typedef unsigned long vki_xen_pfn_t;
typedef unsigned long vki_xen_ulong_t;

#if defined(__i386__)
struct vki_xen_cpu_user_regs {
    vki_uint32_t ebx;
    vki_uint32_t ecx;
    vki_uint32_t edx;
    vki_uint32_t esi;
    vki_uint32_t edi;
    vki_uint32_t ebp;
    vki_uint32_t eax;
    vki_uint16_t error_code;    /* private */
    vki_uint16_t entry_vector;  /* private */
    vki_uint32_t eip;
    vki_uint16_t cs;
    vki_uint8_t  saved_upcall_mask;
    vki_uint8_t  _pad0;
    vki_uint32_t eflags;        /* eflags.IF == !saved_upcall_mask */
    vki_uint32_t esp;
    vki_uint16_t ss, _pad1;
    vki_uint16_t es, _pad2;
    vki_uint16_t ds, _pad3;
    vki_uint16_t fs, _pad4;
    vki_uint16_t gs, _pad5;
};
#else
struct vki_xen_cpu_user_regs {
    vki_uint64_t r15;
    vki_uint64_t r14;
    vki_uint64_t r13;
    vki_uint64_t r12;
    vki_uint64_t rbp;
    vki_uint64_t rbx;
    vki_uint64_t r11;
    vki_uint64_t r10;
    vki_uint64_t r9;
    vki_uint64_t r8;
    vki_uint64_t rax;
    vki_uint64_t rcx;
    vki_uint64_t rdx;
    vki_uint64_t rsi;
    vki_uint64_t rdi;
    vki_uint32_t error_code;    /* private */
    vki_uint32_t entry_vector;  /* private */
    vki_uint64_t rip;
    vki_uint16_t cs, _pad0[1];
    vki_uint8_t  saved_upcall_mask;
    vki_uint8_t  _pad1[3];
    vki_uint64_t rflags;      /* rflags.IF == !saved_upcall_mask */
    vki_uint64_t rsp;
    vki_uint16_t ss, _pad2[3];
    vki_uint16_t es, _pad3[3];
    vki_uint16_t ds, _pad4[3];
    vki_uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base.     */
    vki_uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
};
#endif

struct vki_xen_trap_info {
    vki_uint8_t   vector;  /* exception vector                              */
    vki_uint8_t   flags;   /* 0-3: privilege level; 4: clear event enable?  */
    vki_uint16_t  cs;      /* code selector                                 */
    unsigned long address; /* code offset                                   */
};

struct vki_xen_vcpu_guest_context {
    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
    unsigned long flags;                    /* VGCF_* flags                 */
    struct vki_xen_cpu_user_regs user_regs; /* User-level CPU registers     */
    struct vki_xen_trap_info trap_ctxt[256];/* Virtual IDT                  */
    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
    /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
#ifdef __i386__
    unsigned long event_callback_cs;        /* CS:EIP of event callback     */
    unsigned long event_callback_eip;
    unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
    unsigned long failsafe_callback_eip;
#else
    unsigned long event_callback_eip;
    unsigned long failsafe_callback_eip;
    unsigned long syscall_callback_eip;
#endif
    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
#ifdef __x86_64__
    /* Segment base addresses. */
    vki_uint64_t  fs_base;
    vki_uint64_t  gs_base_kernel;
    vki_uint64_t  gs_base_user;
#endif
};
typedef struct vki_xen_vcpu_guest_context vki_xen_vcpu_guest_context_t;
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_vcpu_guest_context_t);


/* HVM_SAVE types and declarations for getcontext_partial */
#define VKI_DECLARE_HVM_SAVE_TYPE(_x, _code, _type)                         \
    struct __VKI_HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1]; }

#define VKI_HVM_SAVE_TYPE(_x) typeof (((struct __VKI_HVM_SAVE_TYPE_##_x *)(0))->t)
#define VKI_HVM_SAVE_LENGTH(_x) (sizeof (VKI_HVM_SAVE_TYPE(_x)))
#define VKI_HVM_SAVE_CODE(_x) (sizeof (((struct __VKI_HVM_SAVE_TYPE_##_x *)(0))->c))
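
/* A sketch of how these evaluate for a declaration such as
   VKI_DECLARE_HVM_SAVE_TYPE(CPU, 2, struct vki_hvm_hw_cpu) (see below):
   the save code is encoded as the length of the 'c' array, so

       VKI_HVM_SAVE_CODE(CPU)   == sizeof(char [2]) == 2,
       VKI_HVM_SAVE_TYPE(CPU)   is  struct vki_hvm_hw_cpu, and
       VKI_HVM_SAVE_LENGTH(CPU) == sizeof(struct vki_hvm_hw_cpu). */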

struct vki_hvm_hw_cpu {
   vki_uint8_t  fpu_regs[512];

   vki_uint64_t rax;
   vki_uint64_t rbx;
   vki_uint64_t rcx;
   vki_uint64_t rdx;
   vki_uint64_t rbp;
   vki_uint64_t rsi;
   vki_uint64_t rdi;
   vki_uint64_t rsp;
   vki_uint64_t r8;
   vki_uint64_t r9;
   vki_uint64_t r10;
   vki_uint64_t r11;
   vki_uint64_t r12;
   vki_uint64_t r13;
   vki_uint64_t r14;
   vki_uint64_t r15;

   vki_uint64_t rip;
   vki_uint64_t rflags;

   vki_uint64_t cr0;
   vki_uint64_t cr2;
   vki_uint64_t cr3;
   vki_uint64_t cr4;

   vki_uint64_t dr0;
   vki_uint64_t dr1;
   vki_uint64_t dr2;
   vki_uint64_t dr3;
   vki_uint64_t dr6;
   vki_uint64_t dr7;

   vki_uint32_t cs_sel;
   vki_uint32_t ds_sel;
   vki_uint32_t es_sel;
   vki_uint32_t fs_sel;
   vki_uint32_t gs_sel;
   vki_uint32_t ss_sel;
   vki_uint32_t tr_sel;
   vki_uint32_t ldtr_sel;

   vki_uint32_t cs_limit;
   vki_uint32_t ds_limit;
   vki_uint32_t es_limit;
   vki_uint32_t fs_limit;
   vki_uint32_t gs_limit;
   vki_uint32_t ss_limit;
   vki_uint32_t tr_limit;
   vki_uint32_t ldtr_limit;
   vki_uint32_t idtr_limit;
   vki_uint32_t gdtr_limit;

   vki_uint64_t cs_base;
   vki_uint64_t ds_base;
   vki_uint64_t es_base;
   vki_uint64_t fs_base;
   vki_uint64_t gs_base;
   vki_uint64_t ss_base;
   vki_uint64_t tr_base;
   vki_uint64_t ldtr_base;
   vki_uint64_t idtr_base;
   vki_uint64_t gdtr_base;

   vki_uint32_t cs_arbytes;
   vki_uint32_t ds_arbytes;
   vki_uint32_t es_arbytes;
   vki_uint32_t fs_arbytes;
   vki_uint32_t gs_arbytes;
   vki_uint32_t ss_arbytes;
   vki_uint32_t tr_arbytes;
   vki_uint32_t ldtr_arbytes;

   vki_uint64_t sysenter_cs;
   vki_uint64_t sysenter_esp;
   vki_uint64_t sysenter_eip;

   /* msr for em64t */
   vki_uint64_t shadow_gs;

   /* msr content saved/restored. */
   vki_uint64_t msr_flags;
   vki_uint64_t msr_lstar;
   vki_uint64_t msr_star;
   vki_uint64_t msr_cstar;
   vki_uint64_t msr_syscall_mask;
   vki_uint64_t msr_efer;
   vki_uint64_t msr_tsc_aux;

   /* guest's idea of what rdtsc() would return */
   vki_uint64_t tsc;

   /* pending event, if any */
   union {
      vki_uint32_t pending_event;
      struct {
         vki_uint8_t  pending_vector:8;
         vki_uint8_t  pending_type:3;
         vki_uint8_t  pending_error_valid:1;
         vki_uint32_t pending_reserved:19;
         vki_uint8_t  pending_valid:1;
      };
   };
   /* error code for pending event */
   vki_uint32_t error_code;
};

VKI_DECLARE_HVM_SAVE_TYPE(CPU, 2, struct vki_hvm_hw_cpu);
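
/* Hypothetical caller sketch (not defined in this header): code wrapping a
   getcontext_partial request could size its buffer from the macros above,
   e.g.

       vki_uint8_t buf[VKI_HVM_SAVE_LENGTH(CPU)];  /* sizeof(struct vki_hvm_hw_cpu) */

   and ask for record type VKI_HVM_SAVE_CODE(CPU), i.e. 2. */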

struct vki_hvm_hw_mtrr {
#define VKI_MTRR_VCNT     8
#define VKI_NUM_FIXED_MSR 11
   vki_uint64_t msr_pat_cr;
   /* MTRR physbase & physmask MSR pair */
   vki_uint64_t msr_mtrr_var[VKI_MTRR_VCNT*2];
   vki_uint64_t msr_mtrr_fixed[VKI_NUM_FIXED_MSR];
   vki_uint64_t msr_mtrr_cap;
   vki_uint64_t msr_mtrr_def_type;
};

VKI_DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct vki_hvm_hw_mtrr);

#endif // __VKI_XEN_X86_H

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/