/*
** Copyright (c) 2011, Intel Corporation
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*/
13
/* HAX module interface - darwin version */
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

#include "target-i386/hax-i386.h"
hax_mod_open(void)22 hax_fd hax_mod_open(void)
23 {
24 int fd = open("/dev/HAX", O_RDWR);
25
26 if (fd == -1)
27 {
28 dprint("Failed to open the hax module\n");
29 return -errno;
30 }
31
32 return fd;
33 }
34
hax_populate_ram(uint64_t va,uint32_t size)35 int hax_populate_ram(uint64_t va, uint32_t size)
36 {
37 int ret;
38 struct hax_alloc_ram_info info;
39
40 if (!hax_global.vm || !hax_global.vm->fd)
41 {
42 dprint("Allocate memory before vm create?\n");
43 return -EINVAL;
44 }
45
46 info.size = size;
47 info.va = va;
48 ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
49 if (ret < 0)
50 {
51 dprint("Failed to allocate %x memory\n", size);
52 return ret;
53 }
54 return 0;
55 }
56
hax_set_phys_mem(target_phys_addr_t start_addr,ram_addr_t size,ram_addr_t phys_offset)57 int hax_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size, ram_addr_t phys_offset)
58 {
59 struct hax_set_ram_info info, *pinfo = &info;
60 int ret;
61 ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
62
63 /* We look for the RAM and ROM only */
64 if (flags >= IO_MEM_UNASSIGNED)
65 return 0;
66
67 if ( (start_addr & ~TARGET_PAGE_MASK) || (size & ~TARGET_PAGE_MASK))
68 {
69 dprint("set_phys_mem %x %lx requires page aligned addr and size\n", start_addr, size);
70 exit(1);
71 return -1;
72 }
73
74 info.pa_start = start_addr;
75 info.size = size;
76 info.va = (uint64_t)qemu_get_ram_ptr(phys_offset);
77 info.flags = (flags & IO_MEM_ROM) ? 1 : 0;
78
79 ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_SET_RAM, pinfo);
80 if (ret < 0)
81 {
82 dprint("has set phys mem failed\n");
83 exit(1);
84 }
85 return ret;
86 }
87
hax_capability(struct hax_state * hax,struct hax_capabilityinfo * cap)88 int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
89 {
90 int ret;
91
92 ret = ioctl(hax->fd, HAX_IOCTL_CAPABILITY, cap);
93 if (ret == -1)
94 {
95 dprint("Failed to get HAX capability\n");
96 return -errno;
97 }
98
99 return 0;
100 }
101
hax_mod_version(struct hax_state * hax,struct hax_module_version * version)102 int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
103 {
104 int ret;
105
106 ret = ioctl(hax->fd, HAX_IOCTL_VERSION, version);
107 if (ret == -1)
108 {
109 dprint("Failed to get HAX version\n");
110 return -errno;
111 }
112
113 return 0;
114 }
115
hax_vm_devfs_string(int vm_id)116 static char *hax_vm_devfs_string(int vm_id)
117 {
118 char *name;
119
120 if (vm_id > MAX_VM_ID)
121 {
122 dprint("Too big VM id\n");
123 return NULL;
124 }
125
126 name = qemu_strdup("/dev/hax_vm/vmxx");
127 if (!name)
128 return NULL;
129 sprintf(name, "/dev/hax_vm/vm%02d", vm_id);
130
131 return name;
132 }
133
hax_vcpu_devfs_string(int vm_id,int vcpu_id)134 static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
135 {
136 char *name;
137
138 if (vm_id > MAX_VM_ID || vcpu_id > MAX_VCPU_ID)
139 {
140 dprint("Too big vm id %x or vcpu id %x\n", vm_id, vcpu_id);
141 return NULL;
142 }
143
144 name = qemu_strdup("/dev/hax_vmxx/vcpuyy");
145 if (!name)
146 return NULL;
147
148 sprintf(name, "/dev/hax_vm%02d/vcpu%02d", vm_id, vcpu_id);
149
150 return name;
151 }
152
hax_host_create_vm(struct hax_state * hax,int * vmid)153 int hax_host_create_vm(struct hax_state *hax, int *vmid)
154 {
155 int ret;
156 int vm_id = 0;
157
158 if (hax_invalid_fd(hax->fd))
159 return -EINVAL;
160
161 if (hax->vm)
162 return 0;
163
164 ret = ioctl(hax->fd, HAX_IOCTL_CREATE_VM, &vm_id);
165 *vmid = vm_id;
166 return ret;
167 }
168
hax_host_open_vm(struct hax_state * hax,int vm_id)169 hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
170 {
171 hax_fd fd;
172 char *vm_name = NULL;
173
174 vm_name = hax_vm_devfs_string(vm_id);
175 if (!vm_name)
176 return -1;
177
178 fd = open(vm_name, O_RDWR);
179 qemu_free(vm_name);
180
181 return fd;
182 }
183
hax_notify_qemu_version(hax_fd vm_fd,struct hax_qemu_version * qversion)184 int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
185 {
186 int ret;
187
188 if (hax_invalid_fd(vm_fd))
189 return -EINVAL;
190
191 ret = ioctl(vm_fd, HAX_VM_IOCTL_NOTIFY_QEMU_VERSION, qversion);
192 if (ret == -1)
193 {
194 dprint("Failed to notify qemu API version\n");
195 return -errno;
196 }
197
198 return 0;
199 }
200
/*
 * Simply assume that the size should be bigger than the hax_tunnel,
 * since the hax_tunnel can be extended later with backward
 * compatibility.
 */
hax_host_create_vcpu(hax_fd vm_fd,int vcpuid)206 int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
207 {
208 int ret;
209
210 ret = ioctl(vm_fd, HAX_VM_IOCTL_VCPU_CREATE, &vcpuid);
211 if (ret < 0)
212 dprint("Failed to create vcpu %x\n", vcpuid);
213
214 return ret;
215 }
216
hax_host_open_vcpu(int vmid,int vcpuid)217 hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
218 {
219 char *devfs_path = NULL;
220 hax_fd fd;
221
222 devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
223 if (!devfs_path)
224 {
225 dprint("Failed to get the devfs\n");
226 return -EINVAL;
227 }
228
229 fd = open(devfs_path, O_RDWR);
230 qemu_free(devfs_path);
231 if (fd < 0)
232 dprint("Failed to open the vcpu devfs\n");
233 return fd;
234 }
235
hax_host_setup_vcpu_channel(struct hax_vcpu_state * vcpu)236 int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
237 {
238 int ret;
239 struct hax_tunnel_info info;
240
241 ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_SETUP_TUNNEL, &info);
242 if (ret)
243 {
244 dprint("Failed to setup the hax tunnel\n");
245 return ret;
246 }
247
248 if (!valid_hax_tunnel_size(info.size))
249 {
250 dprint("Invalid hax tunnel size %x\n", info.size);
251 ret = -EINVAL;
252 return ret;
253 }
254
255 vcpu->tunnel = (struct hax_tunnel *)(info.va);
256 vcpu->iobuf = (unsigned char *)(info.io_va);
257 return 0;
258 }
259
hax_vcpu_run(struct hax_vcpu_state * vcpu)260 int hax_vcpu_run(struct hax_vcpu_state* vcpu)
261 {
262 int ret;
263
264 ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL);
265 return ret;
266 }
267
hax_sync_fpu(CPUState * env,struct fx_layout * fl,int set)268 int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set)
269 {
270 int ret, fd;
271
272 fd = hax_vcpu_get_fd(env);
273 if (fd <= 0)
274 return -1;
275
276 if (set)
277 ret = ioctl(fd, HAX_VCPU_IOCTL_SET_FPU, fl);
278 else
279 ret = ioctl(fd, HAX_VCPU_IOCTL_GET_FPU, fl);
280 return ret;
281 }
282
hax_sync_msr(CPUState * env,struct hax_msr_data * msrs,int set)283 int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set)
284 {
285 int ret, fd;
286
287 fd = hax_vcpu_get_fd(env);
288 if (fd <= 0)
289 return -1;
290 if (set)
291 ret = ioctl(fd, HAX_VCPU_IOCTL_SET_MSRS, msrs);
292 else
293 ret = ioctl(fd, HAX_VCPU_IOCTL_GET_MSRS, msrs);
294 return ret;
295 }
296
hax_sync_vcpu_state(CPUState * env,struct vcpu_state_t * state,int set)297 int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set)
298 {
299 int ret, fd;
300
301 fd = hax_vcpu_get_fd(env);
302 if (fd <= 0)
303 return -1;
304
305 if (set)
306 ret = ioctl(fd, HAX_VCPU_SET_REGS, state);
307 else
308 ret = ioctl(fd, HAX_VCPU_GET_REGS, state);
309 return ret;
310 }
311
hax_inject_interrupt(CPUState * env,int vector)312 int hax_inject_interrupt(CPUState *env, int vector)
313 {
314 int ret, fd;
315
316 fd = hax_vcpu_get_fd(env);
317 if (fd <= 0)
318 return -1;
319
320 ret = ioctl(fd, HAX_VCPU_IOCTL_INTERRUPT, &vector);
321 return ret;
322 }
323