/*
** Copyright (c) 2011, Intel Corporation
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*/

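/*
 * Windows-specific glue for QEMU's HAX support: thin wrappers around the
 * HAX driver's CreateFile/DeviceIoControl interface.
 */
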
#include "target-i386/hax-i386.h"

/*
 * Return 0 upon success, -1 when the driver is not loaded,
 * and another negative value for other failures.
 */
static int hax_open_device(hax_fd *fd)
{
    uint32_t errNum = 0;
    HANDLE hDevice;

    if (!fd)
        return -2;

    hDevice = CreateFile("\\\\.\\HAX",
                         GENERIC_READ | GENERIC_WRITE,
                         0,
                         NULL,
                         CREATE_ALWAYS,
                         FILE_ATTRIBUTE_NORMAL,
                         NULL);

    if (hDevice == INVALID_HANDLE_VALUE)
    {
        dprint("Failed to open the HAX device!\n");
        errNum = GetLastError();
        if (errNum == ERROR_FILE_NOT_FOUND)
            return -1;
        return -2;
    }
    *fd = hDevice;
    dprint("device fd:%d\n", *fd);
    return 0;
}
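
/*
 * Open the HAX module device; returns the device handle, or
 * INVALID_HANDLE_VALUE if the driver could not be opened.
 */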
hax_fd hax_mod_open(void)
{
    int ret;
    hax_fd fd = INVALID_HANDLE_VALUE;

    ret = hax_open_device(&fd);
    if (ret != 0)
        dprint("Open HAX device failed\n");

    return fd;
}
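
/*
 * Ask the HAX kernel module to allocate backing for the virtual address
 * range [va, va + size) via HAX_VM_IOCTL_ALLOC_RAM. The VM must have been
 * created first.
 */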
int hax_populate_ram(uint64_t va, uint32_t size)
{
    int ret;
    struct hax_alloc_ram_info info;
    HANDLE hDeviceVM;
    DWORD dSize = 0;

    if (!hax_global.vm || !hax_global.vm->fd)
    {
        dprint("Allocating memory before the VM is created?\n");
        return -EINVAL;
    }

    info.size = size;
    info.va = va;

    hDeviceVM = hax_global.vm->fd;

    ret = DeviceIoControl(hDeviceVM,
                          HAX_VM_IOCTL_ALLOC_RAM,
                          &info, sizeof(info),
                          NULL, 0,
                          &dSize,
                          (LPOVERLAPPED) NULL);

    if (!ret) {
        dprint("Failed to allocate 0x%x bytes of memory\n", size);
        return -EFAULT;
    }

    return 0;
}
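
/*
 * Map a page-aligned guest physical range onto the QEMU RAM/ROM that backs
 * it, via the HAX_VM_IOCTL_SET_RAM ioctl. Unassigned (MMIO) regions are
 * skipped.
 */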
int hax_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size, ram_addr_t phys_offset)
{
    struct hax_set_ram_info info, *pinfo = &info;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    HANDLE hDeviceVM;
    DWORD dSize = 0;
    int ret = 0;

    /* We look for the RAM and ROM only */
    if (flags >= IO_MEM_UNASSIGNED)
        return 0;

    if ((start_addr & ~TARGET_PAGE_MASK) || (size & ~TARGET_PAGE_MASK))
    {
        dprint("set_phys_mem %x %lx requires page aligned addr and size\n",
               start_addr, size);
        return -1;
    }

    info.pa_start = start_addr;
    info.size = size;
    info.va = (uint64_t)qemu_get_ram_ptr(phys_offset);
    info.flags = (flags & IO_MEM_ROM) ? 1 : 0;

    hDeviceVM = hax_global.vm->fd;

    ret = DeviceIoControl(hDeviceVM,
                          HAX_VM_IOCTL_SET_RAM,
                          pinfo, sizeof(*pinfo),
                          NULL, 0,
                          &dSize,
                          (LPOVERLAPPED) NULL);

    if (!ret)
        return -EFAULT;
    else
        return 0;
}
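
/* Query the capability information exported by the HAX kernel module. */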
int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
{
    int ret;
    HANDLE hDevice = hax->fd; // handle to the HAX module
    DWORD dSize = 0;
    DWORD err = 0;

    if (hax_invalid_fd(hDevice)) {
        dprint("Invalid fd for hax device!\n");
        return -ENODEV;
    }

    ret = DeviceIoControl(hDevice,
                          HAX_IOCTL_CAPABILITY,
                          NULL, 0,
                          cap, sizeof(*cap),
                          &dSize,
                          (LPOVERLAPPED) NULL);

    if (!ret) {
        err = GetLastError();
        if (err == ERROR_INSUFFICIENT_BUFFER ||
            err == ERROR_MORE_DATA)
            dprint("The HAX capability info is too long for the buffer\n");
        dprint("Failed to get HAX capability: %d\n", err);
        return -EFAULT;
    } else
        return 0;
}
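
/* Query the version of the loaded HAX kernel module. */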
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
{
    int ret;
    HANDLE hDevice = hax->fd; // handle to the HAX module
    DWORD dSize = 0;
    DWORD err = 0;

    if (hax_invalid_fd(hDevice)) {
        dprint("Invalid fd for hax device!\n");
        return -ENODEV;
    }

    ret = DeviceIoControl(hDevice,
                          HAX_IOCTL_VERSION,
                          NULL, 0,
                          version, sizeof(*version),
                          &dSize,
                          (LPOVERLAPPED) NULL);

    if (!ret) {
        err = GetLastError();
        if (err == ERROR_INSUFFICIENT_BUFFER ||
            err == ERROR_MORE_DATA)
            dprint("The HAX module version info is too long for the buffer\n");
        dprint("Failed to get HAX module version: %d\n", err);
        return -EFAULT;
    } else
        return 0;
}
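
/* Build the device path for a VM, e.g. "\\.\hax_vm01" for vm_id 1. */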
static char *hax_vm_devfs_string(int vm_id)
{
    char *name;

    if (vm_id > MAX_VM_ID)
    {
        dprint("Too big VM id\n");
        return NULL;
    }

    name = qemu_strdup("\\\\.\\hax_vmxx");
    if (!name)
        return NULL;
    sprintf(name, "\\\\.\\hax_vm%02d", vm_id);

    return name;
}
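
/* Build the device path for a vcpu, e.g. "\\.\hax_vm01_vcpu00". */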
static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
{
    char *name;

    if (vm_id > MAX_VM_ID || vcpu_id > MAX_VCPU_ID)
    {
        dprint("Too big vm id %x or vcpu id %x\n", vm_id, vcpu_id);
        return NULL;
    }
    name = qemu_strdup("\\\\.\\hax_vmxx_vcpuxx");
    if (!name)
        return NULL;
    sprintf(name, "\\\\.\\hax_vm%02d_vcpu%02d", vm_id, vcpu_id);

    return name;
}
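
/* Ask the HAX module to create a new VM; its id is returned through *vmid. */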
int hax_host_create_vm(struct hax_state *hax, int *vmid)
{
    int ret;
    int vm_id = 0;
    DWORD dSize = 0;

    if (hax_invalid_fd(hax->fd))
        return -EINVAL;

    if (hax->vm)
        return 0;

    ret = DeviceIoControl(hax->fd,
                          HAX_IOCTL_CREATE_VM,
                          NULL, 0,
                          &vm_id, sizeof(vm_id),
                          &dSize,
                          (LPOVERLAPPED) NULL);
    if (!ret) {
        dprint("Failed to create the VM, error code:%d\n", GetLastError());
        return -1;
    }
    *vmid = vm_id;
    return 0;
}
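
/* Open the per-VM device node and return its handle. */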
hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
{
    char *vm_name = NULL;
    hax_fd hDeviceVM;

    vm_name = hax_vm_devfs_string(vm_id);
    if (!vm_name) {
        dprint("Incorrect name\n");
        return INVALID_HANDLE_VALUE;
    }

    hDeviceVM = CreateFile(vm_name,
                           GENERIC_READ | GENERIC_WRITE,
                           0,
                           NULL,
                           CREATE_ALWAYS,
                           FILE_ATTRIBUTE_NORMAL,
                           NULL);
    if (hDeviceVM == INVALID_HANDLE_VALUE)
        dprint("Failed to open the vm device %s, error code:%d\n",
               vm_name, GetLastError());

    qemu_free(vm_name);
    return hDeviceVM;
}
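
/* Tell the HAX kernel module which qemu API version this binary expects. */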
int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
{
    int ret;
    DWORD dSize = 0;

    if (hax_invalid_fd(vm_fd))
        return -EINVAL;

    ret = DeviceIoControl(vm_fd,
                          HAX_VM_IOCTL_NOTIFY_QEMU_VERSION,
                          qversion, sizeof(struct hax_qemu_version),
                          NULL, 0,
                          &dSize,
                          (LPOVERLAPPED) NULL);
    if (!ret)
    {
        dprint("Failed to notify qemu API version\n");
        return -1;
    }

    return 0;
}
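
/* Ask the HAX module to create vcpu 'vcpuid' in the VM behind vm_fd. */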
int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
{
    int ret;
    DWORD dSize = 0;

    ret = DeviceIoControl(vm_fd,
                          HAX_VM_IOCTL_VCPU_CREATE,
                          &vcpuid, sizeof(vcpuid),
                          NULL, 0,
                          &dSize,
                          (LPOVERLAPPED) NULL);
    if (!ret)
    {
        dprint("Failed to create vcpu %x\n", vcpuid);
        return -1;
    }

    return 0;
}
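
/* Open the per-vcpu device node and return its handle. */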
hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
{
    char *devfs_path = NULL;
    hax_fd hDeviceVCPU;

    devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
    if (!devfs_path)
    {
        dprint("Failed to get the devfs path\n");
        return INVALID_HANDLE_VALUE;
    }

    hDeviceVCPU = CreateFile(devfs_path,
                             GENERIC_READ | GENERIC_WRITE,
                             0,
                             NULL,
                             CREATE_ALWAYS,
                             FILE_ATTRIBUTE_NORMAL,
                             NULL);

    if (hDeviceVCPU == INVALID_HANDLE_VALUE)
        dprint("Failed to open the vcpu devfs\n");
    qemu_free(devfs_path);
    return hDeviceVCPU;
}
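
/*
 * Set up the communication tunnel between QEMU and the kernel-side vcpu:
 * the ioctl returns the user-space addresses of the shared hax_tunnel
 * structure and of the I/O buffer.
 */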
int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
{
    hax_fd hDeviceVCPU = vcpu->fd;
    int ret;
    struct hax_tunnel_info info;
    DWORD dSize = 0;

    ret = DeviceIoControl(hDeviceVCPU,
                          HAX_VCPU_IOCTL_SETUP_TUNNEL,
                          NULL, 0,
                          &info, sizeof(info),
                          &dSize,
                          (LPOVERLAPPED) NULL);
    if (!ret)
    {
        dprint("Failed to setup the hax tunnel\n");
        return -1;
    }

    if (!valid_hax_tunnel_size(info.size))
    {
        dprint("Invalid hax tunnel size %x\n", info.size);
        ret = -EINVAL;
        return ret;
    }
    vcpu->tunnel = (struct hax_tunnel *)(info.va);
    vcpu->iobuf = (unsigned char *)(info.io_va);
    return 0;
}
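
/* Run the vcpu via the HAX_VCPU_IOCTL_RUN ioctl. */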
int hax_vcpu_run(struct hax_vcpu_state *vcpu)
{
    int ret;
    HANDLE hDeviceVCPU = vcpu->fd;
    DWORD dSize = 0;

    ret = DeviceIoControl(hDeviceVCPU,
                          HAX_VCPU_IOCTL_RUN,
                          NULL, 0,
                          NULL, 0,
                          &dSize,
                          (LPOVERLAPPED) NULL);
    if (!ret)
        return -EFAULT;
    else
        return 0;
}
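
/* Copy the FPU/FX state to (set != 0) or from (set == 0) the kernel vcpu. */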
int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set)
{
    int ret;
    hax_fd fd;
    HANDLE hDeviceVCPU;
    DWORD dSize = 0;

    fd = hax_vcpu_get_fd(env);
    if (hax_invalid_fd(fd))
        return -1;

    hDeviceVCPU = fd;

    if (set)
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_IOCTL_SET_FPU,
                              fl, sizeof(*fl),
                              NULL, 0,
                              &dSize,
                              (LPOVERLAPPED) NULL);
    else
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_IOCTL_GET_FPU,
                              NULL, 0,
                              fl, sizeof(*fl),
                              &dSize,
                              (LPOVERLAPPED) NULL);
    if (!ret)
        return -EFAULT;
    else
        return 0;
}
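
/* Write (set != 0) or read (set == 0) the guest MSRs described by *msrs. */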
int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set)
{
    int ret;
    hax_fd fd;
    HANDLE hDeviceVCPU;
    DWORD dSize = 0;

    fd = hax_vcpu_get_fd(env);
    if (hax_invalid_fd(fd))
        return -1;
    hDeviceVCPU = fd;

    if (set)
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_IOCTL_SET_MSRS,
                              msrs, sizeof(*msrs),
                              msrs, sizeof(*msrs),
                              &dSize,
                              (LPOVERLAPPED) NULL);
    else
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_IOCTL_GET_MSRS,
                              msrs, sizeof(*msrs),
                              msrs, sizeof(*msrs),
                              &dSize,
                              (LPOVERLAPPED) NULL);
    if (!ret)
        return -EFAULT;
    else
        return 0;
}
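
/* Push (set != 0) or fetch (set == 0) the full vcpu register state. */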
int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set)
{
    int ret;
    hax_fd fd;
    HANDLE hDeviceVCPU;
    DWORD dSize;

    fd = hax_vcpu_get_fd(env);
    if (hax_invalid_fd(fd))
        return -1;

    hDeviceVCPU = fd;

    if (set)
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_SET_REGS,
                              state, sizeof(*state),
                              NULL, 0,
                              &dSize,
                              (LPOVERLAPPED) NULL);
    else
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_GET_REGS,
                              NULL, 0,
                              state, sizeof(*state),
                              &dSize,
                              (LPOVERLAPPED) NULL);
    if (!ret)
        return -EFAULT;
    else
        return 0;
}
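
/* Queue interrupt 'vector' for injection into the guest vcpu. */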
int hax_inject_interrupt(CPUState *env, int vector)
{
    int ret;
    hax_fd fd;
    HANDLE hDeviceVCPU;
    DWORD dSize;

    fd = hax_vcpu_get_fd(env);
    if (hax_invalid_fd(fd))
        return -1;

    hDeviceVCPU = fd;

    ret = DeviceIoControl(hDeviceVCPU,
                          HAX_VCPU_IOCTL_INTERRUPT,
                          &vector, sizeof(vector),
                          NULL, 0,
                          &dSize,
                          (LPOVERLAPPED) NULL);
    if (!ret)
        return -EFAULT;
    else
        return 0;
}