1
/*--------------------------------------------------------------------*/
/*--- Xen Hypercalls                                 syswrap-xen.c ---*/
/*--------------------------------------------------------------------*/
5
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
9
10 Copyright (C) 2012 Citrix Systems
11 ian.campbell@citrix.com
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29 */
30
31 #include "pub_core_basics.h"
32 #include "pub_core_vki.h"
33
34 #if defined(ENABLE_XEN)
35
36 #include "pub_core_vkiscnums.h"
37 #include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
38 #include "pub_core_threadstate.h"
39 #include "pub_core_aspacemgr.h"
40 #include "pub_core_debuginfo.h" // VG_(di_notify_*)
41 #include "pub_core_transtab.h" // VG_(discard_translations)
42 #include "pub_core_xarray.h"
43 #include "pub_core_clientstate.h"
44 #include "pub_core_debuglog.h"
45 #include "pub_core_libcbase.h"
46 #include "pub_core_libcassert.h"
47 #include "pub_core_libcfile.h"
48 #include "pub_core_libcprint.h"
49 #include "pub_core_libcproc.h"
50 #include "pub_core_libcsignal.h"
51 #include "pub_core_mallocfree.h"
52 #include "pub_core_tooliface.h"
53 #include "pub_core_options.h"
54 #include "pub_core_scheduler.h"
55 #include "pub_core_signals.h"
56 #include "pub_core_syscall.h"
57 #include "pub_core_syswrap.h"
58 #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
59
60 #include "priv_types_n_macros.h"
61 #include "priv_syswrap-generic.h"
62 #include "priv_syswrap-xen.h"
63
64 #include <inttypes.h>
65
66 #define PRE(name) static DEFN_PRE_TEMPLATE(xen, name)
67 #define POST(name) static DEFN_POST_TEMPLATE(xen, name)
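/* Each hypercall wrapper below is declared with these two macros and is
   registered, by function address, in hypercall_table further down.
   PRE wrappers run before the hypercall reaches the hypervisor and
   mark the guest memory it will read; POST wrappers run afterwards and
   mark the memory it has written.  The wrappers are reached from the
   generic syscall machinery when the client issues a hypercall (on
   Linux typically via the privcmd driver's hypercall ioctl). */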
68
static void bad_subop ( ThreadId tid,
70 SyscallArgLayout* layout,
71 /*MOD*/SyscallArgs* args,
72 /*OUT*/SyscallStatus* status,
73 /*OUT*/UWord* flags,
74 const HChar* hypercall,
75 UWord subop)
76 {
77 VG_(dmsg)("WARNING: unhandled %s subop: %ld\n",
78 hypercall, subop);
79 if (VG_(clo_verbosity) > 1) {
80 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
81 }
82 VG_(dmsg)("You may be able to write your own handler.\n");
83 VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
84 VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
85 VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
86 VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");
87
88 SET_STATUS_Failure(VKI_ENOSYS);
89 }
90
PRE(memory_op)
92 {
93 PRINT("__HYPERVISOR_memory_op ( %ld, %lx )", ARG1, ARG2);
94
95 switch (ARG1) {
96
97 case VKI_XENMEM_maximum_ram_page:
98 /* No inputs */
99 break;
100
101 case VKI_XENMEM_maximum_gpfn:
102 PRE_MEM_READ("XENMEM_maximum_gpfn domid",
103 (Addr)ARG2, sizeof(vki_xen_domid_t));
104 break;
105
106 case VKI_XENMEM_machphys_mfn_list: {
107 struct vki_xen_machphys_mfn_list *arg =
108 (struct vki_xen_machphys_mfn_list *)ARG2;
109 PRE_MEM_READ("XENMEM_machphys_mfn_list max_extents",
110 (Addr)&arg->max_extents, sizeof(arg->max_extents));
111 PRE_MEM_READ("XENMEM_machphys_mfn_list extent_start",
112 (Addr)&arg->extent_start, sizeof(arg->extent_start));
113 break;
114 }
115
116 case VKI_XENMEM_set_memory_map: {
117 struct vki_xen_foreign_memory_map *arg =
118 (struct vki_xen_foreign_memory_map *)ARG2;
119 PRE_MEM_READ("XENMEM_set_memory_map domid",
120 (Addr)&arg->domid, sizeof(arg->domid));
121 PRE_MEM_READ("XENMEM_set_memory_map map",
122 (Addr)&arg->map, sizeof(arg->map));
123 break;
124 }
125 case VKI_XENMEM_increase_reservation:
126 case VKI_XENMEM_decrease_reservation:
127 case VKI_XENMEM_populate_physmap:
128 case VKI_XENMEM_claim_pages: {
129 struct xen_memory_reservation *memory_reservation =
130 (struct xen_memory_reservation *)ARG2;
131 const HChar *which;
132
133 switch (ARG1) {
134 case VKI_XENMEM_increase_reservation:
135 which = "XENMEM_increase_reservation";
136 break;
137 case VKI_XENMEM_decrease_reservation:
138 which = "XENMEM_decrease_reservation";
139 PRE_MEM_READ(which,
140 (Addr)memory_reservation->extent_start.p,
141 sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
142 break;
143 case VKI_XENMEM_populate_physmap:
144 which = "XENMEM_populate_physmap";
145 PRE_MEM_READ(which,
146 (Addr)memory_reservation->extent_start.p,
147 sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
148 break;
149 case VKI_XENMEM_claim_pages:
150 which = "XENMEM_claim_pages";
151 break;
152 default:
153 which = "XENMEM_unknown";
154 break;
155 }
156
157 PRE_MEM_READ(which,
158 (Addr)&memory_reservation->extent_start,
159 sizeof(memory_reservation->extent_start));
160 PRE_MEM_READ(which,
161 (Addr)&memory_reservation->nr_extents,
162 sizeof(memory_reservation->nr_extents));
163 PRE_MEM_READ(which,
164 (Addr)&memory_reservation->extent_order,
165 sizeof(memory_reservation->extent_order));
166 PRE_MEM_READ(which,
167 (Addr)&memory_reservation->mem_flags,
168 sizeof(memory_reservation->mem_flags));
169 PRE_MEM_READ(which,
170 (Addr)&memory_reservation->domid,
171 sizeof(memory_reservation->domid));
172 break;
173 }
174
175 case VKI_XENMEM_add_to_physmap: {
176 struct vki_xen_add_to_physmap *arg =
177 (struct vki_xen_add_to_physmap *)ARG2;
178 PRE_MEM_READ("XENMEM_add_to_physmap domid",
179 (Addr)&arg->domid, sizeof(arg->domid));
180 PRE_MEM_READ("XENMEM_add_to_physmap size",
181 (Addr)&arg->size, sizeof(arg->size));
182 PRE_MEM_READ("XENMEM_add_to_physmap space",
183 (Addr)&arg->space, sizeof(arg->space));
184 PRE_MEM_READ("XENMEM_add_to_physmap idx",
185 (Addr)&arg->idx, sizeof(arg->idx));
186 PRE_MEM_READ("XENMEM_add_to_physmap gpfn",
187 (Addr)&arg->gpfn, sizeof(arg->gpfn));
188 break;
   }
190
191 case VKI_XENMEM_remove_from_physmap: {
192 struct vki_xen_remove_from_physmap *arg =
193 (struct vki_xen_remove_from_physmap *)ARG2;
194 PRE_MEM_READ("XENMEM_remove_from_physmap domid",
195 (Addr)&arg->domid, sizeof(arg->domid));
196 PRE_MEM_READ("XENMEM_remove_from_physmap gpfn",
197 (Addr)&arg->gpfn, sizeof(arg->gpfn));
      break;
   }
199
200 case VKI_XENMEM_get_sharing_freed_pages:
201 case VKI_XENMEM_get_sharing_shared_pages:
202 break;
203
204 default:
205 bad_subop(tid, layout, arrghs, status, flags,
206 "__HYPERVISOR_memory_op", ARG1);
207 break;
208 }
209 }
210
PRE(mmuext_op)
212 {
213 struct vki_xen_mmuext_op *ops = (struct vki_xen_mmuext_op *)ARG1;
214 unsigned int i, nr = ARG2;
215
216 for (i=0; i<nr; i++) {
217 struct vki_xen_mmuext_op *op = ops + i;
218 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP cmd",
219 (Addr)&op->cmd, sizeof(op->cmd));
220 switch(op->cmd) {
221 case VKI_XEN_MMUEXT_PIN_L1_TABLE:
222 case VKI_XEN_MMUEXT_PIN_L2_TABLE:
223 case VKI_XEN_MMUEXT_PIN_L3_TABLE:
224 case VKI_XEN_MMUEXT_PIN_L4_TABLE:
225 case VKI_XEN_MMUEXT_UNPIN_TABLE:
226 case VKI_XEN_MMUEXT_NEW_BASEPTR:
227 case VKI_XEN_MMUEXT_CLEAR_PAGE:
228 case VKI_XEN_MMUEXT_COPY_PAGE:
229 case VKI_XEN_MMUEXT_MARK_SUPER:
230 case VKI_XEN_MMUEXT_UNMARK_SUPER:
231 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
232 (Addr)&op->arg1.mfn,
233 sizeof(op->arg1.mfn));
234 break;
235
236 case VKI_XEN_MMUEXT_INVLPG_LOCAL:
237 case VKI_XEN_MMUEXT_INVLPG_ALL:
238 case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.linear_addr",
                      (Addr)&op->arg1.linear_addr,
                      sizeof(op->arg1.linear_addr));
242 break;
243
244 case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
245 case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
246 case VKI_XEN_MMUEXT_INVLPG_MULTI:
247 case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
248 case VKI_XEN_MMUEXT_FLUSH_CACHE:
249 case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
250 case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
251 /* None */
252 break;
253 }
254
255 switch(op->cmd) {
256 case VKI_XEN_MMUEXT_SET_LDT:
257 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents",
258 (Addr)&op->arg2.nr_ents,
259 sizeof(op->arg2.nr_ents));
260 break;
261
262 case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
263 case VKI_XEN_MMUEXT_INVLPG_MULTI:
264 /* How many??? */
265 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask",
266 (Addr)&op->arg2.vcpumask,
267 sizeof(op->arg2.vcpumask));
268 break;
269
270 case VKI_XEN_MMUEXT_COPY_PAGE:
271 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn",
272 (Addr)&op->arg2.src_mfn,
273 sizeof(op->arg2.src_mfn));
274 break;
275
276 case VKI_XEN_MMUEXT_PIN_L1_TABLE:
277 case VKI_XEN_MMUEXT_PIN_L2_TABLE:
278 case VKI_XEN_MMUEXT_PIN_L3_TABLE:
279 case VKI_XEN_MMUEXT_PIN_L4_TABLE:
280 case VKI_XEN_MMUEXT_UNPIN_TABLE:
281 case VKI_XEN_MMUEXT_NEW_BASEPTR:
282 case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
283 case VKI_XEN_MMUEXT_INVLPG_LOCAL:
284 case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
285 case VKI_XEN_MMUEXT_INVLPG_ALL:
286 case VKI_XEN_MMUEXT_FLUSH_CACHE:
287 case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
288 case VKI_XEN_MMUEXT_CLEAR_PAGE:
289 case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
290 case VKI_XEN_MMUEXT_MARK_SUPER:
291 case VKI_XEN_MMUEXT_UNMARK_SUPER:
292 /* None */
293 break;
294 }
295 }
296 }
297
static void pre_evtchn_op(ThreadId tid,
299 SyscallArgLayout* layout,
300 /*MOD*/SyscallArgs* arrghs,
301 /*OUT*/SyscallStatus* status,
302 /*OUT*/UWord* flags,
303 __vki_u32 cmd, void *arg, int compat)
304 {
305 PRINT("__HYPERVISOR_event_channel_op%s ( %d, %p )",
306 compat ? "_compat" : "", cmd, arg);
307
308 switch (cmd) {
309 case VKI_XEN_EVTCHNOP_alloc_unbound: {
310 struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
311 PRE_MEM_READ("EVTCHNOP_alloc_unbound dom",
312 (Addr)&alloc_unbound->dom, sizeof(alloc_unbound->dom));
313 PRE_MEM_READ("EVTCHNOP_alloc_unbound remote_dom",
314 (Addr)&alloc_unbound->remote_dom,
315 sizeof(alloc_unbound->remote_dom));
316 break;
317 }
318 default:
319 if ( compat )
320 bad_subop(tid, layout, arrghs, status, flags,
321 "__HYPERVISOR_event_channel_op_compat", cmd);
322 else
323 bad_subop(tid, layout, arrghs, status, flags,
324 "__HYPERVISOR_event_channel_op", cmd);
325 break;
326 }
327 }
328
PRE(evtchn_op)
330 {
331 pre_evtchn_op(tid, layout, arrghs, status, flags,
332 ARG1, (void *)ARG2, 0);
333 }
334
PRE(evtchn_op_compat)
336 {
337 struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
338 PRE_MEM_READ("__HYPERVISOR_event_channel_op_compat",
339 ARG1, sizeof(*evtchn));
340
341 pre_evtchn_op(tid, layout, arrghs, status, flags,
342 evtchn->cmd, &evtchn->u, 1);
343 }
344
PRE(xen_version)
346 {
347 PRINT("__HYPERVISOR_xen_version ( %ld, %lx )", ARG1, ARG2);
348
349 switch (ARG1) {
350 case VKI_XENVER_version:
351 case VKI_XENVER_extraversion:
352 case VKI_XENVER_compile_info:
353 case VKI_XENVER_capabilities:
354 case VKI_XENVER_changeset:
355 case VKI_XENVER_platform_parameters:
356 case VKI_XENVER_get_features:
357 case VKI_XENVER_pagesize:
358 case VKI_XENVER_guest_handle:
359 case VKI_XENVER_commandline:
360 /* No inputs */
361 break;
362
363 default:
364 bad_subop(tid, layout, arrghs, status, flags,
365 "__HYPERVISOR_xen_version", ARG1);
366 break;
367 }
368 }
369
PRE(grant_table_op)
371 {
372 PRINT("__HYPERVISOR_grant_table_op ( %ld, 0x%lx, %ld )", ARG1, ARG2, ARG3);
373 switch (ARG1) {
374 case VKI_XEN_GNTTABOP_setup_table: {
375 struct vki_xen_gnttab_setup_table *gst =
376 (struct vki_xen_gnttab_setup_table*)ARG2;
377 PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table dom",
378 (Addr)&gst->dom, sizeof(gst->dom));
379 PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table nr_frames",
380 (Addr)&gst->nr_frames, sizeof(gst->nr_frames));
381 break;
382 }
383 default:
384 bad_subop(tid, layout, arrghs, status, flags,
385 "__HYPERVISOR_grant_table_op", ARG1);
386 break;
387 }
388 }
389
PRE(sysctl) {
391 struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;
392
393 PRINT("__HYPERVISOR_sysctl ( %d )", sysctl->cmd);
394
395 /*
396 * Common part of xen_sysctl:
397 * uint32_t cmd;
398 * uint32_t interface_version;
399 */
400 PRE_MEM_READ("__HYPERVISOR_sysctl", ARG1,
401 sizeof(vki_uint32_t) + sizeof(vki_uint32_t));
402
403 if (!sysctl)
404 return;
405
406 switch (sysctl->interface_version)
407 {
408 case 0x00000008:
409 case 0x00000009:
410 case 0x0000000a:
411 break;
412 default:
413 VG_(dmsg)("WARNING: sysctl version %"PRIx32" not supported\n",
414 sysctl->interface_version);
415 if (VG_(clo_verbosity) > 1) {
416 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
417 }
418 VG_(dmsg)("You may be able to write your own handler.\n");
419 VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
420 VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
421 VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
422 VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");
423
424 SET_STATUS_Failure(VKI_EINVAL);
425 return;
426 }
427
428 #define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field) \
429 PRE_MEM_READ("XEN_SYSCTL_" #_sysctl " u." #_union "." #_field, \
430 (Addr)&sysctl->u._union._field, \
431 sizeof(sysctl->u._union._field))
432 #define PRE_XEN_SYSCTL_READ(_sysctl, _field) \
433 __PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)
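/* For example, PRE_XEN_SYSCTL_READ(readconsole, clear) expands to

      PRE_MEM_READ("XEN_SYSCTL_readconsole u.readconsole.clear",
                   (Addr)&sysctl->u.readconsole.clear,
                   sizeof(sysctl->u.readconsole.clear));

   i.e. it tells the tool that the hypervisor will read that one field
   of the sysctl argument, so an undefined or unaddressable value there
   can be reported at the hypercall site. */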
434
435 switch (sysctl->cmd) {
436 case VKI_XEN_SYSCTL_readconsole:
437 /* These are all unconditionally read */
438 PRE_XEN_SYSCTL_READ(readconsole, clear);
439 PRE_XEN_SYSCTL_READ(readconsole, incremental);
440 PRE_XEN_SYSCTL_READ(readconsole, buffer);
441 PRE_XEN_SYSCTL_READ(readconsole, count);
442
443 /* 'index' only read if 'incremental' is nonzero */
444 if (sysctl->u.readconsole.incremental)
445 PRE_XEN_SYSCTL_READ(readconsole, index);
446 break;
447
448 case VKI_XEN_SYSCTL_getdomaininfolist:
449 switch (sysctl->interface_version)
450 {
451 case 0x00000008:
452 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, first_domain);
453 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains);
454 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, buffer);
455 break;
456 case 0x00000009:
457 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, first_domain);
458 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, max_domains);
459 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, buffer);
460 break;
461 case 0x0000000a:
462 PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
463 PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
464 PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
465 break;
466 default:
467 VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
468 "%"PRIx32" not implemented yet\n",
469 sysctl->interface_version);
470 SET_STATUS_Failure(VKI_EINVAL);
471 return;
472 }
473 break;
474
475 case VKI_XEN_SYSCTL_debug_keys:
476 PRE_XEN_SYSCTL_READ(debug_keys, keys);
477 PRE_XEN_SYSCTL_READ(debug_keys, nr_keys);
478 PRE_MEM_READ("XEN_SYSCTL_debug_keys *keys",
479 (Addr)sysctl->u.debug_keys.keys.p,
480 sysctl->u.debug_keys.nr_keys * sizeof(char));
481 break;
482
483 case VKI_XEN_SYSCTL_sched_id:
484 /* No inputs */
485 break;
486
487 case VKI_XEN_SYSCTL_cpupool_op:
488 PRE_XEN_SYSCTL_READ(cpupool_op, op);
489
490 switch(sysctl->u.cpupool_op.op) {
491 case VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE:
492 case VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY:
493 case VKI_XEN_SYSCTL_CPUPOOL_OP_INFO:
494 case VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
495 case VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU:
496 case VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
497 PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);
498 }
499
500 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE)
501 PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);
502
503 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
504 PRE_XEN_SYSCTL_READ(cpupool_op, domid);
505
506 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
507 sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU)
508 PRE_XEN_SYSCTL_READ(cpupool_op, cpu);
509
510 break;
511
512 case VKI_XEN_SYSCTL_physinfo:
513 /* No input params */
514 break;
515
516 case VKI_XEN_SYSCTL_topologyinfo:
517 PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
518 PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
519 PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
520 PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);
521 break;
522
523 case VKI_XEN_SYSCTL_numainfo:
524 PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
525 PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
526 PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
527 PRE_XEN_SYSCTL_READ(numainfo, node_to_node_distance);
528 break;
529
530 default:
531 bad_subop(tid, layout, arrghs, status, flags,
532 "__HYPERVISOR_sysctl", sysctl->cmd);
533 break;
534 }
535 #undef PRE_XEN_SYSCTL_READ
536 #undef __PRE_XEN_SYSCTL_READ
537 }
538
PRE(domctl)
540 {
541 struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;
542
543 PRINT("__HYPERVISOR_domctl ( %d ) on dom%d", domctl->cmd, domctl->domain);
544
545 /*
546 * Common part of xen_domctl:
547 * vki_uint32_t cmd;
548 * vki_uint32_t interface_version;
549 * vki_xen_domid_t domain;
550 */
551 PRE_MEM_READ("__HYPERVISOR_domctl", ARG1,
552 sizeof(vki_uint32_t) + sizeof(vki_uint32_t)
553 + sizeof(vki_xen_domid_t));
554
555 if (!domctl)
556 return;
557
558 switch (domctl->interface_version)
559 {
560 case 0x00000007:
561 case 0x00000008:
562 case 0x00000009:
563 break;
564 default:
565 VG_(dmsg)("WARNING: domctl version %"PRIx32" not supported\n",
566 domctl->interface_version);
567 if (VG_(clo_verbosity) > 1) {
568 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
569 }
570 VG_(dmsg)("You may be able to write your own handler.\n");
571 VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
572 VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
573 VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
574 VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");
575
576 SET_STATUS_Failure(VKI_EINVAL);
577 return;
578 }
579
580 #define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field) \
581 PRE_MEM_READ("XEN_DOMCTL_" #_domctl " u." #_union "." #_field, \
582 (Addr)&domctl->u._union._field, \
583 sizeof(domctl->u._union._field))
584 #define PRE_XEN_DOMCTL_READ(_domctl, _field) \
585 __PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
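/* These work exactly like the sysctl macros above, but on the domctl
   union: each use marks one input field of domctl->u as read by the
   hypervisor. */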
586
587 switch (domctl->cmd) {
588 case VKI_XEN_DOMCTL_destroydomain:
589 case VKI_XEN_DOMCTL_pausedomain:
590 case VKI_XEN_DOMCTL_max_vcpus:
591 case VKI_XEN_DOMCTL_get_address_size:
592 case VKI_XEN_DOMCTL_gettscinfo:
593 case VKI_XEN_DOMCTL_getdomaininfo:
594 case VKI_XEN_DOMCTL_unpausedomain:
595 case VKI_XEN_DOMCTL_resumedomain:
596 /* No input fields. */
597 break;
598
599 case VKI_XEN_DOMCTL_createdomain:
600 PRE_XEN_DOMCTL_READ(createdomain, ssidref);
601 PRE_XEN_DOMCTL_READ(createdomain, handle);
602 PRE_XEN_DOMCTL_READ(createdomain, flags);
603 break;
604
605 case VKI_XEN_DOMCTL_gethvmcontext:
606 /* Xen unconditionally reads the 'buffer' pointer */
607 __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, buffer);
608 /* Xen only consumes 'size' if 'buffer' is non NULL. A NULL
609 * buffer is a request for the required size. */
610 if ( domctl->u.hvmcontext.buffer.p )
611 __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);
612 break;
613
614 case VKI_XEN_DOMCTL_sethvmcontext:
615 __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, size);
616 __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, buffer);
617 PRE_MEM_READ("XEN_DOMCTL_sethvmcontext *buffer",
618 (Addr)domctl->u.hvmcontext.buffer.p,
619 domctl->u.hvmcontext.size);
620 break;
621
622 case VKI_XEN_DOMCTL_max_mem:
623 PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
624 break;
625
626 case VKI_XEN_DOMCTL_set_address_size:
627 __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);
628 break;
629
630 case VKI_XEN_DOMCTL_settscinfo:
631 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.tsc_mode);
632 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.gtsc_khz);
633 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.incarnation);
634 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);
635 break;
636
637 case VKI_XEN_DOMCTL_hypercall_init:
638 PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
639 break;
640
641 case VKI_XEN_DOMCTL_settimeoffset:
642 PRE_XEN_DOMCTL_READ(settimeoffset, time_offset_seconds);
643 break;
644
645 case VKI_XEN_DOMCTL_getvcpuinfo:
646 PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
647 break;
648
649 case VKI_XEN_DOMCTL_scheduler_op:
650 PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
651 PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
652 if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_putinfo ) {
653 switch(domctl->u.scheduler_op.sched_id) {
654 case VKI_XEN_SCHEDULER_SEDF:
655 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.period);
656 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.slice);
657 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.latency);
658 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.extratime);
659 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.weight);
660 break;
661 case VKI_XEN_SCHEDULER_CREDIT:
662 PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.weight);
663 PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.cap);
664 break;
665 case VKI_XEN_SCHEDULER_CREDIT2:
666 PRE_XEN_DOMCTL_READ(scheduler_op, u.credit2.weight);
667 break;
668 case VKI_XEN_SCHEDULER_ARINC653:
669 break;
670 }
671 }
672 break;
673
674 case VKI_XEN_DOMCTL_getvcpuaffinity:
675 __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity, vcpu);
676 break;
677
678 case VKI_XEN_DOMCTL_setvcpuaffinity:
679 __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity, vcpu);
680 PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
681 (Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
682 domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
683 break;
684
685 case VKI_XEN_DOMCTL_getnodeaffinity:
686 __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
687 break;
688 case VKI_XEN_DOMCTL_setnodeaffinity:
689 __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.nodemap.bitmap",
                   (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                   domctl->u.nodeaffinity.nodemap.nr_bits / 8);
693 break;
694
695 case VKI_XEN_DOMCTL_getvcpucontext:
696 __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);
697 break;
698
699 case VKI_XEN_DOMCTL_setvcpucontext:
700 __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
701 __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);
702 break;
703
704 case VKI_XEN_DOMCTL_set_cpuid:
705 PRE_MEM_READ("XEN_DOMCTL_set_cpuid u.cpuid",
706 (Addr)&domctl->u.cpuid, sizeof(domctl->u.cpuid));
707 break;
708
709 case VKI_XEN_DOMCTL_getpageframeinfo3:
710 PRE_XEN_DOMCTL_READ(getpageframeinfo3, num);
711 PRE_XEN_DOMCTL_READ(getpageframeinfo3, array.p);
712 PRE_MEM_READ("XEN_DOMCTL_getpageframeinfo3 *u.getpageframeinfo3.array.p",
713 (Addr)domctl->u.getpageframeinfo3.array.p,
714 domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
715 break;
716
717 case VKI_XEN_DOMCTL_getvcpuextstate:
718 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, vcpu);
719 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, xfeature_mask);
720 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, size);
721 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, buffer);
722 break;
723
724 case VKI_XEN_DOMCTL_shadow_op:
725 PRE_XEN_DOMCTL_READ(shadow_op, op);
726
727 switch(domctl->u.shadow_op.op)
728 {
729 case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
730 /* No further inputs */
731 break;
732
733 case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE:
734 PRE_XEN_DOMCTL_READ(shadow_op, mode);
735 switch(domctl->u.shadow_op.mode)
736 {
737 case XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY:
738 goto domctl_shadow_op_enable_logdirty;
739
740
741 default:
742 bad_subop(tid, layout, arrghs, status, flags,
743 "__HYPERVISOR_domctl shadowop mode",
744 domctl->u.shadow_op.mode);
745 break;
746 }
747
748 case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
749 domctl_shadow_op_enable_logdirty:
750 /* No further inputs */
751 break;
752
753 case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
754 case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
755 PRE_XEN_DOMCTL_READ(shadow_op, dirty_bitmap);
756 PRE_XEN_DOMCTL_READ(shadow_op, pages);
757 break;
758
759 default:
760 bad_subop(tid, layout, arrghs, status, flags,
761 "__HYPERVISOR_domctl shadow(10)",
762 domctl->u.shadow_op.op);
763 break;
764 }
765 break;
766
767 case VKI_XEN_DOMCTL_set_max_evtchn:
768 PRE_XEN_DOMCTL_READ(set_max_evtchn, max_port);
769 break;
770
771 case VKI_XEN_DOMCTL_cacheflush:
772 PRE_XEN_DOMCTL_READ(cacheflush, start_pfn);
773 PRE_XEN_DOMCTL_READ(cacheflush, nr_pfns);
774 break;
775
776 default:
777 bad_subop(tid, layout, arrghs, status, flags,
778 "__HYPERVISOR_domctl", domctl->cmd);
779 break;
780 }
781 #undef PRE_XEN_DOMCTL_READ
782 #undef __PRE_XEN_DOMCTL_READ
783 }
784
PRE(hvm_op)
786 {
787 unsigned long op = ARG1;
788 void *arg = (void *)(unsigned long)ARG2;
789
790 PRINT("__HYPERVISOR_hvm_op ( %ld, %p )", op, arg);
791
792 #define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field) \
793 PRE_MEM_READ("XEN_HVMOP_" # _hvm_op " " #_field, \
794 (Addr)&((_type*)arg)->_field, \
795 sizeof(((_type*)arg)->_field))
796 #define PRE_XEN_HVMOP_READ(_hvm_op, _field) \
797 __PRE_XEN_HVMOP_READ(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
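/* Unlike the sysctl/domctl helpers, the HVMOP macros operate on the
   raw guest pointer 'arg', casting it to the per-operation structure
   type before marking the named field as read. */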
798
799 switch (op) {
800 case VKI_XEN_HVMOP_set_param:
801 __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, domid);
802 __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, index);
803 __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, value);
804 break;
805
806 case VKI_XEN_HVMOP_get_param:
807 __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, domid);
808 __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, index);
809 break;
810
811 case VKI_XEN_HVMOP_set_isa_irq_level:
812 PRE_XEN_HVMOP_READ(set_isa_irq_level, domid);
813 PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq);
814 PRE_XEN_HVMOP_READ(set_isa_irq_level, level);
815 break;
816
817 case VKI_XEN_HVMOP_set_pci_link_route:
818 PRE_XEN_HVMOP_READ(set_pci_link_route, domid);
819 PRE_XEN_HVMOP_READ(set_pci_link_route, link);
820 PRE_XEN_HVMOP_READ(set_pci_link_route, isa_irq);
821 break;
822
823 case VKI_XEN_HVMOP_set_mem_type:
824 PRE_XEN_HVMOP_READ(set_mem_type, domid);
825 PRE_XEN_HVMOP_READ(set_mem_type, hvmmem_type);
826 PRE_XEN_HVMOP_READ(set_mem_type, nr);
827 PRE_XEN_HVMOP_READ(set_mem_type, first_pfn);
828 break;
829
830 default:
831 bad_subop(tid, layout, arrghs, status, flags,
832 "__HYPERVISOR_hvm_op", op);
833 break;
834 }
835 #undef __PRE_XEN_HVMOP_READ
836 #undef PRE_XEN_HVMOP_READ
837 }
838
PRE(tmem_op)
840 {
841 struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;
842
843 PRINT("__HYPERVISOR_tmem_op ( %d )", tmem->cmd);
844
845 /* Common part for xen_tmem_op:
846 * vki_uint32_t cmd;
847 */
848 PRE_MEM_READ("__HYPERVISOR_tmem_op cmd", ARG1, sizeof(vki_uint32_t));
849
850
851 #define __PRE_XEN_TMEMOP_READ(_tmem, _union, _field) \
852 PRE_MEM_READ("XEN_tmem_op_" #_tmem " u." #_union "." #_field, \
853 (Addr)&tmem->u._union._field, \
854 sizeof(tmem->u._union._field))
855 #define PRE_XEN_TMEMOP_READ(_tmem, _field) \
856 __PRE_XEN_TMEMOP_READ(_tmem, _tmem, _field)
857
858 switch(tmem->cmd) {
859
860 case VKI_XEN_TMEM_control:
861
862 /* Common part for control hypercall:
863 * vki_int32_t pool_id;
864 * vki_uint32_t subop;
865 */
      PRE_MEM_READ("__HYPERVISOR_tmem_op pool_id",
                   (Addr)&tmem->pool_id, sizeof(tmem->pool_id));
868 PRE_XEN_TMEMOP_READ(ctrl, subop);
869
870 switch (tmem->u.ctrl.subop) {
871
872 case VKI_XEN_TMEMC_save_begin:
873 PRE_XEN_TMEMOP_READ(ctrl, cli_id);
874 PRE_XEN_TMEMOP_READ(ctrl, arg1);
875 PRE_XEN_TMEMOP_READ(ctrl, buf);
876 break;
877
878 default:
879 bad_subop(tid, layout, arrghs, status, flags,
880 "__HYPERVISOR_tmem_op_control", tmem->u.ctrl.subop);
881 }
882
883 break;
884
885 default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_tmem_op", tmem->cmd);
888 }
889
890 #undef PRE_XEN_TMEMOP_READ
891 #undef __PRE_XEN_TMEMOP_READ
892 }
893
POST(memory_op)
895 {
896 switch (ARG1) {
897 case VKI_XENMEM_maximum_ram_page:
898 case VKI_XENMEM_set_memory_map:
899 case VKI_XENMEM_decrease_reservation:
900 case VKI_XENMEM_claim_pages:
901 case VKI_XENMEM_maximum_gpfn:
902 case VKI_XENMEM_remove_from_physmap:
903 /* No outputs */
904 break;
905 case VKI_XENMEM_increase_reservation:
906 case VKI_XENMEM_populate_physmap: {
907 struct xen_memory_reservation *memory_reservation =
908 (struct xen_memory_reservation *)ARG2;
909
910 POST_MEM_WRITE((Addr)memory_reservation->extent_start.p,
911 sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
912 break;
913 }
914
915 case VKI_XENMEM_machphys_mfn_list: {
916 struct vki_xen_machphys_mfn_list *arg =
917 (struct vki_xen_machphys_mfn_list *)ARG2;
918 POST_MEM_WRITE((Addr)&arg->nr_extents, sizeof(arg->nr_extents));
919 POST_MEM_WRITE((Addr)arg->extent_start.p,
920 sizeof(vki_xen_pfn_t) * arg->nr_extents);
921 break;
922 }
923
924 case VKI_XENMEM_add_to_physmap: {
925 struct vki_xen_add_to_physmap *arg =
926 (struct vki_xen_add_to_physmap *)ARG2;
927 if (arg->space == VKI_XENMAPSPACE_gmfn_range)
928 POST_MEM_WRITE(ARG2, sizeof(*arg));
      break;
   }
930
931 case VKI_XENMEM_get_sharing_freed_pages:
932 case VKI_XENMEM_get_sharing_shared_pages:
933 /* No outputs */
934 break;
935 }
936 }
937
POST(mmuext_op)
939 {
940 unsigned int *pdone = (unsigned int *)ARG3;
941 /* simplistic */
942 POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));
943 }
944
static void post_evtchn_op(ThreadId tid, __vki_u32 cmd, void *arg, int compat)
946 {
947 switch (cmd) {
948 case VKI_XEN_EVTCHNOP_alloc_unbound: {
949 struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
950 POST_MEM_WRITE((Addr)&alloc_unbound->port, sizeof(alloc_unbound->port));
951 break;
952 }
953 }
954 }
955
POST(evtchn_op)
957 {
958 post_evtchn_op(tid, ARG1, (void *)ARG2, 0);
959 }
960
POST(evtchn_op_compat)
962 {
963 struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
964 post_evtchn_op(tid, evtchn->cmd, &evtchn->u, 1);
965 }
966
POST(xen_version)
968 {
969 switch (ARG1) {
970 case VKI_XENVER_version:
971 /* No outputs */
972 break;
973 case VKI_XENVER_extraversion:
974 POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_extraversion_t));
975 break;
976 case VKI_XENVER_compile_info:
977 POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_compile_info));
978 break;
979 case VKI_XENVER_capabilities:
980 POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_capabilities_info_t));
981 break;
982 case VKI_XENVER_changeset:
983 POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_changeset_info_t));
984 break;
985 case VKI_XENVER_platform_parameters:
986 POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_platform_parameters));
987 break;
988 case VKI_XENVER_get_features:
989 POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_feature_info));
990 break;
991 case VKI_XENVER_pagesize:
992 /* No outputs */
993 break;
994 case VKI_XENVER_guest_handle:
995 POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_domain_handle_t));
996 break;
997 case VKI_XENVER_commandline:
998 POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));
999 break;
1000 }
1001 }
1002
POST(grant_table_op)
1004 {
1005 switch (ARG1) {
1006 case VKI_XEN_GNTTABOP_setup_table: {
1007 struct vki_xen_gnttab_setup_table *gst =
1008 (struct vki_xen_gnttab_setup_table*)ARG2;
      POST_MEM_WRITE((Addr)&gst->status, sizeof(gst->status));
      POST_MEM_WRITE((Addr)gst->frame_list.p,
                     sizeof(*gst->frame_list.p) * gst->nr_frames);
1014 break;
1015 }
1016 }
1017 }
1018
POST(sysctl)
1020 {
1021 struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;
1022
1023 switch (sysctl->interface_version)
1024 {
1025 case 0x00000008:
1026 case 0x00000009:
1027 case 0x0000000a:
1028 break;
1029 default:
1030 return;
1031 }
1032
1033 #define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field) \
1034 POST_MEM_WRITE((Addr)&sysctl->u._union._field, \
1035 sizeof(sysctl->u._union._field))
1036 #define POST_XEN_SYSCTL_WRITE(_sysctl, _field) \
1037 __POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)
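/* Mirror image of the PRE_XEN_SYSCTL_READ macros: after the hypercall
   completes these mark an output field of sysctl->u as having been
   written (and hence made defined) by the hypervisor. */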
1038
1039 switch (sysctl->cmd) {
1040 case VKI_XEN_SYSCTL_readconsole:
1041 POST_MEM_WRITE((Addr)sysctl->u.readconsole.buffer.p,
1042 sysctl->u.readconsole.count * sizeof(char));
1043 break;
1044
1045 case VKI_XEN_SYSCTL_getdomaininfolist:
1046 switch (sysctl->interface_version)
1047 {
1048 case 0x00000008:
1049 POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000008, num_domains);
1050 POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000008.buffer.p,
1051 sizeof(*sysctl->u.getdomaininfolist_00000008.buffer.p)
1052 * sysctl->u.getdomaininfolist_00000008.num_domains);
1053 break;
1054 case 0x00000009:
1055 POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000009, num_domains);
1056 POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000009.buffer.p,
1057 sizeof(*sysctl->u.getdomaininfolist_00000009.buffer.p)
1058 * sysctl->u.getdomaininfolist_00000009.num_domains);
1059 break;
1060 case 0x0000000a:
1061 POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
1062 POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
1063 sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
1064 * sysctl->u.getdomaininfolist_0000000a.num_domains);
1065 break;
1066 }
1067 break;
1068
1069 case VKI_XEN_SYSCTL_sched_id:
1070 POST_XEN_SYSCTL_WRITE(sched_id, sched_id);
1071 break;
1072
1073 case VKI_XEN_SYSCTL_cpupool_op:
1074 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE ||
1075 sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO)
1076 POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
1077 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO) {
1078 POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
1079 POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
1080 }
1081 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO ||
1082 sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
1083 POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
1084 break;
1085
1086 case VKI_XEN_SYSCTL_physinfo:
1087 switch (sysctl->interface_version)
1088 {
1089 case 0x00000008:
1090 case 0x00000009: /* Unchanged from version 8 */
1091 POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
1092 POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
1093 POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
1094 POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
1095 POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
1096 POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
1097 POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
1098 POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
1099 POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
1100 POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap);
1102 POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);
1103 break;
1104 case 0x0000000a:
1105 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
1106 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
1107 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
1108 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
1109 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
1110 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
1111 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
1112 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
1113 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
1114 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
1115 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap);
1117 POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
1118 break;
1119 }
1120 break;
1121
1122 case VKI_XEN_SYSCTL_topologyinfo:
1123 POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
1124 if (sysctl->u.topologyinfo.cpu_to_core.p)
1125 POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
1126 sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
1127 if (sysctl->u.topologyinfo.cpu_to_socket.p)
1128 POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_socket.p,
1129 sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
1130 if (sysctl->u.topologyinfo.cpu_to_node.p)
1131 POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_node.p,
1132 sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
1133 break;
1134
1135 case VKI_XEN_SYSCTL_numainfo:
1136 POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
1137 POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
1138 sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
1139 POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memfree.p,
1140 sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
1141 POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_node_distance.p,
1142 sizeof(uint32_t) * sysctl->u.numainfo.max_node_index);
1143 break;
1144
1145 /* No outputs */
1146 case VKI_XEN_SYSCTL_debug_keys:
1147 break;
1148 }
1149 #undef POST_XEN_SYSCTL_WRITE
1150 #undef __POST_XEN_SYSCTL_WRITE
1151 }
1152
POST(domctl){
1154 struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;
1155
1156 switch (domctl->interface_version) {
1157 case 0x00000007:
1158 case 0x00000008:
1159 case 0x00000009:
1160 break;
1161 default:
1162 return;
1163 }
1164
1165 #define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field) \
1166 POST_MEM_WRITE((Addr)&domctl->u._union._field, \
1167 sizeof(domctl->u._union._field));
1168 #define POST_XEN_DOMCTL_WRITE(_domctl, _field) \
1169 __POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)
1170
1171 switch (domctl->cmd) {
1172 case VKI_XEN_DOMCTL_createdomain:
1173 case VKI_XEN_DOMCTL_destroydomain:
1174 case VKI_XEN_DOMCTL_pausedomain:
1175 case VKI_XEN_DOMCTL_max_mem:
1176 case VKI_XEN_DOMCTL_set_address_size:
1177 case VKI_XEN_DOMCTL_settscinfo:
1178 case VKI_XEN_DOMCTL_hypercall_init:
1179 case VKI_XEN_DOMCTL_setvcpuaffinity:
1180 case VKI_XEN_DOMCTL_setvcpucontext:
1181 case VKI_XEN_DOMCTL_setnodeaffinity:
1182 case VKI_XEN_DOMCTL_set_cpuid:
1183 case VKI_XEN_DOMCTL_unpausedomain:
1184 case VKI_XEN_DOMCTL_sethvmcontext:
1185 case VKI_XEN_DOMCTL_set_max_evtchn:
1186 case VKI_XEN_DOMCTL_cacheflush:
1187 case VKI_XEN_DOMCTL_resumedomain:
1188 /* No output fields */
1189 break;
1190
1191 case VKI_XEN_DOMCTL_max_vcpus:
1192 POST_XEN_DOMCTL_WRITE(max_vcpus, max);
1193 break;
1194
1195 case VKI_XEN_DOMCTL_get_address_size:
1196 __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
1197 break;
1198
1199 case VKI_XEN_DOMCTL_gettscinfo:
1200 __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.tsc_mode);
1201 __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.gtsc_khz);
1202 __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.incarnation);
1203 __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.elapsed_nsec);
1204 break;
1205
1206 case VKI_XEN_DOMCTL_getvcpuinfo:
1207 POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
1208 POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
1209 POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
1210 POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
1211 POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
1212 break;
1213
1214 case VKI_XEN_DOMCTL_gethvmcontext:
1215 /* Xen unconditionally writes size... */
1216 __POST_XEN_DOMCTL_WRITE(gethvmcontext, hvmcontext, size);
1217 /* ...but only writes to the buffer if it was non NULL */
1218 if ( domctl->u.hvmcontext.buffer.p )
1219 POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
1220 sizeof(*domctl->u.hvmcontext.buffer.p)
1221 * domctl->u.hvmcontext.size);
1222 break;
1223
1224 case VKI_XEN_DOMCTL_scheduler_op:
1225 if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
1226 switch(domctl->u.scheduler_op.sched_id) {
1227 case VKI_XEN_SCHEDULER_SEDF:
1228 POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.period);
1229 POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.slice);
1230 POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.latency);
1231 POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.extratime);
1232 POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.weight);
1233 break;
1234 case VKI_XEN_SCHEDULER_CREDIT:
1235 POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.weight);
1236 POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.cap);
1237 break;
1238 case VKI_XEN_SCHEDULER_CREDIT2:
1239 POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit2.weight);
1240 break;
1241 case VKI_XEN_SCHEDULER_ARINC653:
1242 break;
1243 }
1244 }
1245 break;
1246
1247 case VKI_XEN_DOMCTL_getvcpuaffinity:
1248 POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
1249 domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
1250 break;
1251
1252 case VKI_XEN_DOMCTL_getnodeaffinity:
1253 POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
1254 domctl->u.nodeaffinity.nodemap.nr_bits / 8);
1255 break;
1256
1257 case VKI_XEN_DOMCTL_getdomaininfo:
1258 switch (domctl->interface_version) {
1259 case 0x00000007:
1260 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, domain);
1261 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, flags);
1262 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, tot_pages);
1263 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_pages);
1264 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shr_pages);
1265 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shared_info_frame);
1266 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpu_time);
1267 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, nr_online_vcpus);
1268 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_vcpu_id);
1269 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, ssidref);
1270 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, handle);
1271 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpupool);
1272 break;
1273 case 0x00000008:
1274 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, domain);
1275 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, flags);
1276 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, tot_pages);
1277 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_pages);
1278 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shr_pages);
1279 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, paged_pages);
1280 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shared_info_frame);
1281 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpu_time);
1282 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, nr_online_vcpus);
1283 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_vcpu_id);
1284 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, ssidref);
1285 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
1286 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
1287 break;
1288 case 0x00000009:
1289 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
1290 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
1291 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
1292 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
1293 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
1294 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
1295 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
1296 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
1297 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
1298 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
1299 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
1300 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
1301 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
1302 POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
1303 break;
1304 }
1305 break;
1306 case VKI_XEN_DOMCTL_getvcpucontext:
1307 __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
1308 break;
1309
1310 case VKI_XEN_DOMCTL_getpageframeinfo3:
1311 POST_MEM_WRITE((Addr)domctl->u.getpageframeinfo3.array.p,
1312 domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
1313 break;
1314
1315
1316 case VKI_XEN_DOMCTL_getvcpuextstate:
1317 __POST_XEN_DOMCTL_WRITE(getvcpuextstate, vcpuextstate, xfeature_mask);
1318 __POST_XEN_DOMCTL_WRITE(getvcpuextstate, vcpuextstate, size);
1319 POST_MEM_WRITE((Addr)domctl->u.vcpuextstate.buffer.p,
1320 domctl->u.vcpuextstate.size);
1321 break;
1322
1323 case VKI_XEN_DOMCTL_shadow_op:
1324 switch(domctl->u.shadow_op.op)
1325 {
1326 case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
1327 /* No outputs */
1328 break;
1329
1330 case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
1331 case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
1332 POST_XEN_DOMCTL_WRITE(shadow_op, pages);
1333 POST_XEN_DOMCTL_WRITE(shadow_op, stats.fault_count);
1334 POST_XEN_DOMCTL_WRITE(shadow_op, stats.dirty_count);
1335 if(domctl->u.shadow_op.dirty_bitmap.p)
1336 POST_MEM_WRITE((Addr)domctl->u.shadow_op.dirty_bitmap.p,
1337 domctl->u.shadow_op.pages * sizeof(vki_uint8_t));
1338 break;
1339
1340 default:
1341 break;
1342 }
1343 break;
1344 }
1345 #undef POST_XEN_DOMCTL_WRITE
1346 #undef __POST_XEN_DOMCTL_WRITE
1347 }
1348
POST(hvm_op)
1350 {
1351 unsigned long op = ARG1;
1352 void *arg = (void *)(unsigned long)ARG2;
1353
1354 #define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field) \
1355 POST_MEM_WRITE((Addr)&((_type*)arg)->_field, \
1356 sizeof(((_type*)arg)->_field))
#define POST_XEN_HVMOP_WRITE(_hvm_op, _field) \
   __POST_XEN_HVMOP_WRITE(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
1359
1360 switch (op) {
1361 case VKI_XEN_HVMOP_set_param:
1362 case VKI_XEN_HVMOP_set_isa_irq_level:
1363 case VKI_XEN_HVMOP_set_pci_link_route:
1364 case VKI_XEN_HVMOP_set_mem_type:
      /* No output parameters */
1366 break;
1367
1368 case VKI_XEN_HVMOP_get_param:
1369 __POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);
1370 break;
1371 }
1372 #undef __POST_XEN_HVMOP_WRITE
1373 #undef POST_XEN_HVMOP_WRITE
1374 }
1375
POST(tmem_op)
1377 {
1378 struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;
1379
1380 switch(tmem->cmd) {
1381
1382 case VKI_XEN_TMEM_control:
1383
1384 switch(tmem->u.ctrl.subop) {
1385 /* No outputs */
1386 case VKI_XEN_TMEMC_save_begin:
1387 break;
1388 }
1389
1390 break;
1391 }
1392 }
1393
1394 typedef
1395 struct {
1396 SyscallTableEntry entry;
1397 int nr_args;
1398 }
1399 XenHypercallTableEntry;
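/* One entry per known hypercall: the generic before/after wrapper pair
   plus the number of guest-register arguments the hypercall takes. */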
1400
1401 #define HYPX_(const, name, nr_args) \
1402 [const] = { { vgSysWrap_xen_##name##_before, NULL }, nr_args }
1403 #define HYPXY(const, name, nr_args) \
1404 [const] = { { vgSysWrap_xen_##name##_before, \
1405 vgSysWrap_xen_##name##_after }, \
1406 nr_args }
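/* HYPX_ registers a PRE wrapper only; HYPXY registers both PRE and
   POST.  The table index is the hypercall number, so e.g.

      HYPXY(__VKI_XEN_memory_op, memory_op, 2)

   places { vgSysWrap_xen_memory_op_before,
            vgSysWrap_xen_memory_op_after } with nr_args == 2 at slot
   __VKI_XEN_memory_op. */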
1407
1408 static XenHypercallTableEntry hypercall_table[] = {
1409 // __VKI_XEN_set_trap_table // 0
1410 // __VKI_XEN_mmu_update // 1
1411 // __VKI_XEN_set_gdt // 2
1412 // __VKI_XEN_stack_switch // 3
1413 // __VKI_XEN_set_callbacks // 4
1414
1415 // __VKI_XEN_fpu_taskswitch // 5
1416 // __VKI_XEN_sched_op_compat // 6
1417 // __VKI_XEN_platform_op // 7
1418 // __VKI_XEN_set_debugreg // 8
1419 // __VKI_XEN_get_debugreg // 9
1420
1421 // __VKI_XEN_update_descriptor // 10
1422 // // 11
1423 HYPXY(__VKI_XEN_memory_op, memory_op, 2), // 12
1424 // __VKI_XEN_multicall // 13
1425 // __VKI_XEN_update_va_mapping // 14
1426
1427 // __VKI_XEN_set_timer_op // 15
1428 HYPXY(__VKI_XEN_event_channel_op_compat, evtchn_op_compat, 1), // 16
1429 HYPXY(__VKI_XEN_xen_version, xen_version, 2), // 17
1430 // __VKI_XEN_console_io // 18
1431 // __VKI_XEN_physdev_op_compat // 19
1432
1433 HYPXY(__VKI_XEN_grant_table_op, grant_table_op, 3), // 20
1434 // __VKI_XEN_vm_assist // 21
1435 // __VKI_XEN_update_va_mapping_otherdomain // 22
1436 // __VKI_XEN_iret, iret // 23
1437 // __VKI_XEN_vcpu_op, vcpu_op // 24
1438
1439 // __VKI_XEN_set_segment_base // 25
1440 HYPXY(__VKI_XEN_mmuext_op, mmuext_op, 2), // 26
1441 // __VKI_XEN_xsm_op // 27
1442 // __VKI_XEN_nmi_op // 28
1443 // __VKI_XEN_sched_op // 29
1444
1445 // __VKI_XEN_callback_op // 30
1446 // __VKI_XEN_xenoprof_op // 31
1447 HYPXY(__VKI_XEN_event_channel_op, evtchn_op, 2), // 32
1448 // __VKI_XEN_physdev_op // 33
1449 HYPXY(__VKI_XEN_hvm_op, hvm_op, 2), // 34
1450
1451 HYPXY(__VKI_XEN_sysctl, sysctl, 1), // 35
1452 HYPXY(__VKI_XEN_domctl, domctl, 1), // 36
1453 // __VKI_XEN_kexec_op // 37
1454 HYPXY(__VKI_XEN_tmem_op, tmem_op, 1), // 38
1455 };
1456
static void bad_before ( ThreadId tid,
1458 SyscallArgLayout* layout,
1459 /*MOD*/SyscallArgs* args,
1460 /*OUT*/SyscallStatus* status,
1461 /*OUT*/UWord* flags )
1462 {
1463 VG_(dmsg)("WARNING: unhandled hypercall: %s\n",
1464 VG_SYSNUM_STRING_EXTRA(args->sysno));
1465 if (VG_(clo_verbosity) > 1) {
1466 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
1467 }
1468 VG_(dmsg)("You may be able to write your own handler.\n");
1469 VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
1470 VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
1471 VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
1472 VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");
1473
1474 SET_STATUS_Failure(VKI_ENOSYS);
1475 }
1476
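/* Fallback entry used for any hypercall without a wrapper: it warns,
   fails the call with ENOSYS and claims zero arguments. */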
1477 static XenHypercallTableEntry bad_hyper =
1478 { { bad_before, NULL }, 0 };
1479
static XenHypercallTableEntry* ML_(get_xen_hypercall_entry) ( UInt sysno )
1481 {
1482 XenHypercallTableEntry *ret = &bad_hyper;
1483
1484 const UInt hypercall_table_size
1485 = sizeof(hypercall_table) / sizeof(hypercall_table[0]);
1486
1487 /* Is it in the contiguous initial section of the table? */
1488 if (sysno < hypercall_table_size) {
1489 XenHypercallTableEntry* ent = &hypercall_table[sysno];
1490 if (ent->entry.before != NULL)
1491 ret = ent;
1492 }
1493
1494 /* Can't find a wrapper */
1495 return ret;
1496 }
1497
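/* Top-level dispatchers for the xen hypercall class: look up the
   per-hypercall entry, report the number of guest arguments back to
   the caller via ARG8, then run the specific PRE (and, after the
   call, POST) wrapper. */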
DEFN_PRE_TEMPLATE(xen, hypercall)
1499 {
1500 XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);
1501
1502 /* Return number of arguments consumed */
1503 ARG8 = ent->nr_args;
1504
1505 vg_assert(ent);
1506 vg_assert(ent->entry.before);
1507 (ent->entry.before)( tid, layout, arrghs, status, flags );
1508
1509 }
1510
DEFN_POST_TEMPLATE(xen, hypercall)
1512 {
1513 XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);
1514
1515 /* Return number of arguments consumed */
1516 ARG8 = ent->nr_args;
1517
1518 vg_assert(ent);
1519 if (ent->entry.after)
1520 (ent->entry.after)( tid, arrghs, status );
1521 }
1522
1523 #endif // defined(ENABLE_XEN)
1524