1 // Copyright 2017 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
6 #![allow(non_camel_case_types)]
7
8 //! This module implements the dynamically loaded client library API used by a crosvm plugin,
9 //! defined in `crosvm.h`. It implements the client half of the plugin protocol, which is defined in
10 //! the `protos::plugin` module.
11 //!
12 //! To implement the `crosvm.h` C API, each function and struct definition is repeated here, with
13 //! concrete definitions for each struct. Most functions are thin shims to the underlying object
14 //! oriented Rust implementation method. Most methods require a request over the crosvm connection,
15 //! which is done by creating a `MainRequest` or `VcpuRequest` protobuf and sending it over the
16 //! connection's socket. Then, that socket is read for a `MainResponse` or `VcpuResponse`, which is
17 //! translated to the appropriate return type for the C API.
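//!
//! As a rough illustration of one such round trip (a sketch, not a literal wire transcript), a
//! call like `crosvm_check_extension` becomes:
//!
//! ```text
//! plugin -> crosvm:  MainRequest { check_extension { extension: N } }    (over the UnixDatagram)
//! crosvm -> plugin:  MainResponse { errno: 0, check_extension { has_extension: true } }
//! ```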
18
19 use std::env;
20 use std::fs::File;
21 use std::io::{IoSlice, IoSliceMut, Read, Write};
22 use std::mem::{size_of, swap};
23 use std::os::raw::{c_int, c_void};
24 use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
25 use std::os::unix::net::UnixDatagram;
26 use std::ptr::{self, null_mut};
27 use std::result;
28 use std::slice;
29 use std::slice::{from_raw_parts, from_raw_parts_mut};
30 use std::sync::atomic::{AtomicUsize, Ordering};
31 use std::sync::Arc;
32
33 use libc::{E2BIG, EINVAL, ENOENT, ENOTCONN, EPROTO};
34
35 use protobuf::{Message, ProtobufEnum, RepeatedField};
36
37 use base::ScmSocket;
38
39 use kvm::dirty_log_bitmap_size;
40
41 use kvm_sys::{
42 kvm_clock_data, kvm_cpuid_entry2, kvm_debugregs, kvm_fpu, kvm_ioapic_state, kvm_lapic_state,
43 kvm_mp_state, kvm_msr_entry, kvm_pic_state, kvm_pit_state2, kvm_regs, kvm_sregs,
44 kvm_vcpu_events, kvm_xcrs,
45 };
46
47 use protos::plugin::*;
48
49 #[cfg(feature = "stats")]
50 mod stats;
51
52 // Needs to be large enough to receive all the VCPU sockets.
53 const MAX_DATAGRAM_FD: usize = 32;
54 // Needs to be large enough for a sizable dirty log.
55 const MAX_DATAGRAM_SIZE: usize = 0x40000;
56
57 const CROSVM_IRQ_ROUTE_IRQCHIP: u32 = 0;
58 const CROSVM_IRQ_ROUTE_MSI: u32 = 1;
59
60 const CROSVM_VCPU_EVENT_KIND_INIT: u32 = 0;
61 const CROSVM_VCPU_EVENT_KIND_IO_ACCESS: u32 = 1;
62 const CROSVM_VCPU_EVENT_KIND_PAUSED: u32 = 2;
63 const CROSVM_VCPU_EVENT_KIND_HYPERV_HCALL: u32 = 3;
64 const CROSVM_VCPU_EVENT_KIND_HYPERV_SYNIC: u32 = 4;
65
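// Environment variables crosvm sets for the plugin process: the GPU server and socket variables
// carry decimal file descriptor numbers (parsed in `crosvm_get_render_server_fd` and
// `crosvm_connect`), while the stats variable only needs to be present to enable stats printing.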
66 pub const CROSVM_GPU_SERVER_FD_ENV: &str = "CROSVM_GPU_SERVER_FD";
67 pub const CROSVM_SOCKET_ENV: &str = "CROSVM_SOCKET";
68 #[cfg(feature = "stats")]
69 pub const CROSVM_STATS_ENV: &str = "CROSVM_STATS";
70
71 #[repr(C)]
72 #[derive(Copy, Clone)]
73 pub struct crosvm_net_config {
74 tap_fd: c_int,
75 host_ipv4_address: u32,
76 netmask: u32,
77 host_mac_address: [u8; 6],
78 _reserved: [u8; 2],
79 }
80
81 #[repr(C)]
82 #[derive(Copy, Clone)]
83 pub struct anon_irqchip {
84 irqchip: u32,
85 pin: u32,
86 }
87
88 #[repr(C)]
89 #[derive(Copy, Clone)]
90 pub struct anon_msi {
91 address: u64,
92 data: u32,
93 }
94
95 #[repr(C)]
96 pub union anon_route {
97 irqchip: anon_irqchip,
98 msi: anon_msi,
99 reserved: [u8; 16],
100 }
101
102 #[repr(C)]
103 pub struct crosvm_irq_route {
104 irq_id: u32,
105 kind: u32,
106 route: anon_route,
107 }
108
109 const CROSVM_MAX_HINT_COUNT: u32 = 1;
110 const CROSVM_MAX_HINT_DETAIL_COUNT: u32 = 32;
111 const CROSVM_HINT_ON_WRITE: u16 = 1;
112
113 #[repr(C)]
114 pub struct crosvm_hint {
115 hint_version: u32,
116 reserved: u32,
117 address_space: u32,
118 address_flags: u16,
119 details_count: u16,
120 address: u64,
121 details: *const crosvm_hint_detail,
122 }
123
124 #[repr(C)]
125 pub struct crosvm_hint_detail {
126 match_rax: bool,
127 match_rbx: bool,
128 match_rcx: bool,
129 match_rdx: bool,
130 reserved1: [u8; 4],
131 rax: u64,
132 rbx: u64,
133 rcx: u64,
134 rdx: u64,
135 send_sregs: bool,
136 send_debugregs: bool,
137 reserved2: [u8; 6],
138 }
139
140 fn proto_error_to_int(e: protobuf::ProtobufError) -> c_int {
141 match e {
142 protobuf::ProtobufError::IoError(e) => e.raw_os_error().unwrap_or(EINVAL),
143 _ => EINVAL,
144 }
145 }
146
147 fn fd_cast<F: FromRawFd>(f: File) -> F {
148 // Safe because we are transferring unique ownership.
149 unsafe { F::from_raw_fd(f.into_raw_fd()) }
150 }
151
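/// Hands out locally unique ids for the transaction objects below (io events, memory regions,
/// irq events). `alloc` is a plain atomic counter; `free` only rolls the counter back when the
/// freed id happens to be the most recently allocated one, so ids are not generally reused.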
152 #[derive(Default)]
153 struct IdAllocator(AtomicUsize);
154
155 impl IdAllocator {
156 fn alloc(&self) -> u32 {
157 self.0.fetch_add(1, Ordering::Relaxed) as u32
158 }
159
160 fn free(&self, id: u32) {
161 let _ = self.0.compare_exchange(
162 id as usize + 1,
163 id as usize,
164 Ordering::Relaxed,
165 Ordering::Relaxed,
166 );
167 }
168 }
169
170 #[repr(u8)]
171 #[derive(Debug, Clone, Copy)]
172 pub enum Stat {
173 IoEvent,
174 MemoryGetDirtyLog,
175 IrqEventGetFd,
176 IrqEventGetResampleFd,
177 Connect,
178 DestroyConnection,
179 GetShutdownEvent,
180 CheckExtension,
181 EnableVmCapability,
182 EnableVcpuCapability,
183 GetSupportedCpuid,
184 GetEmulatedCpuid,
185 GetHypervCpuid,
186 GetMsrIndexList,
187 NetGetConfig,
188 ReserveRange,
189 ReserveAsyncWriteRange,
190 SetIrq,
191 SetIrqRouting,
192 GetPicState,
193 SetPicState,
194 GetIoapicState,
195 SetIoapicState,
196 GetPitState,
197 SetPitState,
198 GetClock,
199 SetClock,
200 SetIdentityMapAddr,
201 PauseVcpus,
202 Start,
203 GetVcpu,
204 VcpuWait,
205 VcpuResume,
206 VcpuGetRegs,
207 VcpuSetRegs,
208 VcpuGetSregs,
209 VcpuSetSregs,
210 GetFpu,
211 SetFpu,
212 GetDebugRegs,
213 SetDebugRegs,
214 GetXCRegs,
215 SetXCRegs,
216 VcpuGetMsrs,
217 VcpuSetMsrs,
218 VcpuSetCpuid,
219 VcpuGetLapicState,
220 VcpuSetLapicState,
221 VcpuGetMpState,
222 VcpuSetMpState,
223 VcpuGetVcpuEvents,
224 VcpuSetVcpuEvents,
225 NewConnection,
226 SetHypercallHint,
227
228 Count,
229 }
230
231 #[cfg(feature = "stats")]
232 fn record(a: Stat) -> stats::StatUpdater {
233 unsafe { stats::STATS.record(a) }
234 }
235
236 #[cfg(not(feature = "stats"))]
237 fn record(_a: Stat) -> u32 {
238 0
239 }
240
241 #[cfg(feature = "stats")]
242 fn printstats() {
243 // Unsafe due to racy access - OK for stats
244 if std::env::var(CROSVM_STATS_ENV).is_ok() {
245 unsafe {
246 stats::STATS.print();
247 }
248 }
249 }
250
251 #[cfg(not(feature = "stats"))]
252 fn printstats() {}
253
254 pub struct crosvm {
255 id_allocator: Arc<IdAllocator>,
256 socket: UnixDatagram,
257 request_buffer: Vec<u8>,
258 response_buffer: Vec<u8>,
259 vcpus: Arc<[crosvm_vcpu]>,
260 }
261
262 impl crosvm {
263 fn from_connection(socket: UnixDatagram) -> result::Result<crosvm, c_int> {
264 let mut crosvm = crosvm {
265 id_allocator: Default::default(),
266 socket,
267 request_buffer: Vec::new(),
268 response_buffer: vec![0; MAX_DATAGRAM_SIZE],
269 vcpus: Arc::new([]),
270 };
271 crosvm.load_all_vcpus()?;
272 Ok(crosvm)
273 }
274
275 fn new(
276 id_allocator: Arc<IdAllocator>,
277 socket: UnixDatagram,
278 vcpus: Arc<[crosvm_vcpu]>,
279 ) -> crosvm {
280 crosvm {
281 id_allocator,
282 socket,
283 request_buffer: Vec::new(),
284 response_buffer: vec![0; MAX_DATAGRAM_SIZE],
285 vcpus,
286 }
287 }
288
289 fn get_id_allocator(&self) -> &IdAllocator {
290 &*self.id_allocator
291 }
292
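/// Performs one request/response round trip on the main crosvm connection: serializes `request`,
/// sends it (plus any `fds`) over the datagram socket, then receives and parses a `MainResponse`
/// along with any file descriptors crosvm passed back. A non-zero `errno` in the response is
/// returned as the error value.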
293 fn main_transaction(
294 &mut self,
295 request: &MainRequest,
296 fds: &[RawFd],
297 ) -> result::Result<(MainResponse, Vec<File>), c_int> {
298 self.request_buffer.clear();
299 request
300 .write_to_vec(&mut self.request_buffer)
301 .map_err(proto_error_to_int)?;
302 self.socket
303 .send_with_fds(&[IoSlice::new(self.request_buffer.as_slice())], fds)
304 .map_err(|e| -e.errno())?;
305
306 let mut datagram_fds = [0; MAX_DATAGRAM_FD];
307 let (msg_size, fd_count) = self
308 .socket
309 .recv_with_fds(
310 IoSliceMut::new(&mut self.response_buffer),
311 &mut datagram_fds,
312 )
313 .map_err(|e| -e.errno())?;
314 // Safe because the first fd_count fds from recv_with_fds are owned by us and valid.
315 let datagram_files = datagram_fds[..fd_count]
316 .iter()
317 .map(|&fd| unsafe { File::from_raw_fd(fd) })
318 .collect();
319
320 let response: MainResponse = Message::parse_from_bytes(&self.response_buffer[..msg_size])
321 .map_err(proto_error_to_int)?;
322 if response.errno != 0 {
323 return Err(response.errno);
324 }
325 Ok((response, datagram_files))
326 }
327
328 fn try_clone(&mut self) -> result::Result<crosvm, c_int> {
329 let mut r = MainRequest::new();
330 r.mut_new_connection();
331 let mut files = self.main_transaction(&r, &[])?.1;
332 match files.pop() {
333 Some(new_socket) => Ok(crosvm::new(
334 self.id_allocator.clone(),
335 fd_cast(new_socket),
336 self.vcpus.clone(),
337 )),
338 None => Err(EPROTO),
339 }
340 }
341
342 fn destroy(&mut self, id: u32) -> result::Result<(), c_int> {
343 let mut r = MainRequest::new();
344 r.mut_destroy().id = id;
345 self.main_transaction(&r, &[])?;
346 self.get_id_allocator().free(id);
347 printstats();
348 Ok(())
349 }
350
351 // Only call this from the `from_connection` constructor.
352 fn load_all_vcpus(&mut self) -> result::Result<(), c_int> {
353 let mut r = MainRequest::new();
354 r.mut_get_vcpus();
355 let (_, mut files) = self.main_transaction(&r, &[])?;
356 if files.is_empty() || files.len() % 2 != 0 {
357 return Err(EPROTO);
358 }
359
360 let mut vcpus = Vec::with_capacity(files.len() / 2);
361 while files.len() > 1 {
362 let write_pipe = files.remove(0);
363 let read_pipe = files.remove(0);
364 vcpus.push(crosvm_vcpu::new(fd_cast(read_pipe), fd_cast(write_pipe)));
365 }
366 self.vcpus = Arc::from(vcpus);
367 Ok(())
368 }
369
370 fn get_shutdown_event(&mut self) -> result::Result<File, c_int> {
371 let mut r = MainRequest::new();
372 r.mut_get_shutdown_eventfd();
373 let (_, mut files) = self.main_transaction(&r, &[])?;
374 match files.pop() {
375 Some(f) => Ok(f),
376 None => Err(EPROTO),
377 }
378 }
379
380 fn check_extension(&mut self, extension: u32) -> result::Result<bool, c_int> {
381 let mut r = MainRequest::new();
382 r.mut_check_extension().extension = extension;
383 let (response, _) = self.main_transaction(&r, &[])?;
384 if !response.has_check_extension() {
385 return Err(EPROTO);
386 }
387 Ok(response.get_check_extension().has_extension)
388 }
389
390 fn get_supported_cpuid(
391 &mut self,
392 cpuid_entries: &mut [kvm_cpuid_entry2],
393 cpuid_count: &mut usize,
394 ) -> result::Result<(), c_int> {
395 *cpuid_count = 0;
396
397 let mut r = MainRequest::new();
398 r.mut_get_supported_cpuid();
399
400 let (response, _) = self.main_transaction(&r, &[])?;
401 if !response.has_get_supported_cpuid() {
402 return Err(EPROTO);
403 }
404
405 let supported_cpuids: &MainResponse_CpuidResponse = response.get_get_supported_cpuid();
406
407 *cpuid_count = supported_cpuids.get_entries().len();
408 if *cpuid_count > cpuid_entries.len() {
409 return Err(E2BIG);
410 }
411
412 for (proto_entry, kvm_entry) in supported_cpuids
413 .get_entries()
414 .iter()
415 .zip(cpuid_entries.iter_mut())
416 {
417 *kvm_entry = cpuid_proto_to_kvm(proto_entry);
418 }
419
420 Ok(())
421 }
422
423 fn get_emulated_cpuid(
424 &mut self,
425 cpuid_entries: &mut [kvm_cpuid_entry2],
426 cpuid_count: &mut usize,
427 ) -> result::Result<(), c_int> {
428 *cpuid_count = 0;
429
430 let mut r = MainRequest::new();
431 r.mut_get_emulated_cpuid();
432
433 let (response, _) = self.main_transaction(&r, &[])?;
434 if !response.has_get_emulated_cpuid() {
435 return Err(EPROTO);
436 }
437
438 let emulated_cpuids: &MainResponse_CpuidResponse = response.get_get_emulated_cpuid();
439
440 *cpuid_count = emulated_cpuids.get_entries().len();
441 if *cpuid_count > cpuid_entries.len() {
442 return Err(E2BIG);
443 }
444
445 for (proto_entry, kvm_entry) in emulated_cpuids
446 .get_entries()
447 .iter()
448 .zip(cpuid_entries.iter_mut())
449 {
450 *kvm_entry = cpuid_proto_to_kvm(proto_entry);
451 }
452
453 Ok(())
454 }
455
456 fn get_msr_index_list(
457 &mut self,
458 msr_indices: &mut [u32],
459 msr_count: &mut usize,
460 ) -> result::Result<(), c_int> {
461 *msr_count = 0;
462
463 let mut r = MainRequest::new();
464 r.mut_get_msr_index_list();
465
466 let (response, _) = self.main_transaction(&r, &[])?;
467 if !response.has_get_msr_index_list() {
468 return Err(EPROTO);
469 }
470
471 let msr_list: &MainResponse_MsrListResponse = response.get_get_msr_index_list();
472
473 *msr_count = msr_list.get_indices().len();
474 if *msr_count > msr_indices.len() {
475 return Err(E2BIG);
476 }
477
478 for (proto_entry, kvm_entry) in msr_list.get_indices().iter().zip(msr_indices.iter_mut()) {
479 *kvm_entry = *proto_entry;
480 }
481
482 Ok(())
483 }
484
485 fn reserve_range(
486 &mut self,
487 space: u32,
488 start: u64,
489 length: u64,
490 async_write: bool,
491 ) -> result::Result<(), c_int> {
492 let mut r = MainRequest::new();
493 let reserve: &mut MainRequest_ReserveRange = r.mut_reserve_range();
494 reserve.space = AddressSpace::from_i32(space as i32).ok_or(EINVAL)?;
495 reserve.start = start;
496 reserve.length = length;
497 reserve.async_write = async_write;
498
499 self.main_transaction(&r, &[])?;
500 Ok(())
501 }
502
503 fn set_irq(&mut self, irq_id: u32, active: bool) -> result::Result<(), c_int> {
504 let mut r = MainRequest::new();
505 let set_irq: &mut MainRequest_SetIrq = r.mut_set_irq();
506 set_irq.irq_id = irq_id;
507 set_irq.active = active;
508
509 self.main_transaction(&r, &[])?;
510 Ok(())
511 }
512
513 fn set_irq_routing(&mut self, routing: &[crosvm_irq_route]) -> result::Result<(), c_int> {
514 let mut r = MainRequest::new();
515 let set_irq_routing: &mut RepeatedField<MainRequest_SetIrqRouting_Route> =
516 r.mut_set_irq_routing().mut_routes();
517 for route in routing {
518 let mut entry = MainRequest_SetIrqRouting_Route::new();
519 entry.irq_id = route.irq_id;
520 match route.kind {
521 CROSVM_IRQ_ROUTE_IRQCHIP => {
522 let irqchip: &mut MainRequest_SetIrqRouting_Route_Irqchip = entry.mut_irqchip();
523 // Safe because route.kind indicates which union field is valid.
524 irqchip.irqchip = unsafe { route.route.irqchip }.irqchip;
525 irqchip.pin = unsafe { route.route.irqchip }.pin;
526 }
527 CROSVM_IRQ_ROUTE_MSI => {
528 let msi: &mut MainRequest_SetIrqRouting_Route_Msi = entry.mut_msi();
529 // Safe because route.kind indicates which union field is valid.
530 msi.address = unsafe { route.route.msi }.address;
531 msi.data = unsafe { route.route.msi }.data;
532 }
533 _ => return Err(EINVAL),
534 }
535 set_irq_routing.push(entry);
536 }
537
538 self.main_transaction(&r, &[])?;
539 Ok(())
540 }
541
542 fn set_hint(
543 &mut self,
544 space: u32,
545 addr: u64,
546 on_write: bool,
547 hints: &[crosvm_hint_detail],
548 ) -> result::Result<(), c_int> {
549 let mut r = MainRequest::new();
550 let req: &mut MainRequest_SetCallHint = r.mut_set_call_hint();
551 let set_hints: &mut RepeatedField<MainRequest_SetCallHint_RegHint> = req.mut_hints();
552 for hint in hints {
553 let mut entry = MainRequest_SetCallHint_RegHint::new();
554 entry.match_rax = hint.match_rax;
555 entry.match_rbx = hint.match_rbx;
556 entry.match_rcx = hint.match_rcx;
557 entry.match_rdx = hint.match_rdx;
558 entry.rax = hint.rax;
559 entry.rbx = hint.rbx;
560 entry.rcx = hint.rcx;
561 entry.rdx = hint.rdx;
562 entry.send_sregs = hint.send_sregs;
563 entry.send_debugregs = hint.send_debugregs;
564 set_hints.push(entry);
565 }
566 req.space = AddressSpace::from_i32(space as i32).ok_or(EINVAL)?;
567 req.address = addr;
568 req.on_write = on_write;
569
570 self.main_transaction(&r, &[])?;
571 Ok(())
572 }
573
574 fn get_state(
575 &mut self,
576 state_set: MainRequest_StateSet,
577 out: &mut [u8],
578 ) -> result::Result<(), c_int> {
579 let mut r = MainRequest::new();
580 r.mut_get_state().set = state_set;
581 let (response, _) = self.main_transaction(&r, &[])?;
582 if !response.has_get_state() {
583 return Err(EPROTO);
584 }
585 let get_state: &MainResponse_GetState = response.get_get_state();
586 if get_state.state.len() != out.len() {
587 return Err(EPROTO);
588 }
589 out.copy_from_slice(&get_state.state);
590 Ok(())
591 }
592
593 fn set_state(
594 &mut self,
595 state_set: MainRequest_StateSet,
596 new_state: &[u8],
597 ) -> result::Result<(), c_int> {
598 let mut r = MainRequest::new();
599 let set_state: &mut MainRequest_SetState = r.mut_set_state();
600 set_state.set = state_set;
601 set_state.state = new_state.to_vec();
602
603 self.main_transaction(&r, &[])?;
604 Ok(())
605 }
606
607 fn set_identity_map_addr(&mut self, addr: u32) -> result::Result<(), c_int> {
608 let mut r = MainRequest::new();
609 r.mut_set_identity_map_addr().address = addr;
610
611 self.main_transaction(&r, &[])?;
612 Ok(())
613 }
614
615 fn pause_vcpus(&mut self, cpu_mask: u64, user: *mut c_void) -> result::Result<(), c_int> {
616 let mut r = MainRequest::new();
617 let pause_vcpus: &mut MainRequest_PauseVcpus = r.mut_pause_vcpus();
618 pause_vcpus.cpu_mask = cpu_mask;
619 pause_vcpus.user = user as u64;
620 self.main_transaction(&r, &[])?;
621 Ok(())
622 }
623
624 fn start(&mut self) -> result::Result<(), c_int> {
625 let mut r = MainRequest::new();
626 r.mut_start();
627 self.main_transaction(&r, &[])?;
628 Ok(())
629 }
630
631 fn get_vcpu(&mut self, cpu_id: u32) -> Result<*mut crosvm_vcpu, c_int> {
632 if let Some(vcpu) = self.vcpus.get(cpu_id as usize) {
633 Ok(vcpu as *const crosvm_vcpu as *mut crosvm_vcpu)
634 } else {
635 Err(ENOENT)
636 }
637 }
638
639 fn get_net_config(&mut self) -> result::Result<crosvm_net_config, c_int> {
640 let mut r = MainRequest::new();
641 r.mut_get_net_config();
642
643 let (response, mut files) = self.main_transaction(&r, &[])?;
644 if !response.has_get_net_config() {
645 return Err(EPROTO);
646 }
647 let config = response.get_get_net_config();
648
649 match files.pop() {
650 Some(f) => {
651 let mut net_config = crosvm_net_config {
652 tap_fd: f.into_raw_fd(),
653 host_ipv4_address: config.host_ipv4_address,
654 netmask: config.netmask,
655 host_mac_address: [0; 6],
656 _reserved: [0; 2],
657 };
658
659 let mac_addr = config.get_host_mac_address();
660 if mac_addr.len() != net_config.host_mac_address.len() {
661 return Err(EPROTO);
662 }
663 net_config.host_mac_address.copy_from_slice(mac_addr);
664
665 Ok(net_config)
666 }
667 None => Err(EPROTO),
668 }
669 }
670 }
671
672 /// This helper macro implements the C API's constructor/destructor for a given type. Because they
673 /// all follow the same pattern and include lots of boilerplate unsafe code, it makes sense to write
674 /// it once with this helper macro.
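///
/// For example, the `crosvm_io_event` invocation below expands to `crosvm_create_io_event` and
/// `crosvm_destroy_io_event`, each returning 0 on success or a negative errno value on failure.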
675 macro_rules! impl_ctor_dtor {
676 (
677 $t:ident,
678 $ctor:ident ( $( $x:ident: $y:ty ),* ),
679 $dtor:ident,
680 ) => {
681 #[allow(unused_unsafe)]
682 #[no_mangle]
683 pub unsafe extern fn $ctor(self_: *mut crosvm, $($x: $y,)* obj_ptr: *mut *mut $t) -> c_int {
684 let self_ = &mut (*self_);
685 match $t::create(self_, $($x,)*) {
686 Ok(obj) => {
687 *obj_ptr = Box::into_raw(Box::new(obj));
688 0
689 }
690 Err(e) => -e,
691 }
692 }
693 #[no_mangle]
694 pub unsafe extern fn $dtor(self_: *mut crosvm, obj_ptr: *mut *mut $t) -> c_int {
695 let self_ = &mut (*self_);
696 let obj = Box::from_raw(*obj_ptr);
697 match self_.destroy(obj.id) {
698 Ok(_) => {
699 *obj_ptr = null_mut();
700 0
701 }
702 Err(e) => {
703 Box::into_raw(obj);
704 -e
705 }
706 }
707 }
708 }
709 }
710
711 pub struct crosvm_io_event {
712 id: u32,
713 evt: File,
714 }
715
716 impl crosvm_io_event {
717 // Clippy: we use ptr::read_unaligned to read from pointers that may be
718 // underaligned, because directly dereferencing such a pointer would be
719 // undefined behavior in Rust.
720 //
721 // Lint can be unsuppressed once Clippy recognizes this pattern as correct.
722 // https://github.com/rust-lang/rust-clippy/issues/2881
723 #[allow(clippy::cast_ptr_alignment)]
724 unsafe fn create(
725 crosvm: &mut crosvm,
726 space: u32,
727 addr: u64,
728 length: u32,
729 datamatch: *const u8,
730 ) -> result::Result<crosvm_io_event, c_int> {
731 let datamatch = match length {
732 0 => 0,
733 1 => ptr::read_unaligned(datamatch as *const u8) as u64,
734 2 => ptr::read_unaligned(datamatch as *const u16) as u64,
735 4 => ptr::read_unaligned(datamatch as *const u32) as u64,
736 8 => ptr::read_unaligned(datamatch as *const u64),
737 _ => return Err(EINVAL),
738 };
739 Self::safe_create(crosvm, space, addr, length, datamatch)
740 }
741
742 fn safe_create(
743 crosvm: &mut crosvm,
744 space: u32,
745 addr: u64,
746 length: u32,
747 datamatch: u64,
748 ) -> result::Result<crosvm_io_event, c_int> {
749 let id = crosvm.get_id_allocator().alloc();
750
751 let mut r = MainRequest::new();
752 let create: &mut MainRequest_Create = r.mut_create();
753 create.id = id;
754 let io_event: &mut MainRequest_Create_IoEvent = create.mut_io_event();
755 io_event.space = AddressSpace::from_i32(space as i32).ok_or(EINVAL)?;
756 io_event.address = addr;
757 io_event.length = length;
758 io_event.datamatch = datamatch;
759
760 let ret = match crosvm.main_transaction(&r, &[]) {
761 Ok((_, mut files)) => match files.pop() {
762 Some(evt) => return Ok(crosvm_io_event { id, evt }),
763 None => EPROTO,
764 },
765 Err(e) => e,
766 };
767 crosvm.get_id_allocator().free(id);
768 Err(ret)
769 }
770 }
771
772 impl_ctor_dtor!(
773 crosvm_io_event,
774 crosvm_create_io_event(space: u32, addr: u64, len: u32, datamatch: *const u8),
775 crosvm_destroy_io_event,
776 );
777
778 #[no_mangle]
779 pub unsafe extern "C" fn crosvm_io_event_fd(this: *mut crosvm_io_event) -> c_int {
780 let _u = record(Stat::IoEvent);
781 (*this).evt.as_raw_fd()
782 }
783
784 pub struct crosvm_memory {
785 id: u32,
786 length: u64,
787 }
788
789 impl crosvm_memory {
790 fn create(
791 crosvm: &mut crosvm,
792 fd: c_int,
793 offset: u64,
794 length: u64,
795 start: u64,
796 read_only: bool,
797 dirty_log: bool,
798 ) -> result::Result<crosvm_memory, c_int> {
799 const PAGE_MASK: u64 = 0x0fff;
800 if offset & PAGE_MASK != 0 || length & PAGE_MASK != 0 {
801 return Err(EINVAL);
802 }
803 let id = crosvm.get_id_allocator().alloc();
804
805 let mut r = MainRequest::new();
806 let create: &mut MainRequest_Create = r.mut_create();
807 create.id = id;
808 let memory: &mut MainRequest_Create_Memory = create.mut_memory();
809 memory.offset = offset;
810 memory.start = start;
811 memory.length = length;
812 memory.read_only = read_only;
813 memory.dirty_log = dirty_log;
814
815 let ret = match crosvm.main_transaction(&r, &[fd]) {
816 Ok(_) => return Ok(crosvm_memory { id, length }),
817 Err(e) => e,
818 };
819 crosvm.get_id_allocator().free(id);
820 Err(ret)
821 }
822
823 fn get_dirty_log(&mut self, crosvm: &mut crosvm) -> result::Result<Vec<u8>, c_int> {
824 let mut r = MainRequest::new();
825 r.mut_dirty_log().id = self.id;
826 let (mut response, _) = crosvm.main_transaction(&r, &[])?;
827 if !response.has_dirty_log() {
828 return Err(EPROTO);
829 }
830 Ok(response.take_dirty_log().bitmap)
831 }
832 }
833
834 impl_ctor_dtor!(
835 crosvm_memory,
836 crosvm_create_memory(
837 fd: c_int,
838 offset: u64,
839 length: u64,
840 start: u64,
841 read_only: bool,
842 dirty_log: bool
843 ),
844 crosvm_destroy_memory,
845 );
846
847 #[no_mangle]
848 pub unsafe extern "C" fn crosvm_memory_get_dirty_log(
849 crosvm: *mut crosvm,
850 this: *mut crosvm_memory,
851 log: *mut u8,
852 ) -> c_int {
853 let _u = record(Stat::MemoryGetDirtyLog);
854 let crosvm = &mut *crosvm;
855 let this = &mut *this;
856 let log_slice = slice::from_raw_parts_mut(log, dirty_log_bitmap_size(this.length as usize));
857 match this.get_dirty_log(crosvm) {
858 Ok(bitmap) => {
859 if bitmap.len() == log_slice.len() {
860 log_slice.copy_from_slice(&bitmap);
861 0
862 } else {
863 -EPROTO
864 }
865 }
866 Err(e) => -e,
867 }
868 }
869
870 pub struct crosvm_irq_event {
871 id: u32,
872 trigger_evt: File,
873 resample_evt: File,
874 }
875
876 impl crosvm_irq_event {
877 fn create(crosvm: &mut crosvm, irq_id: u32) -> result::Result<crosvm_irq_event, c_int> {
878 let id = crosvm.get_id_allocator().alloc();
879
880 let mut r = MainRequest::new();
881 let create: &mut MainRequest_Create = r.mut_create();
882 create.id = id;
883 let irq_event: &mut MainRequest_Create_IrqEvent = create.mut_irq_event();
884 irq_event.irq_id = irq_id;
885 irq_event.resample = true;
886
887 let ret = match crosvm.main_transaction(&r, &[]) {
888 Ok((_, mut files)) => {
889 if files.len() >= 2 {
890 let resample_evt = files.pop().unwrap();
891 let trigger_evt = files.pop().unwrap();
892 return Ok(crosvm_irq_event {
893 id,
894 trigger_evt,
895 resample_evt,
896 });
897 }
898 EPROTO
899 }
900 Err(e) => e,
901 };
902 crosvm.get_id_allocator().free(id);
903 Err(ret)
904 }
905 }
906
907 impl_ctor_dtor!(
908 crosvm_irq_event,
909 crosvm_create_irq_event(irq_id: u32),
910 crosvm_destroy_irq_event,
911 );
912
913 #[no_mangle]
914 pub unsafe extern "C" fn crosvm_irq_event_get_fd(this: *mut crosvm_irq_event) -> c_int {
915 let _u = record(Stat::IrqEventGetFd);
916 (*this).trigger_evt.as_raw_fd()
917 }
918
919 #[no_mangle]
920 pub unsafe extern "C" fn crosvm_irq_event_get_resample_fd(this: *mut crosvm_irq_event) -> c_int {
921 let _u = record(Stat::IrqEventGetResampleFd);
922 (*this).resample_evt.as_raw_fd()
923 }
924
925 #[allow(dead_code)]
926 #[derive(Copy, Clone)]
927 #[repr(C)]
928 struct anon_io_access {
929 address_space: u32,
930 __reserved0: [u8; 4],
931 address: u64,
932 data: *mut u8,
933 length: u32,
934 is_write: u8,
935 no_resume: u8,
936 __reserved1: [u8; 2],
937 }
938
939 #[derive(Copy, Clone)]
940 #[repr(C)]
941 struct anon_hyperv_call {
942 input: u64,
943 result: *mut u8,
944 params: [u64; 2],
945 }
946
947 #[derive(Copy, Clone)]
948 #[repr(C)]
949 struct anon_hyperv_synic {
950 msr: u32,
951 reserved: u32,
952 control: u64,
953 evt_page: u64,
954 msg_page: u64,
955 }
956
957 #[repr(C)]
958 union anon_vcpu_event {
959 io_access: anon_io_access,
960 user: *mut c_void,
961 hyperv_call: anon_hyperv_call,
962 hyperv_synic: anon_hyperv_synic,
963 #[allow(dead_code)]
964 __reserved: [u8; 64],
965 }
966
967 #[repr(C)]
968 pub struct crosvm_vcpu_event {
969 kind: u32,
970 __reserved: [u8; 4],
971 event: anon_vcpu_event,
972 }
973
974 // |get| tracks if the |cache| contains a cached value that can service get()
975 // requests. A set() call will populate |cache| and |set| to true to record
976 // that the next resume() should apply the state. We've got two choices on
977 // what to do about |get| on a set(): 1) leave it as true, or 2) clear it and
978 // have any call to get() first apply any pending set. Currently #2 is used
979 // to favor correctness over performance (it gives KVM a chance to
980 // modify/massage the values input to the set call). A plugin will rarely
981 // (if ever) issue a get() after a set() on the same vcpu exit, so opting for
982 // #1 is unlikely to provide a tangible performance gain.
983 pub struct crosvm_vcpu_reg_cache {
984 get: bool,
985 set: bool,
986 cache: Vec<u8>,
987 }
988
989 pub struct crosvm_vcpu {
990 read_pipe: File,
991 write_pipe: File,
992 send_init: bool,
993 request_buffer: Vec<u8>,
994 response_buffer: Vec<u8>,
995 response_base: usize,
996 response_length: usize,
997 resume_data: Vec<u8>,
998
999 regs: crosvm_vcpu_reg_cache,
1000 sregs: crosvm_vcpu_reg_cache,
1001 debugregs: crosvm_vcpu_reg_cache,
1002 }
1003
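/// Decodes a protobuf varint length prefix from `data`, returning `(value, bytes_consumed)`, or
/// `(0, 0)` if the input is truncated or the value overflows 32 bits. For example, a well-formed
/// prefix of `[0x96, 0x01]` decodes to `(150, 2)`.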
1004 fn read_varint32(data: &[u8]) -> (u32, usize) {
1005 let mut value: u32 = 0;
1006 let mut shift: u32 = 0;
1007 for (i, &b) in data.iter().enumerate() {
1008 if b < 0x80 {
1009 return match (b as u32).checked_shl(shift) {
1010 None => (0, 0),
1011 Some(b) => (value | b, i + 1),
1012 };
1013 }
1014 match ((b as u32) & 0x7F).checked_shl(shift) {
1015 None => return (0, 0),
1016 Some(b) => value |= b,
1017 }
1018 shift += 7;
1019 }
1020 (0, 0)
1021 }
1022
1023 impl crosvm_vcpu {
1024 fn new(read_pipe: File, write_pipe: File) -> crosvm_vcpu {
1025 crosvm_vcpu {
1026 read_pipe,
1027 write_pipe,
1028 send_init: true,
1029 request_buffer: Vec::new(),
1030 response_buffer: vec![0; MAX_DATAGRAM_SIZE],
1031 response_base: 0,
1032 response_length: 0,
1033 resume_data: Vec::new(),
1034 regs: crosvm_vcpu_reg_cache {
1035 get: false,
1036 set: false,
1037 cache: vec![],
1038 },
1039 sregs: crosvm_vcpu_reg_cache {
1040 get: false,
1041 set: false,
1042 cache: vec![],
1043 },
1044 debugregs: crosvm_vcpu_reg_cache {
1045 get: false,
1046 set: false,
1047 cache: vec![],
1048 },
1049 }
1050 }
1051 fn vcpu_send(&mut self, request: &VcpuRequest) -> result::Result<(), c_int> {
1052 self.request_buffer.clear();
1053 request
1054 .write_to_vec(&mut self.request_buffer)
1055 .map_err(proto_error_to_int)?;
1056 self.write_pipe
1057 .write(self.request_buffer.as_slice())
1058 .map_err(|e| -e.raw_os_error().unwrap_or(EINVAL))?;
1059 Ok(())
1060 }
1061
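/// Reads the next length-delimited `VcpuResponse` from the vcpu read pipe. A single `read` may
/// return several varint-prefixed messages back to back, so leftover bytes are kept in
/// `response_buffer` (tracked by `response_base`/`response_length`) and consumed by later calls.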
1062 fn vcpu_recv(&mut self) -> result::Result<VcpuResponse, c_int> {
1063 if self.response_length == 0 {
1064 let msg_size = self
1065 .read_pipe
1066 .read(&mut self.response_buffer)
1067 .map_err(|e| -e.raw_os_error().unwrap_or(EINVAL))?;
1068 self.response_base = 0;
1069 self.response_length = msg_size;
1070 }
1071 if self.response_length == 0 {
1072 return Err(EINVAL);
1073 }
1074 let (value, bytes) = read_varint32(
1075 &self.response_buffer[self.response_base..self.response_base + self.response_length],
1076 );
1077 let total_size: usize = bytes + value as usize;
1078 if bytes == 0 || total_size > self.response_length {
1079 return Err(EINVAL);
1080 }
1081 let response: VcpuResponse = Message::parse_from_bytes(
1082 &self.response_buffer[self.response_base + bytes..self.response_base + total_size],
1083 )
1084 .map_err(proto_error_to_int)?;
1085 self.response_base += total_size;
1086 self.response_length -= total_size;
1087 if response.errno != 0 {
1088 return Err(response.errno);
1089 }
1090 Ok(response)
1091 }
1092
1093 fn vcpu_transaction(&mut self, request: &VcpuRequest) -> result::Result<VcpuResponse, c_int> {
1094 self.vcpu_send(request)?;
1095 let response: VcpuResponse = self.vcpu_recv()?;
1096 Ok(response)
1097 }
1098
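/// Blocks until crosvm reports the next vcpu event and translates the protobuf `Wait` response
/// into one of the `CROSVM_VCPU_EVENT_KIND_*` values in `event`. For I/O accesses it also stashes
/// any register snapshots included in the response in the local reg caches, so that later
/// `crosvm_vcpu_get_*` calls can be served without another round trip.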
1099 fn wait(&mut self, event: &mut crosvm_vcpu_event) -> result::Result<(), c_int> {
1100 if self.send_init {
1101 self.send_init = false;
1102 let mut r = VcpuRequest::new();
1103 r.mut_wait();
1104 self.vcpu_send(&r)?;
1105 }
1106 let mut response: VcpuResponse = self.vcpu_recv()?;
1107 if !response.has_wait() {
1108 return Err(EPROTO);
1109 }
1110 let wait: &mut VcpuResponse_Wait = response.mut_wait();
1111 if wait.has_init() {
1112 event.kind = CROSVM_VCPU_EVENT_KIND_INIT;
1113 self.regs.get = false;
1114 self.sregs.get = false;
1115 self.debugregs.get = false;
1116 Ok(())
1117 } else if wait.has_io() {
1118 let mut io: VcpuResponse_Wait_Io = wait.take_io();
1119 event.kind = CROSVM_VCPU_EVENT_KIND_IO_ACCESS;
1120 event.event.io_access = anon_io_access {
1121 address_space: io.space.value() as u32,
1122 __reserved0: Default::default(),
1123 address: io.address,
1124 data: io.data.as_mut_ptr(),
1125 length: io.data.len() as u32,
1126 is_write: io.is_write as u8,
1127 no_resume: io.no_resume as u8,
1128 __reserved1: Default::default(),
1129 };
1130 self.resume_data = io.data;
1131 self.regs.get = !io.regs.is_empty();
1132 if self.regs.get {
1133 swap(&mut self.regs.cache, &mut io.regs);
1134 }
1135 self.sregs.get = !io.sregs.is_empty();
1136 if self.sregs.get {
1137 swap(&mut self.sregs.cache, &mut io.sregs);
1138 }
1139 self.debugregs.get = !io.debugregs.is_empty();
1140 if self.debugregs.get {
1141 swap(&mut self.debugregs.cache, &mut io.debugregs);
1142 }
1143 Ok(())
1144 } else if wait.has_user() {
1145 let user: &VcpuResponse_Wait_User = wait.get_user();
1146 event.kind = CROSVM_VCPU_EVENT_KIND_PAUSED;
1147 event.event.user = user.user as *mut c_void;
1148 self.regs.get = false;
1149 self.sregs.get = false;
1150 self.debugregs.get = false;
1151 Ok(())
1152 } else if wait.has_hyperv_call() {
1153 let hv: &VcpuResponse_Wait_HypervCall = wait.get_hyperv_call();
1154 event.kind = CROSVM_VCPU_EVENT_KIND_HYPERV_HCALL;
1155 self.resume_data = vec![0; 8];
1156 event.event.hyperv_call = anon_hyperv_call {
1157 input: hv.input,
1158 result: self.resume_data.as_mut_ptr(),
1159 params: [hv.params0, hv.params1],
1160 };
1161 self.regs.get = false;
1162 self.sregs.get = false;
1163 self.debugregs.get = false;
1164 Ok(())
1165 } else if wait.has_hyperv_synic() {
1166 let hv: &VcpuResponse_Wait_HypervSynic = wait.get_hyperv_synic();
1167 event.kind = CROSVM_VCPU_EVENT_KIND_HYPERV_SYNIC;
1168 event.event.hyperv_synic = anon_hyperv_synic {
1169 msr: hv.msr,
1170 reserved: 0,
1171 control: hv.control,
1172 evt_page: hv.evt_page,
1173 msg_page: hv.msg_page,
1174 };
1175 self.regs.get = false;
1176 self.sregs.get = false;
1177 self.debugregs.get = false;
1178 Ok(())
1179 } else {
1180 Err(EPROTO)
1181 }
1182 }
1183
1184 fn resume(&mut self) -> result::Result<(), c_int> {
1185 let mut r = VcpuRequest::new();
1186 let resume: &mut VcpuRequest_Resume = r.mut_resume();
1187 swap(&mut resume.data, &mut self.resume_data);
1188
1189 if self.regs.set {
1190 swap(&mut resume.regs, &mut self.regs.cache);
1191 self.regs.set = false;
1192 }
1193 if self.sregs.set {
1194 swap(&mut resume.sregs, &mut self.sregs.cache);
1195 self.sregs.set = false;
1196 }
1197 if self.debugregs.set {
1198 swap(&mut resume.debugregs, &mut self.debugregs.cache);
1199 self.debugregs.set = false;
1200 }
1201
1202 self.vcpu_send(&r)?;
1203 Ok(())
1204 }
1205
1206 fn get_state(
1207 &mut self,
1208 state_set: VcpuRequest_StateSet,
1209 out: &mut [u8],
1210 ) -> result::Result<(), c_int> {
1211 let mut r = VcpuRequest::new();
1212 r.mut_get_state().set = state_set;
1213 let response = self.vcpu_transaction(&r)?;
1214 if !response.has_get_state() {
1215 return Err(EPROTO);
1216 }
1217 let get_state: &VcpuResponse_GetState = response.get_get_state();
1218 if get_state.state.len() != out.len() {
1219 return Err(EPROTO);
1220 }
1221 out.copy_from_slice(&get_state.state);
1222 Ok(())
1223 }
1224
1225 fn set_state(
1226 &mut self,
1227 state_set: VcpuRequest_StateSet,
1228 new_state: &[u8],
1229 ) -> result::Result<(), c_int> {
1230 let mut r = VcpuRequest::new();
1231 let set_state: &mut VcpuRequest_SetState = r.mut_set_state();
1232 set_state.set = state_set;
1233 set_state.state = new_state.to_vec();
1234
1235 self.vcpu_transaction(&r)?;
1236 Ok(())
1237 }
1238
1239 fn set_state_from_cache(
1240 &mut self,
1241 state_set: VcpuRequest_StateSet,
1242 ) -> result::Result<(), c_int> {
1243 let mut r = VcpuRequest::new();
1244 let set_state: &mut VcpuRequest_SetState = r.mut_set_state();
1245 set_state.set = state_set;
1246 match state_set {
1247 VcpuRequest_StateSet::REGS => {
1248 swap(&mut set_state.state, &mut self.regs.cache);
1249 self.regs.set = false;
1250 }
1251 VcpuRequest_StateSet::SREGS => {
1252 swap(&mut set_state.state, &mut self.sregs.cache);
1253 self.sregs.set = false;
1254 }
1255 VcpuRequest_StateSet::DEBUGREGS => {
1256 swap(&mut set_state.state, &mut self.debugregs.cache);
1257 self.debugregs.set = false;
1258 }
1259 _ => return Err(EINVAL),
1260 }
1261
1262 self.vcpu_transaction(&r)?;
1263 Ok(())
1264 }
1265
1266 fn get_hyperv_cpuid(
1267 &mut self,
1268 cpuid_entries: &mut [kvm_cpuid_entry2],
1269 cpuid_count: &mut usize,
1270 ) -> result::Result<(), c_int> {
1271 *cpuid_count = 0;
1272
1273 let mut r = VcpuRequest::new();
1274 r.mut_get_hyperv_cpuid();
1275
1276 let response = self.vcpu_transaction(&r)?;
1277 if !response.has_get_hyperv_cpuid() {
1278 return Err(EPROTO);
1279 }
1280
1281 let hyperv_cpuids: &VcpuResponse_CpuidResponse = response.get_get_hyperv_cpuid();
1282
1283 *cpuid_count = hyperv_cpuids.get_entries().len();
1284 if *cpuid_count > cpuid_entries.len() {
1285 return Err(E2BIG);
1286 }
1287
1288 for (proto_entry, kvm_entry) in hyperv_cpuids
1289 .get_entries()
1290 .iter()
1291 .zip(cpuid_entries.iter_mut())
1292 {
1293 *kvm_entry = cpuid_proto_to_kvm(proto_entry);
1294 }
1295
1296 Ok(())
1297 }
1298
1299 fn get_msrs(
1300 &mut self,
1301 msr_entries: &mut [kvm_msr_entry],
1302 msr_count: &mut usize,
1303 ) -> result::Result<(), c_int> {
1304 *msr_count = 0;
1305
1306 let mut r = VcpuRequest::new();
1307 let entry_indices: &mut Vec<u32> = r.mut_get_msrs().mut_entry_indices();
1308 for entry in msr_entries.iter() {
1309 entry_indices.push(entry.index);
1310 }
1311
1312 let response = self.vcpu_transaction(&r)?;
1313 if !response.has_get_msrs() {
1314 return Err(EPROTO);
1315 }
1316 let get_msrs: &VcpuResponse_GetMsrs = response.get_get_msrs();
1317 *msr_count = get_msrs.get_entry_data().len();
1318 if *msr_count > msr_entries.len() {
1319 return Err(E2BIG);
1320 }
1321 for (&msr_data, msr_entry) in get_msrs.get_entry_data().iter().zip(msr_entries) {
1322 msr_entry.data = msr_data;
1323 }
1324 Ok(())
1325 }
1326
1327 fn set_msrs(&mut self, msr_entries: &[kvm_msr_entry]) -> result::Result<(), c_int> {
1328 let mut r = VcpuRequest::new();
1329 let set_msrs_entries: &mut RepeatedField<VcpuRequest_MsrEntry> =
1330 r.mut_set_msrs().mut_entries();
1331 for msr_entry in msr_entries {
1332 let mut entry = VcpuRequest_MsrEntry::new();
1333 entry.index = msr_entry.index;
1334 entry.data = msr_entry.data;
1335 set_msrs_entries.push(entry);
1336 }
1337
1338 self.vcpu_transaction(&r)?;
1339 Ok(())
1340 }
1341
1342 fn set_cpuid(&mut self, cpuid_entries: &[kvm_cpuid_entry2]) -> result::Result<(), c_int> {
1343 let mut r = VcpuRequest::new();
1344 let set_cpuid_entries: &mut RepeatedField<CpuidEntry> = r.mut_set_cpuid().mut_entries();
1345 for cpuid_entry in cpuid_entries {
1346 set_cpuid_entries.push(cpuid_kvm_to_proto(cpuid_entry));
1347 }
1348
1349 self.vcpu_transaction(&r)?;
1350 Ok(())
1351 }
1352
1353 fn enable_capability(&mut self, capability: u32) -> result::Result<(), c_int> {
1354 let mut r = VcpuRequest::new();
1355 r.mut_enable_capability().capability = capability;
1356 self.vcpu_transaction(&r)?;
1357 Ok(())
1358 }
1359 }
1360
1361 // crosvm API signals success as 0 and errors as negative values
1362 // derived from `errno`.
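// For example, Ok(()) maps to 0 and Err(EPROTO) maps to -EPROTO.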
1363 fn to_crosvm_rc<T>(r: result::Result<T, c_int>) -> c_int {
1364 match r {
1365 Ok(_) => 0,
1366 Err(e) => -e,
1367 }
1368 }
1369
1370 #[no_mangle]
1371 pub unsafe extern "C" fn crosvm_get_render_server_fd() -> c_int {
1372 let fd = match env::var(CROSVM_GPU_SERVER_FD_ENV) {
1373 Ok(v) => v,
1374 _ => return -EINVAL,
1375 };
1376
1377 match fd.parse() {
1378 Ok(v) if v >= 0 => v,
1379 _ => -EINVAL,
1380 }
1381 }
1382
1383 #[no_mangle]
1384 pub unsafe extern "C" fn crosvm_connect(out: *mut *mut crosvm) -> c_int {
1385 let _u = record(Stat::Connect);
1386 let socket_name = match env::var(CROSVM_SOCKET_ENV) {
1387 Ok(v) => v,
1388 _ => return -ENOTCONN,
1389 };
1390
1391 let socket = match socket_name.parse() {
1392 Ok(v) if v < 0 => return -EINVAL,
1393 Ok(v) => v,
1394 _ => return -EINVAL,
1395 };
1396
1397 let socket = UnixDatagram::from_raw_fd(socket);
1398 let crosvm = match crosvm::from_connection(socket) {
1399 Ok(c) => c,
1400 Err(e) => return -e,
1401 };
1402 *out = Box::into_raw(Box::new(crosvm));
1403 0
1404 }
1405
1406 #[no_mangle]
1407 pub unsafe extern "C" fn crosvm_new_connection(self_: *mut crosvm, out: *mut *mut crosvm) -> c_int {
1408 let _u = record(Stat::NewConnection);
1409 let self_ = &mut (*self_);
1410 match self_.try_clone() {
1411 Ok(cloned) => {
1412 *out = Box::into_raw(Box::new(cloned));
1413 0
1414 }
1415 Err(e) => -e,
1416 }
1417 }
1418
1419 #[no_mangle]
1420 pub unsafe extern "C" fn crosvm_destroy_connection(self_: *mut *mut crosvm) -> c_int {
1421 let _u = record(Stat::DestroyConnection);
1422 Box::from_raw(*self_);
1423 *self_ = null_mut();
1424 0
1425 }
1426
1427 #[no_mangle]
1428 pub unsafe extern "C" fn crosvm_get_shutdown_eventfd(self_: *mut crosvm) -> c_int {
1429 let _u = record(Stat::GetShutdownEvent);
1430 let self_ = &mut (*self_);
1431 match self_.get_shutdown_event() {
1432 Ok(f) => f.into_raw_fd(),
1433 Err(e) => -e,
1434 }
1435 }
1436
1437 #[no_mangle]
1438 pub unsafe extern "C" fn crosvm_check_extension(
1439 self_: *mut crosvm,
1440 extension: u32,
1441 has_extension: *mut bool,
1442 ) -> c_int {
1443 let _u = record(Stat::CheckExtension);
1444 let self_ = &mut (*self_);
1445 let ret = self_.check_extension(extension);
1446
1447 if let Ok(supported) = ret {
1448 *has_extension = supported;
1449 }
1450 to_crosvm_rc(ret)
1451 }
1452
1453 #[no_mangle]
1454 pub unsafe extern "C" fn crosvm_enable_capability(
1455 _self_: *mut crosvm,
1456 _capability: u32,
1457 _flags: u32,
1458 _args: *const u64,
1459 ) -> c_int {
1460 let _u = record(Stat::EnableVmCapability);
1461 -EINVAL
1462 }
1463
1464 #[no_mangle]
1465 pub unsafe extern "C" fn crosvm_get_supported_cpuid(
1466 this: *mut crosvm,
1467 entry_count: u32,
1468 cpuid_entries: *mut kvm_cpuid_entry2,
1469 out_count: *mut u32,
1470 ) -> c_int {
1471 let _u = record(Stat::GetSupportedCpuid);
1472 let this = &mut *this;
1473 let cpuid_entries = from_raw_parts_mut(cpuid_entries, entry_count as usize);
1474 let mut cpuid_count: usize = 0;
1475 let ret = this.get_supported_cpuid(cpuid_entries, &mut cpuid_count);
1476 *out_count = cpuid_count as u32;
1477 to_crosvm_rc(ret)
1478 }
1479
1480 #[no_mangle]
1481 pub unsafe extern "C" fn crosvm_get_emulated_cpuid(
1482 this: *mut crosvm,
1483 entry_count: u32,
1484 cpuid_entries: *mut kvm_cpuid_entry2,
1485 out_count: *mut u32,
1486 ) -> c_int {
1487 let _u = record(Stat::GetEmulatedCpuid);
1488 let this = &mut *this;
1489 let cpuid_entries = from_raw_parts_mut(cpuid_entries, entry_count as usize);
1490 let mut cpuid_count: usize = 0;
1491 let ret = this.get_emulated_cpuid(cpuid_entries, &mut cpuid_count);
1492 *out_count = cpuid_count as u32;
1493 to_crosvm_rc(ret)
1494 }
1495
1496 #[no_mangle]
1497 pub unsafe extern "C" fn crosvm_get_msr_index_list(
1498 this: *mut crosvm,
1499 entry_count: u32,
1500 msr_indices: *mut u32,
1501 out_count: *mut u32,
1502 ) -> c_int {
1503 let _u = record(Stat::GetMsrIndexList);
1504 let this = &mut *this;
1505 let msr_indices = from_raw_parts_mut(msr_indices, entry_count as usize);
1506 let mut msr_count: usize = 0;
1507 let ret = this.get_msr_index_list(msr_indices, &mut msr_count);
1508 *out_count = msr_count as u32;
1509 to_crosvm_rc(ret)
1510 }
1511
1512 #[no_mangle]
1513 pub unsafe extern "C" fn crosvm_net_get_config(
1514 self_: *mut crosvm,
1515 config: *mut crosvm_net_config,
1516 ) -> c_int {
1517 let _u = record(Stat::NetGetConfig);
1518 let self_ = &mut (*self_);
1519 let ret = self_.get_net_config();
1520
1521 if let Ok(c) = ret {
1522 *config = c;
1523 }
1524
1525 to_crosvm_rc(ret)
1526 }
1527
1528 #[no_mangle]
1529 pub unsafe extern "C" fn crosvm_reserve_range(
1530 self_: *mut crosvm,
1531 space: u32,
1532 start: u64,
1533 length: u64,
1534 ) -> c_int {
1535 let _u = record(Stat::ReserveRange);
1536 let self_ = &mut (*self_);
1537 let ret = self_.reserve_range(space, start, length, false);
1538 to_crosvm_rc(ret)
1539 }
1540
1541 #[no_mangle]
1542 pub unsafe extern "C" fn crosvm_reserve_async_write_range(
1543 self_: *mut crosvm,
1544 space: u32,
1545 start: u64,
1546 length: u64,
1547 ) -> c_int {
1548 let _u = record(Stat::ReserveAsyncWriteRange);
1549 let self_ = &mut (*self_);
1550 let ret = self_.reserve_range(space, start, length, true);
1551 to_crosvm_rc(ret)
1552 }
1553
1554 #[no_mangle]
1555 pub unsafe extern "C" fn crosvm_set_irq(self_: *mut crosvm, irq_id: u32, active: bool) -> c_int {
1556 let _u = record(Stat::SetIrq);
1557 let self_ = &mut (*self_);
1558 let ret = self_.set_irq(irq_id, active);
1559 to_crosvm_rc(ret)
1560 }
1561
1562 #[no_mangle]
1563 pub unsafe extern "C" fn crosvm_set_irq_routing(
1564 self_: *mut crosvm,
1565 route_count: u32,
1566 routes: *const crosvm_irq_route,
1567 ) -> c_int {
1568 let _u = record(Stat::SetIrqRouting);
1569 let self_ = &mut (*self_);
1570 let ret = self_.set_irq_routing(slice::from_raw_parts(routes, route_count as usize));
1571 to_crosvm_rc(ret)
1572 }
1573
1574 #[no_mangle]
1575 pub unsafe extern "C" fn crosvm_set_hypercall_hint(
1576 self_: *mut crosvm,
1577 hints_count: u32,
1578 hints: *const crosvm_hint,
1579 ) -> c_int {
1580 let _u = record(Stat::SetHypercallHint);
1581 let self_ = &mut (*self_);
1582
1583 if hints_count < 1 {
1584 let ret = self_.set_hint(0, 0, false, &[]);
1585 return to_crosvm_rc(ret);
1586 }
1587 if hints_count > CROSVM_MAX_HINT_COUNT {
1588 return -EINVAL;
1589 }
1590 let hints = slice::from_raw_parts(hints, hints_count as usize);
1591 let hint = &hints[0];
1592 if hint.hint_version != 0
1593 || hint.reserved != 0
1594 || hint.address == 0
1595 || (hint.address_flags != 0 && hint.address_flags != CROSVM_HINT_ON_WRITE)
1596 || hint.details_count > CROSVM_MAX_HINT_DETAIL_COUNT as u16
1597 {
1598 return -EINVAL;
1599 }
1600 let ret = self_.set_hint(
1601 hint.address_space,
1602 hint.address,
1603 hint.address_flags == CROSVM_HINT_ON_WRITE,
1604 slice::from_raw_parts(hint.details, hint.details_count as usize),
1605 );
1606 to_crosvm_rc(ret)
1607 }
1608
1609 #[no_mangle]
1610 pub unsafe extern "C" fn crosvm_get_pic_state(
1611 this: *mut crosvm,
1612 primary: bool,
1613 state: *mut kvm_pic_state,
1614 ) -> c_int {
1615 let _u = record(Stat::GetPicState);
1616 let this = &mut *this;
1617 let state_set = if primary {
1618 MainRequest_StateSet::PIC0
1619 } else {
1620 MainRequest_StateSet::PIC1
1621 };
1622 let state = from_raw_parts_mut(state as *mut u8, size_of::<kvm_pic_state>());
1623 let ret = this.get_state(state_set, state);
1624 to_crosvm_rc(ret)
1625 }
1626
1627 #[no_mangle]
1628 pub unsafe extern "C" fn crosvm_set_pic_state(
1629 this: *mut crosvm,
1630 primary: bool,
1631 state: *mut kvm_pic_state,
1632 ) -> c_int {
1633 let _u = record(Stat::SetPicState);
1634 let this = &mut *this;
1635 let state_set = if primary {
1636 MainRequest_StateSet::PIC0
1637 } else {
1638 MainRequest_StateSet::PIC1
1639 };
1640 let state = from_raw_parts(state as *mut u8, size_of::<kvm_pic_state>());
1641 let ret = this.set_state(state_set, state);
1642 to_crosvm_rc(ret)
1643 }
1644
1645 #[no_mangle]
1646 pub unsafe extern "C" fn crosvm_get_ioapic_state(
1647 this: *mut crosvm,
1648 state: *mut kvm_ioapic_state,
1649 ) -> c_int {
1650 let _u = record(Stat::GetIoapicState);
1651 let this = &mut *this;
1652 let state = from_raw_parts_mut(state as *mut u8, size_of::<kvm_ioapic_state>());
1653 let ret = this.get_state(MainRequest_StateSet::IOAPIC, state);
1654 to_crosvm_rc(ret)
1655 }
1656
1657 #[no_mangle]
1658 pub unsafe extern "C" fn crosvm_set_ioapic_state(
1659 this: *mut crosvm,
1660 state: *const kvm_ioapic_state,
1661 ) -> c_int {
1662 let _u = record(Stat::SetIoapicState);
1663 let this = &mut *this;
1664 let state = from_raw_parts(state as *mut u8, size_of::<kvm_ioapic_state>());
1665 let ret = this.set_state(MainRequest_StateSet::IOAPIC, state);
1666 to_crosvm_rc(ret)
1667 }
1668
1669 #[no_mangle]
1670 pub unsafe extern "C" fn crosvm_get_pit_state(
1671 this: *mut crosvm,
1672 state: *mut kvm_pit_state2,
1673 ) -> c_int {
1674 let _u = record(Stat::GetPitState);
1675 let this = &mut *this;
1676 let state = from_raw_parts_mut(state as *mut u8, size_of::<kvm_pit_state2>());
1677 let ret = this.get_state(MainRequest_StateSet::PIT, state);
1678 to_crosvm_rc(ret)
1679 }
1680
1681 #[no_mangle]
1682 pub unsafe extern "C" fn crosvm_set_pit_state(
1683 this: *mut crosvm,
1684 state: *const kvm_pit_state2,
1685 ) -> c_int {
1686 let _u = record(Stat::SetPitState);
1687 let this = &mut *this;
1688 let state = from_raw_parts(state as *mut u8, size_of::<kvm_pit_state2>());
1689 let ret = this.set_state(MainRequest_StateSet::PIT, state);
1690 to_crosvm_rc(ret)
1691 }
1692
1693 #[no_mangle]
1694 pub unsafe extern "C" fn crosvm_get_clock(
1695 this: *mut crosvm,
1696 clock_data: *mut kvm_clock_data,
1697 ) -> c_int {
1698 let _u = record(Stat::GetClock);
1699 let this = &mut *this;
1700 let state = from_raw_parts_mut(clock_data as *mut u8, size_of::<kvm_clock_data>());
1701 let ret = this.get_state(MainRequest_StateSet::CLOCK, state);
1702 to_crosvm_rc(ret)
1703 }
1704
1705 #[no_mangle]
1706 pub unsafe extern "C" fn crosvm_set_clock(
1707 this: *mut crosvm,
1708 clock_data: *const kvm_clock_data,
1709 ) -> c_int {
1710 let _u = record(Stat::SetClock);
1711 let this = &mut *this;
1712 let state = from_raw_parts(clock_data as *mut u8, size_of::<kvm_clock_data>());
1713 let ret = this.set_state(MainRequest_StateSet::CLOCK, state);
1714 to_crosvm_rc(ret)
1715 }
1716
1717 #[no_mangle]
1718 pub unsafe extern "C" fn crosvm_set_identity_map_addr(self_: *mut crosvm, addr: u32) -> c_int {
1719 let _u = record(Stat::SetIdentityMapAddr);
1720 let self_ = &mut (*self_);
1721 let ret = self_.set_identity_map_addr(addr);
1722 to_crosvm_rc(ret)
1723 }
1724
1725 #[no_mangle]
1726 pub unsafe extern "C" fn crosvm_pause_vcpus(
1727 self_: *mut crosvm,
1728 cpu_mask: u64,
1729 user: *mut c_void,
1730 ) -> c_int {
1731 let _u = record(Stat::PauseVcpus);
1732 let self_ = &mut (*self_);
1733 let ret = self_.pause_vcpus(cpu_mask, user);
1734 to_crosvm_rc(ret)
1735 }
1736
1737 #[no_mangle]
1738 pub unsafe extern "C" fn crosvm_start(self_: *mut crosvm) -> c_int {
1739 let _u = record(Stat::Start);
1740 let self_ = &mut (*self_);
1741 let ret = self_.start();
1742 to_crosvm_rc(ret)
1743 }
1744
1745 #[no_mangle]
1746 pub unsafe extern "C" fn crosvm_get_vcpu(
1747 self_: *mut crosvm,
1748 cpu_id: u32,
1749 out: *mut *mut crosvm_vcpu,
1750 ) -> c_int {
1751 let _u = record(Stat::GetVcpu);
1752 let self_ = &mut (*self_);
1753 let ret = self_.get_vcpu(cpu_id);
1754
1755 if let Ok(vcpu) = ret {
1756 *out = vcpu;
1757 }
1758 to_crosvm_rc(ret)
1759 }
1760
1761 #[no_mangle]
1762 pub unsafe extern "C" fn crosvm_vcpu_wait(
1763 this: *mut crosvm_vcpu,
1764 event: *mut crosvm_vcpu_event,
1765 ) -> c_int {
1766 let _u = record(Stat::VcpuWait);
1767 let this = &mut *this;
1768 let event = &mut *event;
1769 let ret = this.wait(event);
1770 to_crosvm_rc(ret)
1771 }
1772
1773 #[no_mangle]
1774 pub unsafe extern "C" fn crosvm_vcpu_resume(this: *mut crosvm_vcpu) -> c_int {
1775 let _u = record(Stat::VcpuResume);
1776 let this = &mut *this;
1777 let ret = this.resume();
1778 to_crosvm_rc(ret)
1779 }
1780
1781 #[no_mangle]
1782 pub unsafe extern "C" fn crosvm_vcpu_get_regs(
1783 this: *mut crosvm_vcpu,
1784 regs: *mut kvm_regs,
1785 ) -> c_int {
1786 let _u = record(Stat::VcpuGetRegs);
1787 let this = &mut *this;
1788 if this.regs.set {
1789 if let Err(e) = this.set_state_from_cache(VcpuRequest_StateSet::REGS) {
1790 return -e;
1791 }
1792 }
1793 let regs = from_raw_parts_mut(regs as *mut u8, size_of::<kvm_regs>());
1794 if this.regs.get {
1795 regs.copy_from_slice(&this.regs.cache);
1796 0
1797 } else {
1798 let ret = this.get_state(VcpuRequest_StateSet::REGS, regs);
1799 to_crosvm_rc(ret)
1800 }
1801 }
1802
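/// Writes the general-purpose registers into the local cache only, marking it dirty (`set`) and
/// invalidating any cached read copy; the dirty cache is flushed to crosvm later, e.g. by the
/// next `crosvm_vcpu_get_regs` call above (and, presumably, when the VCPU resumes).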
#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_regs(
    this: *mut crosvm_vcpu,
    regs: *const kvm_regs,
) -> c_int {
    let _u = record(Stat::VcpuSetRegs);
    let this = &mut *this;
    this.regs.get = false;
    let regs = from_raw_parts(regs as *mut u8, size_of::<kvm_regs>());
    this.regs.set = true;
    this.regs.cache = regs.to_vec();
    0
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_sregs(
    this: *mut crosvm_vcpu,
    sregs: *mut kvm_sregs,
) -> c_int {
    let _u = record(Stat::VcpuGetSregs);
    let this = &mut *this;
    if this.sregs.set {
        if let Err(e) = this.set_state_from_cache(VcpuRequest_StateSet::SREGS) {
            return -e;
        }
    }
    let sregs = from_raw_parts_mut(sregs as *mut u8, size_of::<kvm_sregs>());
    if this.sregs.get {
        sregs.copy_from_slice(&this.sregs.cache);
        0
    } else {
        let ret = this.get_state(VcpuRequest_StateSet::SREGS, sregs);
        to_crosvm_rc(ret)
    }
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_sregs(
    this: *mut crosvm_vcpu,
    sregs: *const kvm_sregs,
) -> c_int {
    let _u = record(Stat::VcpuSetSregs);
    let this = &mut *this;
    this.sregs.get = false;
    let sregs = from_raw_parts(sregs as *mut u8, size_of::<kvm_sregs>());
    this.sregs.set = true;
    this.sregs.cache = sregs.to_vec();
    0
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_fpu(this: *mut crosvm_vcpu, fpu: *mut kvm_fpu) -> c_int {
    let _u = record(Stat::GetFpu);
    let this = &mut *this;
    let fpu = from_raw_parts_mut(fpu as *mut u8, size_of::<kvm_fpu>());
    let ret = this.get_state(VcpuRequest_StateSet::FPU, fpu);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_fpu(this: *mut crosvm_vcpu, fpu: *const kvm_fpu) -> c_int {
    let _u = record(Stat::SetFpu);
    let this = &mut *this;
    let fpu = from_raw_parts(fpu as *mut u8, size_of::<kvm_fpu>());
    let ret = this.set_state(VcpuRequest_StateSet::FPU, fpu);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_debugregs(
    this: *mut crosvm_vcpu,
    dregs: *mut kvm_debugregs,
) -> c_int {
    let _u = record(Stat::GetDebugRegs);
    let this = &mut *this;
    if this.debugregs.set {
        if let Err(e) = this.set_state_from_cache(VcpuRequest_StateSet::DEBUGREGS) {
            return -e;
        }
    }
    let dregs = from_raw_parts_mut(dregs as *mut u8, size_of::<kvm_debugregs>());
    if this.debugregs.get {
        dregs.copy_from_slice(&this.debugregs.cache);
        0
    } else {
        let ret = this.get_state(VcpuRequest_StateSet::DEBUGREGS, dregs);
        to_crosvm_rc(ret)
    }
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_debugregs(
    this: *mut crosvm_vcpu,
    dregs: *const kvm_debugregs,
) -> c_int {
    let _u = record(Stat::SetDebugRegs);
    let this = &mut *this;
    this.debugregs.get = false;
    let dregs = from_raw_parts(dregs as *mut u8, size_of::<kvm_debugregs>());
    this.debugregs.set = true;
    this.debugregs.cache = dregs.to_vec();
    0
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_xcrs(
    this: *mut crosvm_vcpu,
    xcrs: *mut kvm_xcrs,
) -> c_int {
    let _u = record(Stat::GetXCRegs);
    let this = &mut *this;
    let xcrs = from_raw_parts_mut(xcrs as *mut u8, size_of::<kvm_xcrs>());
    let ret = this.get_state(VcpuRequest_StateSet::XCREGS, xcrs);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_xcrs(
    this: *mut crosvm_vcpu,
    xcrs: *const kvm_xcrs,
) -> c_int {
    let _u = record(Stat::SetXCRegs);
    let this = &mut *this;
    let xcrs = from_raw_parts(xcrs as *mut u8, size_of::<kvm_xcrs>());
    let ret = this.set_state(VcpuRequest_StateSet::XCREGS, xcrs);
    to_crosvm_rc(ret)
}

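/// Fills `cpuid_entries` (capacity `entry_count`) with the Hyper-V CPUID leaves reported for this
/// VCPU and stores the number of entries actually written in `*out_count`.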
#[no_mangle]
pub unsafe extern "C" fn crosvm_get_hyperv_cpuid(
    this: *mut crosvm_vcpu,
    entry_count: u32,
    cpuid_entries: *mut kvm_cpuid_entry2,
    out_count: *mut u32,
) -> c_int {
    let _u = record(Stat::GetHypervCpuid);
    let this = &mut *this;
    let cpuid_entries = from_raw_parts_mut(cpuid_entries, entry_count as usize);
    let mut cpuid_count: usize = 0;
    let ret = this.get_hyperv_cpuid(cpuid_entries, &mut cpuid_count);
    *out_count = cpuid_count as u32;
    to_crosvm_rc(ret)
}

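/// Reads MSRs for this VCPU: the caller supplies `msr_count` entries whose `index` fields
/// presumably select which MSRs to read (mirroring `KVM_GET_MSRS`), and `*out_count` receives how
/// many entries were populated.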
#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_msrs(
    this: *mut crosvm_vcpu,
    msr_count: u32,
    msr_entries: *mut kvm_msr_entry,
    out_count: *mut u32,
) -> c_int {
    let _u = record(Stat::VcpuGetMsrs);
    let this = &mut *this;
    let msr_entries = from_raw_parts_mut(msr_entries, msr_count as usize);
    let mut count: usize = 0;
    let ret = this.get_msrs(msr_entries, &mut count);
    *out_count = count as u32;
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_msrs(
    this: *mut crosvm_vcpu,
    msr_count: u32,
    msr_entries: *const kvm_msr_entry,
) -> c_int {
    let _u = record(Stat::VcpuSetMsrs);
    let this = &mut *this;
    let msr_entries = from_raw_parts(msr_entries, msr_count as usize);
    let ret = this.set_msrs(msr_entries);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_cpuid(
    this: *mut crosvm_vcpu,
    cpuid_count: u32,
    cpuid_entries: *const kvm_cpuid_entry2,
) -> c_int {
    let _u = record(Stat::VcpuSetCpuid);
    let this = &mut *this;
    let cpuid_entries = from_raw_parts(cpuid_entries, cpuid_count as usize);
    let ret = this.set_cpuid(cpuid_entries);
    to_crosvm_rc(ret)
}

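/// Enables a KVM capability on this VCPU. `args` must point to at least four `u64` values
/// (mirroring `kvm_enable_cap.args`); only zeroed `flags` and `args` are accepted, since this shim
/// does not forward them and rejects anything else with `-EINVAL`.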
#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_enable_capability(
    this: *mut crosvm_vcpu,
    capability: u32,
    flags: u32,
    args: *const u64,
) -> c_int {
    let _u = record(Stat::EnableVcpuCapability);
    let this = &mut *this;
    let args = slice::from_raw_parts(args, 4);

    if flags != 0 || args.iter().any(|v| *v != 0) {
        return -EINVAL;
    }

    let ret = this.enable_capability(capability);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_lapic_state(
    this: *mut crosvm_vcpu,
    state: *mut kvm_lapic_state,
) -> c_int {
    let _u = record(Stat::VcpuGetLapicState);
    let this = &mut *this;
    let state = from_raw_parts_mut(state as *mut u8, size_of::<kvm_lapic_state>());
    let ret = this.get_state(VcpuRequest_StateSet::LAPIC, state);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_lapic_state(
    this: *mut crosvm_vcpu,
    state: *const kvm_lapic_state,
) -> c_int {
    let _u = record(Stat::VcpuSetLapicState);
    let this = &mut *this;
    let state = from_raw_parts(state as *mut u8, size_of::<kvm_lapic_state>());
    let ret = this.set_state(VcpuRequest_StateSet::LAPIC, state);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_mp_state(
    this: *mut crosvm_vcpu,
    state: *mut kvm_mp_state,
) -> c_int {
    let _u = record(Stat::VcpuGetMpState);
    let this = &mut *this;
    let state = from_raw_parts_mut(state as *mut u8, size_of::<kvm_mp_state>());
    let ret = this.get_state(VcpuRequest_StateSet::MP, state);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_mp_state(
    this: *mut crosvm_vcpu,
    state: *const kvm_mp_state,
) -> c_int {
    let _u = record(Stat::VcpuSetMpState);
    let this = &mut *this;
    let state = from_raw_parts(state as *mut u8, size_of::<kvm_mp_state>());
    let ret = this.set_state(VcpuRequest_StateSet::MP, state);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_vcpu_events(
    this: *mut crosvm_vcpu,
    events: *mut kvm_vcpu_events,
) -> c_int {
    let _u = record(Stat::VcpuGetVcpuEvents);
    let this = &mut *this;
    let events = from_raw_parts_mut(events as *mut u8, size_of::<kvm_vcpu_events>());
    let ret = this.get_state(VcpuRequest_StateSet::EVENTS, events);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_vcpu_events(
    this: *mut crosvm_vcpu,
    events: *const kvm_vcpu_events,
) -> c_int {
    let _u = record(Stat::VcpuSetVcpuEvents);
    let this = &mut *this;
    let events = from_raw_parts(events as *mut u8, size_of::<kvm_vcpu_events>());
    let ret = this.set_state(VcpuRequest_StateSet::EVENTS, events);
    to_crosvm_rc(ret)
}
