// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::hash_map::Entry;
use std::collections::hash_map::HashMap;
use std::collections::hash_map::VacantEntry;
use std::env::set_var;
use std::fs::File;
use std::io::IoSlice;
use std::io::IoSliceMut;
use std::io::Write;
use std::mem::transmute;
use std::os::unix::net::UnixDatagram;
use std::path::Path;
use std::process::Command;
use std::result;
use std::sync::Arc;
use std::sync::RwLock;
use std::thread::JoinHandle;

use base::error;
use base::AsRawDescriptor;
use base::Error as SysError;
use base::Event;
use base::IntoRawDescriptor;
use base::Killable;
use base::MemoryMappingBuilder;
use base::Result as SysResult;
use base::ScmSocket;
use base::SharedMemory;
use base::SharedMemoryUnix;
use base::SIGRTMIN;
// Wrapper types to make the kvm state structs DataInit
use data_model::DataInit;
use kvm::dirty_log_bitmap_size;
use kvm::Datamatch;
use kvm::IoeventAddress;
use kvm::IrqRoute;
use kvm::IrqSource;
use kvm::PicId;
use kvm::Vm;
use kvm_sys::kvm_clock_data;
use kvm_sys::kvm_ioapic_state;
use kvm_sys::kvm_pic_state;
use kvm_sys::kvm_pit_state2;
use libc::pid_t;
use libc::waitpid;
use libc::EINVAL;
use libc::ENODATA;
use libc::ENOTTY;
use libc::STDERR_FILENO;
use libc::WEXITSTATUS;
use libc::WIFEXITED;
use libc::WNOHANG;
use libc::WTERMSIG;
use minijail::Minijail;
use net_util::Error as NetError;
use net_util::TapTCommon;
use protobuf::Message;
use protos::plugin::*;
use sync::Mutex;
use vm_memory::GuestAddress;

use super::*;

#[derive(Copy, Clone)]
struct VmPicState(kvm_pic_state);
unsafe impl DataInit for VmPicState {}
#[derive(Copy, Clone)]
struct VmIoapicState(kvm_ioapic_state);
unsafe impl DataInit for VmIoapicState {}
#[derive(Copy, Clone)]
struct VmPitState(kvm_pit_state2);
unsafe impl DataInit for VmPitState {}
#[derive(Copy, Clone)]
struct VmClockState(kvm_clock_data);
unsafe impl DataInit for VmClockState {}

const CROSVM_SOCKET_ENV: &str = "CROSVM_SOCKET";

fn get_vm_state(vm: &Vm, state_set: MainRequest_StateSet) -> SysResult<Vec<u8>> {
    Ok(match state_set {
        MainRequest_StateSet::PIC0 => VmPicState(vm.get_pic_state(PicId::Primary)?)
            .as_slice()
            .to_vec(),
        MainRequest_StateSet::PIC1 => VmPicState(vm.get_pic_state(PicId::Secondary)?)
            .as_slice()
            .to_vec(),
        MainRequest_StateSet::IOAPIC => VmIoapicState(vm.get_ioapic_state()?).as_slice().to_vec(),
        MainRequest_StateSet::PIT => VmPitState(vm.get_pit_state()?).as_slice().to_vec(),
        MainRequest_StateSet::CLOCK => VmClockState(vm.get_clock()?).as_slice().to_vec(),
    })
}

fn set_vm_state(vm: &Vm, state_set: MainRequest_StateSet, state: &[u8]) -> SysResult<()> {
    match state_set {
        MainRequest_StateSet::PIC0 => vm.set_pic_state(
            PicId::Primary,
            &VmPicState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::PIC1 => vm.set_pic_state(
            PicId::Secondary,
            &VmPicState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::IOAPIC => vm.set_ioapic_state(
            &VmIoapicState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::PIT => vm.set_pit_state(
            &VmPitState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::CLOCK => vm.set_clock(
            &VmClockState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
    }
}

/// The status of a process, either that it is running, or that it exited under some condition.
pub enum ProcessStatus {
    /// The process is running and therefore has no information about its result.
    Running,
    /// The process has exited with a successful code.
    Success,
    /// The process failed with the given exit code.
    Fail(i32),
    /// The process was terminated with the given signal code.
    Signal(i32),
}

/// Creates, owns, and handles messages from a plugin process.
///
/// A plugin process has control over a single VM and a fixed number of VCPUs via a set of pipes &
/// unix domain socket connections and a protocol defined in `protos::plugin`. The plugin process
/// is run in an unprivileged manner as a child process spawned via a path to an arbitrary
/// executable.
pub struct Process {
    started: bool,
    plugin_pid: pid_t,
    request_sockets: Vec<UnixDatagram>,
    objects: HashMap<u32, PluginObject>,
    shared_vcpu_state: Arc<RwLock<SharedVcpuState>>,
    per_vcpu_states: Vec<Arc<Mutex<PerVcpuState>>>,

    // Resources to send to the plugin.
    kill_evt: Event,
    vcpu_pipes: Vec<VcpuPipe>,

    // Socket transmission buffers.
    request_buffer: Vec<u8>,
    response_buffer: Vec<u8>,
}

impl Process {
    /// Creates a new plugin process for the given number of vcpus and VM.
    ///
    /// This will immediately spawn the plugin process and wait for the child to signal that it is
    /// ready to start. This call may block indefinitely.
    ///
    /// Set the `jail` argument to spawn the plugin process within the preconfigured jail.
    /// Due to an API limitation in libminijail necessitating that this function set an environment
    /// variable, this function is not thread-safe.
    ///
    /// Arguments:
    ///
    /// * `cpu_count`: number of vcpus
    /// * `cmd`: path to plugin executable
    /// * `args`: arguments to plugin executable
    /// * `jail`: jail to launch plugin in. If `None`, the plugin will just be spawned as a child
    /// * `stderr`: file to redirect stderr of the plugin process to
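    ///
    /// # Example
    ///
    /// Illustrative sketch only (not compiled as a doctest); it assumes the caller has already
    /// chosen a plugin path and prepared a `File` for stderr, and spawns without a jail:
    ///
    /// ```ignore
    /// let stderr = File::create("/tmp/plugin_stderr.log").unwrap();
    /// let process = Process::new(
    ///     4,                               // number of vcpus
    ///     Path::new("/usr/bin/my_plugin"), // hypothetical plugin executable
    ///     &["--some-flag"],                // arguments passed through to the plugin
    ///     None,                            // no jail: spawn as a plain child process
    ///     stderr,
    /// )
    /// .expect("failed to start plugin process");
    /// ```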
    pub fn new(
        cpu_count: u32,
        cmd: &Path,
        args: &[&str],
        jail: Option<Minijail>,
        stderr: File,
    ) -> Result<Process> {
        let (request_socket, child_socket) =
            new_seqpacket_pair().context("error creating main request socket")?;

        let mut vcpu_pipes: Vec<VcpuPipe> = Vec::with_capacity(cpu_count as usize);
        for _ in 0..cpu_count {
            vcpu_pipes.push(new_pipe_pair().context("error creating vcpu request socket")?);
        }

        let mut per_vcpu_states: Vec<Arc<Mutex<PerVcpuState>>> = Vec::new();
        per_vcpu_states.resize_with(cpu_count as usize, Default::default);

        let plugin_pid = match jail {
            Some(jail) => {
                set_var(
                    CROSVM_SOCKET_ENV,
                    child_socket.as_raw_descriptor().to_string(),
                );
                jail.run_remap(
                    cmd,
                    &[
                        (stderr.as_raw_descriptor(), STDERR_FILENO),
                        (
                            child_socket.as_raw_descriptor(),
                            child_socket.as_raw_descriptor(),
                        ),
                    ],
                    args,
                )
                .context("failed to run plugin jail")?
            }
            None => Command::new(cmd)
                .args(args)
                .env(
                    "CROSVM_SOCKET",
                    child_socket.as_raw_descriptor().to_string(),
                )
                .stderr(stderr)
                .spawn()
                .context("failed to spawn plugin")?
                .id() as pid_t,
        };

        Ok(Process {
            started: false,
            plugin_pid,
            request_sockets: vec![request_socket],
            objects: Default::default(),
            shared_vcpu_state: Default::default(),
            per_vcpu_states,
            kill_evt: Event::new().context("failed to create plugin kill event")?,
            vcpu_pipes,
            request_buffer: vec![0; MAX_DATAGRAM_SIZE],
            response_buffer: Vec::new(),
        })
    }

    /// Creates a VCPU plugin connection object, used by a VCPU run loop to communicate with the
    /// plugin process.
    ///
    /// While each invocation of `create_vcpu` with the given `cpu_id` will return a unique
    /// `PluginVcpu` object, the underlying resources are shared by each `PluginVcpu` resulting
    /// from the same `cpu_id`.
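    ///
    /// # Example
    ///
    /// Illustrative sketch only (not compiled as a doctest); `cpu_count` is assumed to match the
    /// value passed to `Process::new`, and each `PluginVcpu` is handed to its VCPU thread:
    ///
    /// ```ignore
    /// for cpu_id in 0..cpu_count {
    ///     let plugin_vcpu = process.create_vcpu(cpu_id).expect("failed to create plugin vcpu");
    ///     // Move `plugin_vcpu` into the thread that runs VCPU `cpu_id`.
    /// }
    /// ```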
    pub fn create_vcpu(&self, cpu_id: u32) -> Result<PluginVcpu> {
        let vcpu_pipe_read = self.vcpu_pipes[cpu_id as usize]
            .crosvm_read
            .try_clone()
            .context("failed to clone vcpu read pipe")?;
        let vcpu_pipe_write = self.vcpu_pipes[cpu_id as usize]
            .crosvm_write
            .try_clone()
            .context("failed to clone vcpu write pipe")?;
        Ok(PluginVcpu::new(
            self.shared_vcpu_state.clone(),
            self.per_vcpu_states[cpu_id as usize].clone(),
            vcpu_pipe_read,
            vcpu_pipe_write,
        ))
    }

    /// Returns whether the plugin process has indicated that the VM is ready to start.
    pub fn is_started(&self) -> bool {
        self.started
    }

    /// Returns the process ID of the plugin process.
    pub fn pid(&self) -> pid_t {
        self.plugin_pid
    }

    /// Returns a slice of each socket that should be polled.
    ///
    /// If any socket in this slice becomes readable, `handle_socket` should be called with the
    /// index of that socket. If any socket becomes closed, its index should be passed to
    /// `drop_sockets`.
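    ///
    /// # Example
    ///
    /// Illustrative sketch of the polling contract only (not compiled as a doctest). How the
    /// caller waits for readability is left out (`wait_for_readable` is a hypothetical helper),
    /// and `kvm`, `vm`, `vcpu_handles`, and `taps` are assumed to exist in the caller:
    ///
    /// ```ignore
    /// loop {
    ///     let idx = wait_for_readable(process.sockets());
    ///     match process.handle_socket(idx, &kvm, &mut vm, &vcpu_handles, &taps) {
    ///         Ok(()) => {}
    ///         // A hung-up socket gets removed from the set returned by `sockets`.
    ///         Err(CommError::PluginSocketHup) => process.drop_sockets(&mut [idx]),
    ///         Err(e) => return Err(e),
    ///     }
    /// }
    /// ```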
    pub fn sockets(&self) -> &[UnixDatagram] {
        &self.request_sockets
    }

    /// Drops each socket identified by its index in the slice returned by `sockets`.
    ///
    /// The given `socket_idxs` slice will be modified in an arbitrary way for efficient removal of
    /// the sockets from internal data structures.
    pub fn drop_sockets(&mut self, socket_idxs: &mut [usize]) {
        // Takes a mutable slice so that the indices can be sorted for efficient removal in
        // request_sockets.
        socket_idxs.sort_unstable_by(|a, b| b.cmp(a));
        let old_len = self.request_sockets.len();
        for &socket_index in socket_idxs.iter() {
            // swap_remove changes the index of the last element, but we already know that one
            // doesn't need to be removed because we are removing sockets in descending order
            // thanks to the above sort.
            self.request_sockets.swap_remove(socket_index);
        }
        assert_eq!(old_len - socket_idxs.len(), self.request_sockets.len());
    }

    /// Gently requests that the plugin process exit cleanly, and ends handling of all VCPU
    /// connections.
    ///
    /// The plugin process can ignore the given signal, and so some timeout should be used before
    /// forcefully terminating the process.
    ///
    /// Any blocked VCPU connections will get interrupted so that the VCPU threads can exit
    /// cleanly. Any subsequent attempt to use the VCPU connections will fail.
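    ///
    /// # Example
    ///
    /// Illustrative sketch only (not compiled as a doctest) of one possible escalation policy:
    /// request a clean exit, poll `try_wait` for a bounded time, then fall back to `SIGKILL`.
    /// The five-second deadline and the `SIGKILL` fallback are choices made by the caller, not
    /// something this API mandates:
    ///
    /// ```ignore
    /// process.signal_kill().expect("failed to signal plugin");
    /// let deadline = std::time::Instant::now() + std::time::Duration::from_secs(5);
    /// loop {
    ///     match process.try_wait().expect("failed to wait on plugin") {
    ///         ProcessStatus::Running if std::time::Instant::now() < deadline => {
    ///             std::thread::sleep(std::time::Duration::from_millis(50));
    ///         }
    ///         ProcessStatus::Running => {
    ///             // Deadline passed; forcefully terminate the plugin.
    ///             unsafe { libc::kill(process.pid(), libc::SIGKILL) };
    ///             break;
    ///         }
    ///         _ => break, // Success, Fail, or Signal: the plugin has exited.
    ///     }
    /// }
    /// ```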
    pub fn signal_kill(&mut self) -> SysResult<()> {
        self.kill_evt.signal()?;
        // Normally we'd get any blocked recv() calls in the VCPU threads
        // to unblock by calling shutdown(). However, we're using pipes
        // (for improved performance), and pipes don't have shutdown, so
        // instead we'll write a shutdown message to ourselves using the
        // writable side of the pipe (normally used by the plugin).
        for pipe in self.vcpu_pipes.iter_mut() {
            let mut shutdown_request = VcpuRequest::new();
            shutdown_request.set_shutdown(VcpuRequest_Shutdown::new());
            let mut buffer = Vec::new();
            shutdown_request
                .write_to_vec(&mut buffer)
                .map_err(proto_to_sys_err)?;
            pipe.plugin_write
                .write(&buffer[..])
                .map_err(io_to_sys_err)?;
        }
        Ok(())
    }

    /// Waits without blocking for the plugin process to exit and returns the status.
    pub fn try_wait(&mut self) -> SysResult<ProcessStatus> {
        let mut status = 0;
        // Safe because waitpid is given a valid pointer of correct size and mutability, and the
        // return value is checked.
        let ret = unsafe { waitpid(self.plugin_pid, &mut status, WNOHANG) };
        match ret {
            -1 => Err(SysError::last()),
            0 => Ok(ProcessStatus::Running),
            _ => {
                if WIFEXITED(status) {
                    match WEXITSTATUS(status) {
                        0 => Ok(ProcessStatus::Success),
                        code => Ok(ProcessStatus::Fail(code)),
                    }
                } else {
                    // Plugin terminated but has no exit status, so it must have been signaled.
                    Ok(ProcessStatus::Signal(WTERMSIG(status)))
                }
            }
        }
    }

    fn handle_io_event(
        entry: VacantEntry<u32, PluginObject>,
        vm: &mut Vm,
        io_event: &MainRequest_Create_IoEvent,
    ) -> SysResult<RawDescriptor> {
        let evt = Event::new()?;
        let addr = match io_event.space {
            AddressSpace::IOPORT => IoeventAddress::Pio(io_event.address),
            AddressSpace::MMIO => IoeventAddress::Mmio(io_event.address),
        };
        match io_event.length {
            0 => vm.register_ioevent(&evt, addr, Datamatch::AnyLength)?,
            1 => vm.register_ioevent(&evt, addr, Datamatch::U8(Some(io_event.datamatch as u8)))?,
            2 => {
                vm.register_ioevent(&evt, addr, Datamatch::U16(Some(io_event.datamatch as u16)))?
            }
            4 => {
                vm.register_ioevent(&evt, addr, Datamatch::U32(Some(io_event.datamatch as u32)))?
            }
            8 => {
                vm.register_ioevent(&evt, addr, Datamatch::U64(Some(io_event.datamatch as u64)))?
            }
            _ => return Err(SysError::new(EINVAL)),
        };

        let fd = evt.as_raw_descriptor();
        entry.insert(PluginObject::IoEvent {
            evt,
            addr,
            length: io_event.length,
            datamatch: io_event.datamatch,
        });
        Ok(fd)
    }

    fn handle_memory(
        entry: VacantEntry<u32, PluginObject>,
        vm: &mut Vm,
        memfd: File,
        offset: u64,
        start: u64,
        length: u64,
        read_only: bool,
        dirty_log: bool,
    ) -> SysResult<()> {
        let shm = SharedMemory::from_file(memfd)?;
        // Checking the seals ensures the plugin process won't shrink the mmapped file, causing us
        // to SIGBUS in the future.
        let seals = shm.get_seals()?;
        if !seals.shrink_seal() {
            return Err(SysError::new(EPERM));
        }
        // Check to make sure we don't mmap areas beyond the end of the memfd.
        match length.checked_add(offset) {
            Some(end) if end > shm.size() => return Err(SysError::new(EINVAL)),
            None => return Err(SysError::new(EOVERFLOW)),
            _ => {}
        }
        let mem = MemoryMappingBuilder::new(length as usize)
            .from_shared_memory(&shm)
            .offset(offset)
            .build()
            .map_err(mmap_to_sys_err)?;
        let slot =
            vm.add_memory_region(GuestAddress(start), Box::new(mem), read_only, dirty_log)?;
        entry.insert(PluginObject::Memory {
            slot,
            length: length as usize,
        });
        Ok(())
    }

    fn handle_reserve_range(&mut self, reserve_range: &MainRequest_ReserveRange) -> SysResult<()> {
        match self.shared_vcpu_state.write() {
            Ok(mut lock) => {
                let space = match reserve_range.space {
                    AddressSpace::IOPORT => IoSpace::Ioport,
                    AddressSpace::MMIO => IoSpace::Mmio,
                };
                match reserve_range.length {
                    0 => lock.unreserve_range(space, reserve_range.start),
                    _ => lock.reserve_range(
                        space,
                        reserve_range.start,
                        reserve_range.length,
                        reserve_range.async_write,
                    ),
                }
            }
            Err(_) => Err(SysError::new(EDEADLK)),
        }
    }

    fn handle_set_irq_routing(
        vm: &mut Vm,
        irq_routing: &MainRequest_SetIrqRouting,
    ) -> SysResult<()> {
        let mut routes = Vec::with_capacity(irq_routing.routes.len());
        for route in &irq_routing.routes {
            routes.push(IrqRoute {
                gsi: route.irq_id,
                source: if route.has_irqchip() {
                    let irqchip = route.get_irqchip();
                    IrqSource::Irqchip {
                        chip: irqchip.irqchip,
                        pin: irqchip.pin,
                    }
                } else if route.has_msi() {
                    let msi = route.get_msi();
                    IrqSource::Msi {
                        address: msi.address,
                        data: msi.data,
                    }
                } else {
                    // Because route is a oneof field in the proto definition, this should
                    // only happen if a new variant gets added without updating this chained
                    // if block.
                    return Err(SysError::new(EINVAL));
                },
            });
        }
        vm.set_gsi_routing(&routes[..])
    }

    fn handle_set_call_hint(&mut self, hints: &MainRequest_SetCallHint) -> SysResult<()> {
        let mut regs: Vec<CallHintDetails> = vec![];
        for hint in &hints.hints {
            regs.push(CallHintDetails {
                match_rax: hint.match_rax,
                match_rbx: hint.match_rbx,
                match_rcx: hint.match_rcx,
                match_rdx: hint.match_rdx,
                rax: hint.rax,
                rbx: hint.rbx,
                rcx: hint.rcx,
                rdx: hint.rdx,
                send_sregs: hint.send_sregs,
                send_debugregs: hint.send_debugregs,
            });
        }
        match self.shared_vcpu_state.write() {
            Ok(mut lock) => {
                let space = match hints.space {
                    AddressSpace::IOPORT => IoSpace::Ioport,
                    AddressSpace::MMIO => IoSpace::Mmio,
                };
                lock.set_hint(space, hints.address, hints.on_write, regs);
                Ok(())
            }
            Err(_) => Err(SysError::new(EDEADLK)),
        }
    }

    fn handle_pause_vcpus(&self, vcpu_handles: &[JoinHandle<()>], cpu_mask: u64, user_data: u64) {
        for (cpu_id, (handle, per_cpu_state)) in
            vcpu_handles.iter().zip(&self.per_vcpu_states).enumerate()
        {
            if cpu_mask & (1 << cpu_id) != 0 {
                per_cpu_state.lock().request_pause(user_data);
                if let Err(e) = handle.kill(SIGRTMIN() + 0) {
                    error!("failed to interrupt vcpu {}: {}", cpu_id, e);
                }
            }
        }
    }

    fn handle_get_net_config(
        tap: &net_util::sys::unix::Tap,
        config: &mut MainResponse_GetNetConfig,
    ) -> SysResult<()> {
        // Log any NetError so that the cause can be found later, but extract and return the
        // underlying errno for the client as well.
        fn map_net_error(s: &str, e: NetError) -> SysError {
            error!("failed to get {}: {}", s, e);
            e.sys_error()
        }

        let ip_addr = tap.ip_addr().map_err(|e| map_net_error("IP address", e))?;
        config.set_host_ipv4_address(u32::from(ip_addr));

        let netmask = tap.netmask().map_err(|e| map_net_error("netmask", e))?;
        config.set_netmask(u32::from(netmask));

        let result_mac_addr = config.mut_host_mac_address();
        let mac_addr_octets = tap
            .mac_address()
            .map_err(|e| map_net_error("mac address", e))?
            .octets();
        result_mac_addr.resize(mac_addr_octets.len(), 0);
        result_mac_addr.clone_from_slice(&mac_addr_octets);

        Ok(())
    }

    /// Handles a request on a readable socket identified by its index in the slice returned by
    /// `sockets`.
    ///
    /// The `vm` is used to service requests that affect the VM. The `vcpu_handles` slice is used
    /// to interrupt a VCPU thread currently running in the VM if the socket requests it.
    pub fn handle_socket(
        &mut self,
        index: usize,
        kvm: &Kvm,
        vm: &mut Vm,
        vcpu_handles: &[JoinHandle<()>],
        taps: &[Tap],
    ) -> result::Result<(), CommError> {
        let (msg_size, request_file) = self.request_sockets[index]
            .recv_with_fd(IoSliceMut::new(&mut self.request_buffer))
            .map_err(CommError::PluginSocketRecv)?;

        if msg_size == 0 {
            return Err(CommError::PluginSocketHup);
        }

        let request: MainRequest = Message::parse_from_bytes(&self.request_buffer[..msg_size])
            .map_err(CommError::DecodeRequest)?;

        /// Use this to make it easier to stuff various kinds of File-like objects into the
        /// `boxed_fds` list.
        fn box_owned_fd<F: IntoRawDescriptor + 'static>(f: F) -> Box<dyn IntoRawDescriptor> {
            Box::new(f)
        }

        // This vec is used to extend ownership of certain FDs until the end of this function.
        let mut boxed_fds = Vec::new();
        let mut response_fds = Vec::new();
        let mut response = MainResponse::new();
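        // Exactly one request field is expected to be set. Each branch below selects the matching
        // response field, performs the operation, and yields a `SysResult`; on failure the errno
        // is recorded in the response instead of tearing down the connection.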
        let res = if request.has_create() {
            response.mut_create();
            let create = request.get_create();
            match self.objects.entry(create.id) {
                Entry::Vacant(entry) => {
                    if create.has_io_event() {
                        match Self::handle_io_event(entry, vm, create.get_io_event()) {
                            Ok(fd) => {
                                response_fds.push(fd);
                                Ok(())
                            }
                            Err(e) => Err(e),
                        }
                    } else if create.has_memory() {
                        let memory = create.get_memory();
                        match request_file {
                            Some(memfd) => Self::handle_memory(
                                entry,
                                vm,
                                memfd,
                                memory.offset,
                                memory.start,
                                memory.length,
                                memory.read_only,
                                memory.dirty_log,
                            ),
                            None => Err(SysError::new(EBADF)),
                        }
                    } else if create.has_irq_event() {
                        let irq_event = create.get_irq_event();
                        match (Event::new(), Event::new()) {
                            (Ok(evt), Ok(resample_evt)) => match vm.register_irqfd_resample(
                                &evt,
                                &resample_evt,
                                irq_event.irq_id,
                            ) {
                                Ok(()) => {
                                    response_fds.push(evt.as_raw_descriptor());
                                    response_fds.push(resample_evt.as_raw_descriptor());
                                    boxed_fds.push(box_owned_fd(resample_evt));
                                    entry.insert(PluginObject::IrqEvent {
                                        irq_id: irq_event.irq_id,
                                        evt,
                                    });
                                    Ok(())
                                }
                                Err(e) => Err(e),
                            },
                            (Err(e), _) | (_, Err(e)) => Err(e),
                        }
                    } else {
                        Err(SysError::new(ENOTTY))
                    }
                }
                Entry::Occupied(_) => Err(SysError::new(EEXIST)),
            }
        } else if request.has_destroy() {
            response.mut_destroy();
            match self.objects.entry(request.get_destroy().id) {
                Entry::Occupied(entry) => entry.remove().destroy(vm),
                Entry::Vacant(_) => Err(SysError::new(ENOENT)),
            }
        } else if request.has_new_connection() {
            response.mut_new_connection();
            match new_seqpacket_pair() {
                Ok((request_socket, child_socket)) => {
                    self.request_sockets.push(request_socket);
                    response_fds.push(child_socket.as_raw_descriptor());
                    boxed_fds.push(box_owned_fd(child_socket));
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_get_shutdown_eventfd() {
            response.mut_get_shutdown_eventfd();
            response_fds.push(self.kill_evt.as_raw_descriptor());
            Ok(())
        } else if request.has_check_extension() {
            // Safe because the Cap enum is not read by the check_extension method. In that method,
            // cap is cast back to an integer and fed to an ioctl. If the extension name is
            // actually invalid, the kernel will safely reject the extension under the assumption
            // that the capability is legitimately unsupported.
            let cap = unsafe { transmute(request.get_check_extension().extension) };
            response.mut_check_extension().has_extension = vm.check_extension(cap);
            Ok(())
        } else if request.has_reserve_range() {
            response.mut_reserve_range();
            self.handle_reserve_range(request.get_reserve_range())
        } else if request.has_set_irq() {
            response.mut_set_irq();
            let irq = request.get_set_irq();
            vm.set_irq_line(irq.irq_id, irq.active)
        } else if request.has_set_irq_routing() {
            response.mut_set_irq_routing();
            Self::handle_set_irq_routing(vm, request.get_set_irq_routing())
        } else if request.has_get_state() {
            let response_state = response.mut_get_state();
            match get_vm_state(vm, request.get_get_state().set) {
                Ok(state) => {
                    response_state.state = state;
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_set_state() {
            response.mut_set_state();
            let set_state = request.get_set_state();
            set_vm_state(vm, set_state.set, set_state.get_state())
        } else if request.has_set_identity_map_addr() {
            response.mut_set_identity_map_addr();
            let addr = request.get_set_identity_map_addr().address;
            vm.set_identity_map_addr(GuestAddress(addr as u64))
        } else if request.has_pause_vcpus() {
            response.mut_pause_vcpus();
            let pause_vcpus = request.get_pause_vcpus();
            self.handle_pause_vcpus(vcpu_handles, pause_vcpus.cpu_mask, pause_vcpus.user);
            Ok(())
        } else if request.has_get_vcpus() {
            response.mut_get_vcpus();
            for pipe in self.vcpu_pipes.iter() {
                response_fds.push(pipe.plugin_write.as_raw_descriptor());
                response_fds.push(pipe.plugin_read.as_raw_descriptor());
            }
            Ok(())
        } else if request.has_start() {
            response.mut_start();
            if self.started {
                Err(SysError::new(EINVAL))
            } else {
                self.started = true;
                Ok(())
            }
        } else if request.has_get_net_config() {
            match taps.first() {
                Some(tap) => {
                    match Self::handle_get_net_config(tap, response.mut_get_net_config()) {
                        Ok(_) => {
                            response_fds.push(tap.as_raw_descriptor());
                            Ok(())
                        }
                        Err(e) => Err(e),
                    }
                }
                None => Err(SysError::new(ENODATA)),
            }
        } else if request.has_set_call_hint() {
            response.mut_set_call_hint();
            self.handle_set_call_hint(request.get_set_call_hint())
        } else if request.has_dirty_log() {
            let dirty_log_response = response.mut_dirty_log();
            match self.objects.get(&request.get_dirty_log().id) {
                Some(&PluginObject::Memory { slot, length }) => {
                    let dirty_log = dirty_log_response.mut_bitmap();
                    dirty_log.resize(dirty_log_bitmap_size(length), 0);
                    vm.get_dirty_log(slot, &mut dirty_log[..])
                }
                _ => Err(SysError::new(ENOENT)),
            }
        } else if request.has_get_supported_cpuid() {
            let cpuid_response = &mut response.mut_get_supported_cpuid().entries;
            match kvm.get_supported_cpuid() {
                Ok(mut cpuid) => {
                    for entry in cpuid.mut_entries_slice() {
                        cpuid_response.push(cpuid_kvm_to_proto(entry));
                    }
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_get_emulated_cpuid() {
            let cpuid_response = &mut response.mut_get_emulated_cpuid().entries;
            match kvm.get_emulated_cpuid() {
                Ok(mut cpuid) => {
                    for entry in cpuid.mut_entries_slice() {
                        cpuid_response.push(cpuid_kvm_to_proto(entry));
                    }
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_get_msr_index_list() {
            let msr_list_response = &mut response.mut_get_msr_index_list().indices;
            match kvm.get_msr_index_list() {
                Ok(indices) => {
                    for entry in indices {
                        msr_list_response.push(entry);
                    }
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else {
            Err(SysError::new(ENOTTY))
        };

        if let Err(e) = res {
            response.errno = e.errno();
        }

        self.response_buffer.clear();
        response
            .write_to_vec(&mut self.response_buffer)
            .map_err(CommError::EncodeResponse)?;
        assert_ne!(self.response_buffer.len(), 0);
        self.request_sockets[index]
            .send_with_fds(&[IoSlice::new(&self.response_buffer[..])], &response_fds)
            .map_err(CommError::PluginSocketSend)?;

        Ok(())
    }
}

impl Drop for Process {
    fn drop(&mut self) {
        // Ignore the result because there is nothing we can do about it.
        if let Err(e) = self.signal_kill() {
            error!("failed to signal kill event for plugin: {}", e);
        }
    }
}