// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::hash_map::{Entry, HashMap, VacantEntry};
use std::env::set_var;
use std::fs::File;
use std::io::Write;
use std::mem::transmute;
use std::os::unix::io::{AsRawFd, RawFd};
use std::os::unix::net::UnixDatagram;
use std::path::Path;
use std::process::Command;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;

use net_util;
use net_util::Error as NetError;

use libc::{pid_t, waitpid, EINVAL, ENODATA, ENOTTY, WEXITSTATUS, WIFEXITED, WNOHANG, WTERMSIG};

use protobuf;
use protobuf::Message;

use io_jail::Minijail;
use kvm::{dirty_log_bitmap_size, Datamatch, IoeventAddress, IrqRoute, IrqSource, PicId, Vm};
use kvm_sys::{kvm_clock_data, kvm_ioapic_state, kvm_pic_state, kvm_pit_state2};
use protos::plugin::*;
use sync::Mutex;
use sys_util::{
    error, Error as SysError, EventFd, GuestAddress, Killable, MemoryMapping, Result as SysResult,
    ScmSocket, SharedMemory, SIGRTMIN,
};

use super::*;

// Wrapper types to make the kvm state structs DataInit
use data_model::DataInit;
#[derive(Copy, Clone)]
struct VmPicState(kvm_pic_state);
unsafe impl DataInit for VmPicState {}
#[derive(Copy, Clone)]
struct VmIoapicState(kvm_ioapic_state);
unsafe impl DataInit for VmIoapicState {}
#[derive(Copy, Clone)]
struct VmPitState(kvm_pit_state2);
unsafe impl DataInit for VmPitState {}
#[derive(Copy, Clone)]
struct VmClockState(kvm_clock_data);
unsafe impl DataInit for VmClockState {}
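
// Illustrative sketch (comment only, not compiled): `DataInit` gives these plain-old-data
// wrappers byte-slice conversions in both directions, e.g.
//
//     let bytes: Vec<u8> = VmClockState(clock).as_slice().to_vec();
//     let state: &VmClockState = VmClockState::from_slice(&bytes).ok_or(SysError::new(EINVAL))?;
//
// which is how `get_vm_state` and `set_vm_state` below shuttle kvm state across the plugin
// socket as raw bytes.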

fn get_vm_state(vm: &Vm, state_set: MainRequest_StateSet) -> SysResult<Vec<u8>> {
    Ok(match state_set {
        MainRequest_StateSet::PIC0 => VmPicState(vm.get_pic_state(PicId::Primary)?)
            .as_slice()
            .to_vec(),
        MainRequest_StateSet::PIC1 => VmPicState(vm.get_pic_state(PicId::Secondary)?)
            .as_slice()
            .to_vec(),
        MainRequest_StateSet::IOAPIC => VmIoapicState(vm.get_ioapic_state()?).as_slice().to_vec(),
        MainRequest_StateSet::PIT => VmPitState(vm.get_pit_state()?).as_slice().to_vec(),
        MainRequest_StateSet::CLOCK => VmClockState(vm.get_clock()?).as_slice().to_vec(),
    })
}

fn set_vm_state(vm: &Vm, state_set: MainRequest_StateSet, state: &[u8]) -> SysResult<()> {
    match state_set {
        MainRequest_StateSet::PIC0 => vm.set_pic_state(
            PicId::Primary,
            &VmPicState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::PIC1 => vm.set_pic_state(
            PicId::Secondary,
            &VmPicState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::IOAPIC => vm.set_ioapic_state(
            &VmIoapicState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::PIT => vm.set_pit_state(
            &VmPitState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::CLOCK => vm.set_clock(
            &VmClockState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
    }
}

/// The status of a process, either that it is running, or that it exited under some condition.
pub enum ProcessStatus {
    /// The process is running and therefore has no information about its result.
    Running,
    /// The process has exited with a successful code.
    Success,
    /// The process failed with the given exit code.
    Fail(i32),
    /// The process was terminated with the given signal code.
    Signal(i32),
}

/// Creates, owns, and handles messages from a plugin process.
///
/// A plugin process has control over a single VM and a fixed number of VCPUs via a set of pipes &
/// unix domain socket connections and a protocol defined in `protos::plugin`. The plugin process
/// is run in an unprivileged manner as a child process spawned via a path to an arbitrary
/// executable.
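///
/// A rough sketch of the intended lifecycle, assuming `kvm`, `vm`, `vcpu_handles`, and `taps` are
/// already in scope and that `my_plugin` is a hypothetical plugin binary (not compiled as a
/// doctest; error handling and the real poll loop are elided):
///
/// ```ignore
/// let mut process = Process::new(1, Path::new("my_plugin"), &[], None)?;
/// loop {
///     // A real caller polls `process.sockets()` and only services readable indices,
///     // passing hung-up indices to `drop_sockets`.
///     process.handle_socket(0, &kvm, &mut vm, &vcpu_handles, &taps)?;
///     match process.try_wait()? {
///         ProcessStatus::Running => continue,
///         _ => break,
///     }
/// }
/// ```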
pub struct Process {
    started: bool,
    plugin_pid: pid_t,
    request_sockets: Vec<UnixDatagram>,
    objects: HashMap<u32, PluginObject>,
    shared_vcpu_state: Arc<RwLock<SharedVcpuState>>,
    per_vcpu_states: Vec<Arc<Mutex<PerVcpuState>>>,

    // Resources to send to the plugin
    kill_evt: EventFd,
    vcpu_pipes: Vec<VcpuPipe>,

    // Socket transmission
    request_buffer: Vec<u8>,
    response_buffer: Vec<u8>,
}

impl Process {
    /// Creates a new plugin process for the given number of vcpus and VM.
    ///
    /// This will immediately spawn the plugin process and wait for the child to signal that it is
    /// ready to start. This call may block indefinitely.
    ///
    /// Set the `jail` argument to spawn the plugin process within the preconfigured jail.
    /// Due to an API limitation in libminijail necessitating that this function set an environment
    /// variable, this function is not thread-safe.
    pub fn new(
        cpu_count: u32,
        cmd: &Path,
        args: &[&str],
        jail: Option<Minijail>,
    ) -> Result<Process> {
        let (request_socket, child_socket) =
            new_seqpacket_pair().map_err(Error::CreateMainSocket)?;

        let mut vcpu_pipes: Vec<VcpuPipe> = Vec::with_capacity(cpu_count as usize);
        for _ in 0..cpu_count {
            vcpu_pipes.push(new_pipe_pair().map_err(Error::CreateVcpuSocket)?);
        }
        let mut per_vcpu_states: Vec<Arc<Mutex<PerVcpuState>>> =
            Vec::with_capacity(cpu_count as usize);
        // TODO(zachr): replace with `resize_default` when that stabilizes. Using a plain `resize`
        // is incorrect because each element in the `Vec` will contain a shared reference to the
        // same `PerVcpuState` instance. This happens because `resize` fills new slots using clones
        // of the instance given to `resize`.
        for _ in 0..cpu_count {
            per_vcpu_states.push(Default::default());
        }

        let plugin_pid = match jail {
            Some(jail) => {
                set_var("CROSVM_SOCKET", child_socket.as_raw_fd().to_string());
                jail.run(cmd, &[0, 1, 2, child_socket.as_raw_fd()], args)
                    .map_err(Error::PluginRunJail)?
            }
            None => Command::new(cmd)
                .args(args)
                .env("CROSVM_SOCKET", child_socket.as_raw_fd().to_string())
                .spawn()
                .map_err(Error::PluginSpawn)?
                .id() as pid_t,
        };

        Ok(Process {
            started: false,
            plugin_pid,
            request_sockets: vec![request_socket],
            objects: Default::default(),
            shared_vcpu_state: Default::default(),
            per_vcpu_states,
            kill_evt: EventFd::new().map_err(Error::CreateEventFd)?,
            vcpu_pipes,
            request_buffer: vec![0; MAX_DATAGRAM_SIZE],
            response_buffer: Vec::new(),
        })
    }

    /// Creates a VCPU plugin connection object, used by a VCPU run loop to communicate with the
    /// plugin process.
    ///
    /// While each invocation of `create_vcpu` with the given `cpu_id` will return a unique
    /// `PluginVcpu` object, the underlying resources are shared by each `PluginVcpu` resulting from
    /// the same `cpu_id`.
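    ///
    /// A minimal sketch of the expected setup, assuming `process` and `cpu_count` are in scope
    /// and `spawn_vcpu_thread` is a hypothetical helper that runs the VCPU loop (not a doctest):
    ///
    /// ```ignore
    /// for cpu_id in 0..cpu_count {
    ///     let plugin_vcpu = process.create_vcpu(cpu_id)?;
    ///     // Each connection object is handed to the thread that runs that VCPU.
    ///     spawn_vcpu_thread(cpu_id, plugin_vcpu);
    /// }
    /// ```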
    pub fn create_vcpu(&self, cpu_id: u32) -> Result<PluginVcpu> {
        let vcpu_pipe_read = self.vcpu_pipes[cpu_id as usize]
            .crosvm_read
            .try_clone()
            .map_err(Error::CloneVcpuPipe)?;
        let vcpu_pipe_write = self.vcpu_pipes[cpu_id as usize]
            .crosvm_write
            .try_clone()
            .map_err(Error::CloneVcpuPipe)?;
        Ok(PluginVcpu::new(
            self.shared_vcpu_state.clone(),
            self.per_vcpu_states[cpu_id as usize].clone(),
            vcpu_pipe_read,
            vcpu_pipe_write,
        ))
    }

    /// Returns whether the plugin process has indicated that the VM is ready to start.
    pub fn is_started(&self) -> bool {
        self.started
    }

    /// Returns the process ID of the plugin process.
    pub fn pid(&self) -> pid_t {
        self.plugin_pid
    }

    /// Returns a slice of each socket that should be polled.
    ///
    /// If any socket in this slice becomes readable, `handle_socket` should be called with the
    /// index of that socket. If any socket becomes closed, its index should be passed to
    /// `drop_sockets`.
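    ///
    /// A sketch of that polling protocol, assuming `kvm`, `vm`, `vcpu_handles`, and `taps` are in
    /// scope and `poll_readable_and_hungup` is a hypothetical poll helper (not a doctest):
    ///
    /// ```ignore
    /// let (readable, mut hungup) = poll_readable_and_hungup(process.sockets());
    /// for index in readable {
    ///     process.handle_socket(index, &kvm, &mut vm, &vcpu_handles, &taps)?;
    /// }
    /// process.drop_sockets(&mut hungup);
    /// ```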
    pub fn sockets(&self) -> &[UnixDatagram] {
        &self.request_sockets
    }

    /// Drops each socket identified by its index in the slice returned by `sockets`.
    ///
    /// The given `socket_idxs` slice will be modified in an arbitrary way for efficient removal of
    /// the sockets from internal data structures.
    pub fn drop_sockets(&mut self, socket_idxs: &mut [usize]) {
        // Takes a mutable slice so that the indices can be sorted for efficient removal in
        // request_sockets.
        socket_idxs.sort_unstable_by(|a, b| b.cmp(a));
        let old_len = self.request_sockets.len();
        for &socket_index in socket_idxs.iter() {
            // swap_remove changes the index of the last element, but we already know that one
            // doesn't need to be removed because we are removing sockets in descending order thanks
            // to the above sort.
            self.request_sockets.swap_remove(socket_index);
        }
        assert_eq!(old_len - socket_idxs.len(), self.request_sockets.len());
    }

    /// Gently requests that the plugin process exit cleanly, and ends handling of all VCPU
    /// connections.
    ///
    /// The plugin process can ignore the given signal, and so some timeout should be used before
    /// forcefully terminating the process.
    ///
    /// Any blocked VCPU connections will get interrupted so that the VCPU threads can exit cleanly.
    /// Any subsequent attempt to use the VCPU connections will fail.
    pub fn signal_kill(&mut self) -> SysResult<()> {
        self.kill_evt.write(1)?;
        // Normally we'd get any blocked recv() calls in the VCPU threads
        // to unblock by calling shutdown(). However, we're using pipes
        // (for improved performance), and pipes don't have shutdown so
        // instead we'll write a shutdown message to ourselves using the
        // writable side of the pipe (normally used by the plugin).
        for pipe in self.vcpu_pipes.iter_mut() {
            let mut shutdown_request = VcpuRequest::new();
            shutdown_request.set_shutdown(VcpuRequest_Shutdown::new());
            let mut buffer = Vec::new();
            shutdown_request
                .write_to_vec(&mut buffer)
                .map_err(proto_to_sys_err)?;
            pipe.plugin_write
                .write(&buffer[..])
                .map_err(io_to_sys_err)?;
        }
        Ok(())
    }

    /// Waits without blocking for the plugin process to exit and returns the status.
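    ///
    /// A sketch of acting on the result (not a doctest; as `signal_kill` notes, callers should
    /// give the plugin a timeout before terminating it forcefully):
    ///
    /// ```ignore
    /// match process.try_wait()? {
    ///     ProcessStatus::Running => (), // keep polling
    ///     ProcessStatus::Success => println!("plugin exited cleanly"),
    ///     ProcessStatus::Fail(code) => println!("plugin exited with code {}", code),
    ///     ProcessStatus::Signal(sig) => println!("plugin terminated by signal {}", sig),
    /// }
    /// ```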
    pub fn try_wait(&mut self) -> SysResult<ProcessStatus> {
        let mut status = 0;
        // Safe because waitpid is given a valid pointer of correct size and mutability, and the
        // return value is checked.
        let ret = unsafe { waitpid(self.plugin_pid, &mut status, WNOHANG) };
        match ret {
            -1 => Err(SysError::last()),
            0 => Ok(ProcessStatus::Running),
            _ => {
                // Trivially safe
                if unsafe { WIFEXITED(status) } {
                    match unsafe { WEXITSTATUS(status) } {
                        // Trivially safe
                        0 => Ok(ProcessStatus::Success),
                        code => Ok(ProcessStatus::Fail(code)),
                    }
                } else {
                    // Plugin terminated but has no exit status, so it must have been signaled.
                    Ok(ProcessStatus::Signal(unsafe { WTERMSIG(status) })) // Trivially safe
                }
            }
        }
    }

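    /// Creates an ioeventfd registered with `vm` at the address and datamatch described by
    /// `io_event`, records it in `entry`, and returns the raw fd to be sent back to the plugin.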
    fn handle_io_event(
        entry: VacantEntry<u32, PluginObject>,
        vm: &mut Vm,
        io_event: &MainRequest_Create_IoEvent,
    ) -> SysResult<RawFd> {
        let evt = EventFd::new()?;
        let addr = match io_event.space {
            AddressSpace::IOPORT => IoeventAddress::Pio(io_event.address),
            AddressSpace::MMIO => IoeventAddress::Mmio(io_event.address),
        };
        match io_event.length {
            0 => vm.register_ioevent(&evt, addr, Datamatch::AnyLength)?,
            1 => vm.register_ioevent(&evt, addr, Datamatch::U8(Some(io_event.datamatch as u8)))?,
            2 => {
                vm.register_ioevent(&evt, addr, Datamatch::U16(Some(io_event.datamatch as u16)))?
            }
            4 => {
                vm.register_ioevent(&evt, addr, Datamatch::U32(Some(io_event.datamatch as u32)))?
            }
            8 => {
                vm.register_ioevent(&evt, addr, Datamatch::U64(Some(io_event.datamatch as u64)))?
            }
            _ => return Err(SysError::new(EINVAL)),
        };

        let fd = evt.as_raw_fd();
        entry.insert(PluginObject::IoEvent {
            evt,
            addr,
            length: io_event.length,
            datamatch: io_event.datamatch,
        });
        Ok(fd)
    }

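    /// Maps the plugin-provided `memfd` into the guest at `start`, after checking that the memfd
    /// is sealed against shrinking and that `offset + length` does not extend past its size.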
    fn handle_memory(
        entry: VacantEntry<u32, PluginObject>,
        vm: &mut Vm,
        memfd: File,
        offset: u64,
        start: u64,
        length: u64,
        read_only: bool,
        dirty_log: bool,
    ) -> SysResult<()> {
        let shm = SharedMemory::from_raw_fd(memfd)?;
        // Checking the seals ensures the plugin process won't shrink the mmapped file, causing us
        // to SIGBUS in the future.
        let seals = shm.get_seals()?;
        if !seals.shrink_seal() {
            return Err(SysError::new(EPERM));
        }
        // Check to make sure we don't mmap areas beyond the end of the memfd.
        match length.checked_add(offset) {
            Some(end) if end > shm.size() => return Err(SysError::new(EINVAL)),
            None => return Err(SysError::new(EOVERFLOW)),
            _ => {}
        }
        let mem = MemoryMapping::from_fd_offset(&shm, length as usize, offset as usize)
            .map_err(mmap_to_sys_err)?;
        let slot = vm.add_device_memory(GuestAddress(start), mem, read_only, dirty_log)?;
        entry.insert(PluginObject::Memory {
            slot,
            length: length as usize,
        });
        Ok(())
    }

    fn handle_reserve_range(&mut self, reserve_range: &MainRequest_ReserveRange) -> SysResult<()> {
        match self.shared_vcpu_state.write() {
            Ok(mut lock) => {
                let space = match reserve_range.space {
                    AddressSpace::IOPORT => IoSpace::Ioport,
                    AddressSpace::MMIO => IoSpace::Mmio,
                };
                match reserve_range.length {
                    0 => lock.unreserve_range(space, reserve_range.start),
                    _ => lock.reserve_range(space, reserve_range.start, reserve_range.length),
                }
            }
            Err(_) => Err(SysError::new(EDEADLK)),
        }
    }

    fn handle_set_irq_routing(
        vm: &mut Vm,
        irq_routing: &MainRequest_SetIrqRouting,
    ) -> SysResult<()> {
        let mut routes = Vec::with_capacity(irq_routing.routes.len());
        for route in &irq_routing.routes {
            routes.push(IrqRoute {
                gsi: route.irq_id,
                source: if route.has_irqchip() {
                    let irqchip = route.get_irqchip();
                    IrqSource::Irqchip {
                        chip: irqchip.irqchip,
                        pin: irqchip.pin,
                    }
                } else if route.has_msi() {
                    let msi = route.get_msi();
                    IrqSource::Msi {
                        address: msi.address,
                        data: msi.data,
                    }
                } else {
                    // Because route is a oneof field in the proto definition, this should
                    // only happen if a new variant gets added without updating this chained
                    // if block.
                    return Err(SysError::new(EINVAL));
                },
            });
        }
        vm.set_gsi_routing(&routes[..])
    }

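    /// Requests a pause, carrying `user_data`, for every VCPU whose bit is set in `cpu_mask`, then
    /// interrupts the matching VCPU threads with `SIGRTMIN() + 0` so they observe the request.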
    fn handle_pause_vcpus(&self, vcpu_handles: &[JoinHandle<()>], cpu_mask: u64, user_data: u64) {
        for (cpu_id, (handle, per_cpu_state)) in
            vcpu_handles.iter().zip(&self.per_vcpu_states).enumerate()
        {
            if cpu_mask & (1 << cpu_id) != 0 {
                per_cpu_state.lock().request_pause(user_data);
                if let Err(e) = handle.kill(SIGRTMIN() + 0) {
                    error!("failed to interrupt vcpu {}: {}", cpu_id, e);
                }
            }
        }
    }

    fn handle_get_net_config(
        tap: &net_util::Tap,
        config: &mut MainResponse_GetNetConfig,
    ) -> SysResult<()> {
        // Log any NetError so that the cause can be found later, but extract and return the
        // underlying errno for the client as well.
        fn map_net_error(s: &str, e: NetError) -> SysError {
            error!("failed to get {}: {}", s, e);
            e.sys_error()
        }

        let ip_addr = tap.ip_addr().map_err(|e| map_net_error("IP address", e))?;
        config.set_host_ipv4_address(u32::from(ip_addr));

        let netmask = tap.netmask().map_err(|e| map_net_error("netmask", e))?;
        config.set_netmask(u32::from(netmask));

        let result_mac_addr = config.mut_host_mac_address();
        let mac_addr_octets = tap
            .mac_address()
            .map_err(|e| map_net_error("mac address", e))?
            .octets();
        result_mac_addr.resize(mac_addr_octets.len(), 0);
        result_mac_addr.clone_from_slice(&mac_addr_octets);

        Ok(())
    }

    /// Handles a request on a readable socket identified by its index in the slice returned by
    /// `sockets`.
    ///
    /// The `vm` is used to service requests that affect the VM. The `vcpu_handles` slice is used
    /// to interrupt a VCPU thread currently running in the VM if the socket request requires it.
    pub fn handle_socket(
        &mut self,
        index: usize,
        kvm: &Kvm,
        vm: &mut Vm,
        vcpu_handles: &[JoinHandle<()>],
        taps: &[Tap],
    ) -> Result<()> {
        let (msg_size, request_file) = self.request_sockets[index]
            .recv_with_fd(&mut self.request_buffer)
            .map_err(Error::PluginSocketRecv)?;

        if msg_size == 0 {
            return Err(Error::PluginSocketHup);
        }

        let request = protobuf::parse_from_bytes::<MainRequest>(&self.request_buffer[..msg_size])
            .map_err(Error::DecodeRequest)?;

        let mut response_files = Vec::new();
        let mut response_fds = Vec::new();
        let mut response = MainResponse::new();
        let res = if request.has_create() {
            response.mut_create();
            let create = request.get_create();
            match self.objects.entry(create.id) {
                Entry::Vacant(entry) => {
                    if create.has_io_event() {
                        match Self::handle_io_event(entry, vm, create.get_io_event()) {
                            Ok(fd) => {
                                response_fds.push(fd);
                                Ok(())
                            }
                            Err(e) => Err(e),
                        }
                    } else if create.has_memory() {
                        let memory = create.get_memory();
                        match request_file {
                            Some(memfd) => Self::handle_memory(
                                entry,
                                vm,
                                memfd,
                                memory.offset,
                                memory.start,
                                memory.length,
                                memory.read_only,
                                memory.dirty_log,
                            ),
                            None => Err(SysError::new(EBADF)),
                        }
                    } else if create.has_irq_event() {
                        let irq_event = create.get_irq_event();
                        match (EventFd::new(), EventFd::new()) {
                            (Ok(evt), Ok(resample_evt)) => match vm.register_irqfd_resample(
                                &evt,
                                &resample_evt,
                                irq_event.irq_id,
                            ) {
                                Ok(()) => {
                                    response_fds.push(evt.as_raw_fd());
                                    response_fds.push(resample_evt.as_raw_fd());
                                    response_files.push(downcast_file(resample_evt));
                                    entry.insert(PluginObject::IrqEvent {
                                        irq_id: irq_event.irq_id,
                                        evt,
                                    });
                                    Ok(())
                                }
                                Err(e) => Err(e),
                            },
                            (Err(e), _) | (_, Err(e)) => Err(e),
                        }
                    } else {
                        Err(SysError::new(ENOTTY))
                    }
                }
                Entry::Occupied(_) => Err(SysError::new(EEXIST)),
            }
        } else if request.has_destroy() {
            response.mut_destroy();
            match self.objects.entry(request.get_destroy().id) {
                Entry::Occupied(entry) => entry.remove().destroy(vm),
                Entry::Vacant(_) => Err(SysError::new(ENOENT)),
            }
        } else if request.has_new_connection() {
            response.mut_new_connection();
            match new_seqpacket_pair() {
                Ok((request_socket, child_socket)) => {
                    self.request_sockets.push(request_socket);
                    response_fds.push(child_socket.as_raw_fd());
                    response_files.push(downcast_file(child_socket));
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_get_shutdown_eventfd() {
            response.mut_get_shutdown_eventfd();
            response_fds.push(self.kill_evt.as_raw_fd());
            Ok(())
        } else if request.has_check_extension() {
            // Safe because the Cap enum is not read by the check_extension method. In that method,
            // cap is cast back to an integer and fed to an ioctl. If the extension name is actually
            // invalid, the kernel will safely reject the extension under the assumption that the
            // capability is legitimately unsupported.
            let cap = unsafe { transmute(request.get_check_extension().extension) };
            response.mut_check_extension().has_extension = vm.check_extension(cap);
            Ok(())
        } else if request.has_reserve_range() {
            response.mut_reserve_range();
            self.handle_reserve_range(request.get_reserve_range())
        } else if request.has_set_irq() {
            response.mut_set_irq();
            let irq = request.get_set_irq();
            vm.set_irq_line(irq.irq_id, irq.active)
        } else if request.has_set_irq_routing() {
            response.mut_set_irq_routing();
            Self::handle_set_irq_routing(vm, request.get_set_irq_routing())
        } else if request.has_get_state() {
            let response_state = response.mut_get_state();
            match get_vm_state(vm, request.get_get_state().set) {
                Ok(state) => {
                    response_state.state = state;
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_set_state() {
            response.mut_set_state();
            let set_state = request.get_set_state();
            set_vm_state(vm, set_state.set, set_state.get_state())
        } else if request.has_set_identity_map_addr() {
            response.mut_set_identity_map_addr();
            let addr = request.get_set_identity_map_addr().address;
            vm.set_identity_map_addr(GuestAddress(addr as u64))
        } else if request.has_pause_vcpus() {
            response.mut_pause_vcpus();
            let pause_vcpus = request.get_pause_vcpus();
            self.handle_pause_vcpus(vcpu_handles, pause_vcpus.cpu_mask, pause_vcpus.user);
            Ok(())
        } else if request.has_get_vcpus() {
            response.mut_get_vcpus();
            for pipe in self.vcpu_pipes.iter() {
                response_fds.push(pipe.plugin_write.as_raw_fd());
                response_fds.push(pipe.plugin_read.as_raw_fd());
            }
            Ok(())
        } else if request.has_start() {
            response.mut_start();
            if self.started {
                Err(SysError::new(EINVAL))
            } else {
                self.started = true;
                Ok(())
            }
        } else if request.has_get_net_config() {
            match taps.first() {
                Some(tap) => {
                    match Self::handle_get_net_config(tap, response.mut_get_net_config()) {
                        Ok(_) => {
                            response_fds.push(tap.as_raw_fd());
                            Ok(())
                        }
                        Err(e) => Err(e),
                    }
                }
                None => Err(SysError::new(ENODATA)),
            }
        } else if request.has_dirty_log() {
            let dirty_log_response = response.mut_dirty_log();
            match self.objects.get(&request.get_dirty_log().id) {
                Some(&PluginObject::Memory { slot, length }) => {
                    let dirty_log = dirty_log_response.mut_bitmap();
                    dirty_log.resize(dirty_log_bitmap_size(length), 0);
                    vm.get_dirty_log(slot, &mut dirty_log[..])
                }
                _ => Err(SysError::new(ENOENT)),
            }
        } else if request.has_get_supported_cpuid() {
            let cpuid_response = &mut response.mut_get_supported_cpuid().entries;
            match kvm.get_supported_cpuid() {
                Ok(mut cpuid) => {
                    for entry in cpuid.mut_entries_slice() {
                        cpuid_response.push(cpuid_kvm_to_proto(entry));
                    }
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_get_emulated_cpuid() {
            let cpuid_response = &mut response.mut_get_emulated_cpuid().entries;
            match kvm.get_emulated_cpuid() {
                Ok(mut cpuid) => {
                    for entry in cpuid.mut_entries_slice() {
                        cpuid_response.push(cpuid_kvm_to_proto(entry));
                    }
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_get_msr_index_list() {
            let msr_list_response = &mut response.mut_get_msr_index_list().indices;
            match kvm.get_msr_index_list() {
                Ok(indices) => {
                    for entry in indices {
                        msr_list_response.push(entry);
                    }
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else {
            Err(SysError::new(ENOTTY))
        };

        if let Err(e) = res {
            response.errno = e.errno();
        }

        self.response_buffer.clear();
        response
            .write_to_vec(&mut self.response_buffer)
            .map_err(Error::EncodeResponse)?;
        assert_ne!(self.response_buffer.len(), 0);
        self.request_sockets[index]
            .send_with_fds(&self.response_buffer[..], &response_fds)
            .map_err(Error::PluginSocketSend)?;

        Ok(())
    }
}

impl Drop for Process {
    fn drop(&mut self) {
        // Ignore the result because there is nothing we can do about it.
        if let Err(e) = self.signal_kill() {
            error!("failed to signal kill event for plugin: {}", e);
        }
    }
}