1 // Copyright 2017 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 use std::cell::RefCell;
6 use std::fs::File;
7 use std::io::prelude::*;
8 use std::sync::mpsc;
9 use std::sync::Arc;
10 use std::sync::Barrier;
11 use std::thread;
12 use std::thread::JoinHandle;
13 #[cfg(target_arch = "x86_64")]
14 use std::time::Duration;
15
16 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
17 use aarch64::AArch64 as Arch;
18 use anyhow::Context;
19 use anyhow::Result;
20 use arch::CpuConfigArch;
21 use arch::CpuSet;
22 use arch::IrqChipArch;
23 use arch::LinuxArch;
24 use arch::VcpuArch;
25 use arch::VcpuInitArch;
26 use arch::VmArch;
27 use base::sched_attr;
28 use base::sched_setattr;
29 use base::signal::clear_signal_handler;
30 use base::signal::BlockedSignal;
31 use base::*;
32 use devices::Bus;
33 use devices::IrqChip;
34 use devices::VcpuRunState;
35 use hypervisor::IoOperation;
36 use hypervisor::IoParams;
37 use hypervisor::VcpuExit;
38 use hypervisor::VcpuSignalHandle;
39 use libc::c_int;
40 #[cfg(target_arch = "riscv64")]
41 use riscv64::Riscv64 as Arch;
42 #[cfg(target_arch = "x86_64")]
43 use sync::Mutex;
44 use vm_control::*;
45 #[cfg(feature = "gdb")]
46 use vm_memory::GuestMemory;
47 #[cfg(target_arch = "x86_64")]
48 use x86_64::X8664arch as Arch;
49
50 use super::ExitState;
51 #[cfg(target_arch = "x86_64")]
52 use crate::crosvm::ratelimit::Ratelimit;
53
// TODO(davidai): Import libc constant when updated
// Flag bits for `sched_attr.sched_flags`; values mirror the Linux UAPI
// `SCHED_FLAG_*` constants (see <linux/sched/types.h>).
const SCHED_FLAG_RESET_ON_FORK: u64 = 0x1;
const SCHED_FLAG_KEEP_POLICY: u64 = 0x08;
const SCHED_FLAG_KEEP_PARAMS: u64 = 0x10;
const SCHED_FLAG_UTIL_CLAMP_MIN: u64 = 0x20;
// Maximum utilization clamp value (the kernel's SCHED_CAPACITY_SCALE).
const SCHED_SCALE_CAPACITY: u32 = 1024;

// Keep both the current scheduling policy and its parameters untouched when
// calling sched_setattr(); only the fields explicitly changed take effect.
const SCHED_FLAG_KEEP_ALL: u64 = SCHED_FLAG_KEEP_POLICY | SCHED_FLAG_KEEP_PARAMS;
62
bus_io_handler(bus: &Bus) -> impl FnMut(IoParams) -> Option<[u8; 8]> + '_63 fn bus_io_handler(bus: &Bus) -> impl FnMut(IoParams) -> Option<[u8; 8]> + '_ {
64 |IoParams {
65 address,
66 mut size,
67 operation: direction,
68 }| match direction {
69 IoOperation::Read => {
70 let mut data = [0u8; 8];
71 if size > data.len() {
72 error!("unsupported Read size of {} bytes", size);
73 size = data.len();
74 }
75 // Ignore the return value of `read()`. If no device exists on the bus at the given
76 // location, return the initial value of data, which is all zeroes.
77 let _ = bus.read(address, &mut data[..size]);
78 Some(data)
79 }
80 IoOperation::Write { data } => {
81 if size > data.len() {
82 error!("unsupported Write size of {} bytes", size);
83 size = data.len()
84 }
85 let data = &data[..size];
86 bus.write(address, data);
87 None
88 }
89 }
90 }
91
92 /// Set the VCPU thread affinity and other per-thread scheduler properties.
93 /// This function will be called from each VCPU thread at startup.
set_vcpu_thread_scheduling( vcpu_affinity: CpuSet, core_scheduling: bool, enable_per_vm_core_scheduling: bool, vcpu_cgroup_tasks_file: Option<File>, run_rt: bool, boost_uclamp: bool, ) -> anyhow::Result<()>94 pub fn set_vcpu_thread_scheduling(
95 vcpu_affinity: CpuSet,
96 core_scheduling: bool,
97 enable_per_vm_core_scheduling: bool,
98 vcpu_cgroup_tasks_file: Option<File>,
99 run_rt: bool,
100 boost_uclamp: bool,
101 ) -> anyhow::Result<()> {
102 if !vcpu_affinity.is_empty() {
103 if let Err(e) = set_cpu_affinity(vcpu_affinity) {
104 error!("Failed to set CPU affinity: {}", e);
105 }
106 }
107
108 if boost_uclamp {
109 let mut sched_attr = sched_attr::default();
110 sched_attr.sched_flags = SCHED_FLAG_KEEP_ALL
111 | SCHED_FLAG_UTIL_CLAMP_MIN
112 | SCHED_FLAG_RESET_ON_FORK;
113 sched_attr.sched_util_min = SCHED_SCALE_CAPACITY;
114
115 if let Err(e) = sched_setattr(0, &mut sched_attr, 0) {
116 warn!("Failed to boost vcpu util: {}", e);
117 }
118 }
119
120 if core_scheduling && !enable_per_vm_core_scheduling {
121 // Do per-vCPU core scheduling by setting a unique cookie to each vCPU.
122 if let Err(e) = enable_core_scheduling() {
123 error!("Failed to enable core scheduling: {}", e);
124 }
125 }
126
127 // Move vcpu thread to cgroup
128 if let Some(mut f) = vcpu_cgroup_tasks_file {
129 f.write_all(base::gettid().to_string().as_bytes())
130 .context("failed to write vcpu tid to cgroup tasks")?;
131 }
132
133 if run_rt {
134 const DEFAULT_VCPU_RT_LEVEL: u16 = 6;
135 if let Err(e) = set_rt_prio_limit(u64::from(DEFAULT_VCPU_RT_LEVEL))
136 .and_then(|_| set_rt_round_robin(i32::from(DEFAULT_VCPU_RT_LEVEL)))
137 {
138 warn!("Failed to set vcpu to real time: {}", e);
139 }
140 }
141
142 Ok(())
143 }
144
145 // Sets up a vcpu and converts it into a runnable vcpu.
runnable_vcpu<V>( cpu_id: usize, vcpu_id: usize, vcpu: Option<V>, vcpu_init: VcpuInitArch, vm: impl VmArch, irq_chip: &mut dyn IrqChipArch, vcpu_count: usize, cpu_config: Option<CpuConfigArch>, ) -> Result<V> where V: VcpuArch,146 pub fn runnable_vcpu<V>(
147 cpu_id: usize,
148 vcpu_id: usize,
149 vcpu: Option<V>,
150 vcpu_init: VcpuInitArch,
151 vm: impl VmArch,
152 irq_chip: &mut dyn IrqChipArch,
153 vcpu_count: usize,
154 cpu_config: Option<CpuConfigArch>,
155 ) -> Result<V>
156 where
157 V: VcpuArch,
158 {
159 let mut vcpu = match vcpu {
160 Some(v) => v,
161 None => {
162 // If vcpu is None, it means this arch/hypervisor requires create_vcpu to be called from
163 // the vcpu thread.
164 match vm
165 .create_vcpu(vcpu_id)
166 .context("failed to create vcpu")?
167 .downcast::<V>()
168 {
169 Ok(v) => *v,
170 Err(_) => panic!("VM created wrong type of VCPU"),
171 }
172 }
173 };
174
175 irq_chip
176 .add_vcpu(cpu_id, &vcpu)
177 .context("failed to add vcpu to irq chip")?;
178
179 Arch::configure_vcpu(
180 &vm,
181 vm.get_hypervisor(),
182 irq_chip,
183 &mut vcpu,
184 vcpu_init,
185 cpu_id,
186 vcpu_count,
187 cpu_config,
188 )
189 .context("failed to configure vcpu")?;
190
191 Ok(vcpu)
192 }
193
// Per-thread handle that the vCPU signal handler uses to request an immediate
// exit of the vCPU running on this thread; `None` while no vCPU is attached.
thread_local!(static VCPU_THREAD: RefCell<Option<VcpuSignalHandle>> = RefCell::new(None));
195
set_vcpu_thread_local(vcpu: Option<&dyn VcpuArch>, signal_num: c_int)196 fn set_vcpu_thread_local(vcpu: Option<&dyn VcpuArch>, signal_num: c_int) {
197 // Block signal while we add -- if a signal fires (very unlikely,
198 // as this means something is trying to pause the vcpu before it has
199 // even started) it'll try to grab the read lock while this write
200 // lock is grabbed and cause a deadlock.
201 // Assuming that a failure to block means it's already blocked.
202 let _blocked_signal = BlockedSignal::new(signal_num);
203
204 VCPU_THREAD.with(|v| {
205 let mut vcpu_thread = v.borrow_mut();
206
207 if let Some(vcpu) = vcpu {
208 assert!(vcpu_thread.is_none());
209 *vcpu_thread = Some(vcpu.signal_handle());
210 } else {
211 *vcpu_thread = None;
212 }
213 });
214 }
215
setup_vcpu_signal_handler() -> Result<()>216 pub fn setup_vcpu_signal_handler() -> Result<()> {
217 // SAFETY: trivially safe as we check return value.
218 unsafe {
219 extern "C" fn handle_signal(_: c_int) {
220 // Use LocalKey::try_with() so we don't panic if a signal happens while the destructor
221 // is running, and ignore any errors (the assumption being that the thread is exiting
222 // anyway in that case).
223 let _result = VCPU_THREAD.try_with(|v| {
224 if let Some(vcpu_signal_handle) = &(*v.borrow()) {
225 vcpu_signal_handle.signal_immediate_exit();
226 }
227 });
228 }
229
230 register_rt_signal_handler(SIGRTMIN() + 0, handle_signal)
231 .context("error registering signal handler")?;
232 }
233 Ok(())
234 }
235
remove_vcpu_signal_handler() -> Result<()>236 pub fn remove_vcpu_signal_handler() -> Result<()> {
237 clear_signal_handler(SIGRTMIN() + 0).context("error unregistering signal handler")
238 }
239
// Runs the vCPU execution loop on the current thread until the VM stops,
// resets, or crashes, returning the corresponding `ExitState`.
//
// Each iteration: drain pending `VcpuControl` messages from the main thread
// (run-state changes, gdb debug, RT promotion, snapshot/restore), wait for the
// irq chip to consider the vcpu runnable, call `vcpu.run()`, and dispatch the
// resulting exit to the I/O buses / irq chip.
fn vcpu_loop<V>(
    mut run_mode: VmRunMode,
    cpu_id: usize,
    mut vcpu: V,
    irq_chip: Box<dyn IrqChipArch + 'static>,
    run_rt: bool,
    delay_rt: bool,
    io_bus: Bus,
    mmio_bus: Bus,
    from_main_tube: mpsc::Receiver<VcpuControl>,
    #[cfg(feature = "gdb")] to_gdb_tube: Option<mpsc::Sender<VcpuDebugStatusMessage>>,
    #[cfg(feature = "gdb")] guest_mem: GuestMemory,
    #[cfg(target_arch = "x86_64")] bus_lock_ratelimit_ctrl: Arc<Mutex<Ratelimit>>,
) -> ExitState
where
    V: VcpuArch,
{
    // Set when `wait_until_runnable()` or `run()` was interrupted by the vCPU
    // kick signal; forces a pass through the message-processing block below.
    let mut interrupted_by_signal = false;

    loop {
        // Start by checking for messages to process and the run state of the CPU.
        // An extra check here for Running so there isn't a need to call recv unless a
        // message is likely to be ready because a signal was sent.
        if interrupted_by_signal || run_mode != VmRunMode::Running {
            'state_loop: loop {
                // Tries to get a pending message without blocking first.
                let msg = match from_main_tube.try_recv() {
                    Ok(m) => m,
                    Err(mpsc::TryRecvError::Empty) if run_mode == VmRunMode::Running => {
                        // If the VM is running and no message is pending, the state won't
                        // change.
                        break 'state_loop;
                    }
                    Err(mpsc::TryRecvError::Empty) => {
                        // If the VM is not running, wait until a message is ready.
                        match from_main_tube.recv() {
                            Ok(m) => m,
                            Err(mpsc::RecvError) => {
                                error!("Failed to read from main tube in vcpu");
                                return ExitState::Crash;
                            }
                        }
                    }
                    Err(mpsc::TryRecvError::Disconnected) => {
                        error!("Failed to read from main tube in vcpu");
                        return ExitState::Crash;
                    }
                };

                // Collect all pending messages.
                let mut messages = vec![msg];
                messages.append(&mut from_main_tube.try_iter().collect());

                for msg in messages {
                    match msg {
                        VcpuControl::RunState(new_mode) => {
                            run_mode = new_mode;
                            match run_mode {
                                VmRunMode::Running => {}
                                VmRunMode::Suspending => {
                                    if let Err(e) = vcpu.on_suspend() {
                                        error!(
                                            "failed to tell hypervisor vcpu {} is suspending: {}",
                                            cpu_id, e
                                        );
                                    }
                                }
                                VmRunMode::Breakpoint => {}
                                VmRunMode::Exiting => return ExitState::Stop,
                            }
                        }
                        #[cfg(feature = "gdb")]
                        VcpuControl::Debug(d) => {
                            if let Err(e) = crate::crosvm::gdb::vcpu_control_debug(
                                cpu_id,
                                &vcpu,
                                &guest_mem,
                                d,
                                to_gdb_tube.as_ref(),
                            ) {
                                error!("Failed to handle VcpuControl::Debug message: {:#}", e);
                            }
                        }
                        // Promote this thread to real-time priority on request;
                        // only honored when RT was requested but deliberately
                        // delayed at startup (run_rt && delay_rt).
                        VcpuControl::MakeRT => {
                            if run_rt && delay_rt {
                                info!("Making vcpu {} RT\n", cpu_id);
                                const DEFAULT_VCPU_RT_LEVEL: u16 = 6;
                                if let Err(e) = set_rt_prio_limit(u64::from(DEFAULT_VCPU_RT_LEVEL))
                                    .and_then(|_| {
                                        set_rt_round_robin(i32::from(DEFAULT_VCPU_RT_LEVEL))
                                    })
                                {
                                    warn!("Failed to set vcpu to real time: {}", e);
                                }
                            }
                        }
                        VcpuControl::GetStates(response_chan) => {
                            if let Err(e) = response_chan.send(run_mode) {
                                error!("Failed to send GetState: {}", e);
                            };
                        }
                        // Serialize this vcpu's state into a per-vcpu fragment
                        // and report the result back to the requester.
                        VcpuControl::Snapshot(snapshot_writer, response_chan) => {
                            let resp = vcpu
                                .snapshot()
                                .and_then(|s| {
                                    snapshot_writer
                                        .write_fragment(&format!("vcpu{}", vcpu.id()), &s)
                                })
                                .with_context(|| format!("Failed to snapshot Vcpu #{}", vcpu.id()));
                            if let Err(e) = response_chan.send(resp) {
                                error!("Failed to send snapshot response: {}", e);
                            }
                        }
                        // Restore this vcpu's state from the matching fragment.
                        VcpuControl::Restore(req) => {
                            let resp = req
                                .snapshot_reader
                                .read_fragment(&format!("vcpu{}", vcpu.id()))
                                .and_then(|s| {
                                    vcpu.restore(
                                        &s,
                                        #[cfg(target_arch = "x86_64")]
                                        req.host_tsc_reference_moment,
                                    )
                                })
                                .with_context(|| format!("Failed to restore Vcpu #{}", vcpu.id()));
                            if let Err(e) = req.result_sender.send(resp) {
                                error!("Failed to send restore response: {}", e);
                            }
                        }
                    }
                }
                if run_mode == VmRunMode::Running {
                    break 'state_loop;
                }
            }
        }

        interrupted_by_signal = false;

        // Vcpus may have run a HLT instruction, which puts them into a state other than
        // VcpuRunState::Runnable. In that case, this call to wait_until_runnable blocks
        // until either the irqchip receives an interrupt for this vcpu, or until the main
        // thread kicks this vcpu as a result of some VmControl operation. In most IrqChip
        // implementations HLT instructions do not make it to crosvm, and thus this is a
        // no-op that always returns VcpuRunState::Runnable.
        match irq_chip.wait_until_runnable(vcpu.as_vcpu()) {
            Ok(VcpuRunState::Runnable) => {}
            Ok(VcpuRunState::Interrupted) => interrupted_by_signal = true,
            Err(e) => error!(
                "error waiting for vcpu {} to become runnable: {}",
                cpu_id, e
            ),
        }

        if !interrupted_by_signal {
            match vcpu.run() {
                // Port I/O exit: forward the access to the io bus.
                Ok(VcpuExit::Io) => {
                    if let Err(e) = vcpu.handle_io(&mut bus_io_handler(&io_bus)) {
                        error!("failed to handle io: {}", e)
                    }
                }
                // MMIO exit: forward the access to the mmio bus.
                Ok(VcpuExit::Mmio) => {
                    if let Err(e) = vcpu.handle_mmio(&mut bus_io_handler(&mmio_bus)) {
                        error!("failed to handle mmio: {}", e);
                    }
                }
                Ok(VcpuExit::IoapicEoi { vector }) => {
                    if let Err(e) = irq_chip.broadcast_eoi(vector) {
                        error!(
                            "failed to broadcast eoi {} on vcpu {}: {}",
                            vector, cpu_id, e
                        );
                    }
                }
                Ok(VcpuExit::IrqWindowOpen) => {}
                Ok(VcpuExit::Hlt) => irq_chip.halted(cpu_id),
                Ok(VcpuExit::Shutdown) => return ExitState::Stop,
                Ok(VcpuExit::FailEntry {
                    hardware_entry_failure_reason,
                }) => {
                    error!("vcpu hw run failure: {:#x}", hardware_entry_failure_reason);
                    return ExitState::Crash;
                }
                Ok(VcpuExit::SystemEventShutdown) => {
                    info!("system shutdown event on vcpu {}", cpu_id);
                    return ExitState::Stop;
                }
                Ok(VcpuExit::SystemEventReset) => {
                    info!("system reset event");
                    return ExitState::Reset;
                }
                Ok(VcpuExit::SystemEventCrash) => {
                    info!("system crash event on vcpu {}", cpu_id);
                    return ExitState::Stop;
                }
                Ok(VcpuExit::Debug) => {
                    #[cfg(feature = "gdb")]
                    if let Err(e) =
                        crate::crosvm::gdb::vcpu_exit_debug(cpu_id, to_gdb_tube.as_ref())
                    {
                        error!("Failed to handle VcpuExit::Debug: {:#}", e);
                        return ExitState::Crash;
                    }

                    run_mode = VmRunMode::Breakpoint;
                }
                // Guest triggered a bus lock; throttle it via the shared rate
                // limiter to mitigate bus-lock-based host DoS.
                #[cfg(target_arch = "x86_64")]
                Ok(VcpuExit::BusLock) => {
                    let delay_ns: u64 = bus_lock_ratelimit_ctrl.lock().ratelimit_calculate_delay(1);
                    thread::sleep(Duration::from_nanos(delay_ns));
                }
                Ok(VcpuExit::Sbi {
                    extension_id: _,
                    function_id: _,
                    args: _,
                }) => {
                    unimplemented!("Sbi exits not yet supported");
                }
                Ok(VcpuExit::RiscvCsr {
                    csr_num,
                    new_value,
                    write_mask,
                    ret_value: _,
                }) => {
                    unimplemented!(
                        "csr exit! {:#x} to {:#x} mask {:#x}",
                        csr_num,
                        new_value,
                        write_mask
                    );
                }

                Ok(r) => warn!("unexpected vcpu exit: {:?}", r),
                Err(e) => match e.errno() {
                    // run() was interrupted by the vCPU kick signal: process
                    // control messages at the top of the next iteration.
                    libc::EINTR => interrupted_by_signal = true,
                    // Nothing to do; retry the run on the next iteration.
                    libc::EAGAIN => {}
                    _ => {
                        error!("vcpu hit unknown error: {}", e);
                        return ExitState::Crash;
                    }
                },
            }
        }

        if interrupted_by_signal {
            // Clear the immediate-exit request set by the signal handler so
            // the next run() is not aborted right away.
            vcpu.set_immediate_exit(false);
        }

        // Deliver any interrupts the irq chip has queued for this vcpu before
        // re-entering run().
        if let Err(e) = irq_chip.inject_interrupts(vcpu.as_vcpu()) {
            error!("failed to inject interrupts for vcpu {}: {}", cpu_id, e);
        }
    }
}
493
/// Spawns the `crosvm_vcpu{cpu_id}` thread: applies per-thread scheduling
/// settings, creates/configures the vcpu, runs `vcpu_loop` to completion, and
/// reports the final `VmEventType` over `vm_evt_wrtube`.
///
/// Returns the `JoinHandle` for the spawned thread, or an error if the thread
/// could not be spawned.
pub fn run_vcpu<V>(
    cpu_id: usize,
    vcpu_id: usize,
    vcpu: Option<V>,
    vcpu_init: VcpuInitArch,
    vm: impl VmArch + 'static,
    mut irq_chip: Box<dyn IrqChipArch + 'static>,
    vcpu_count: usize,
    run_rt: bool,
    vcpu_affinity: CpuSet,
    delay_rt: bool,
    start_barrier: Arc<Barrier>,
    mut io_bus: Bus,
    mut mmio_bus: Bus,
    vm_evt_wrtube: SendTube,
    from_main_tube: mpsc::Receiver<VcpuControl>,
    #[cfg(feature = "gdb")] to_gdb_tube: Option<mpsc::Sender<VcpuDebugStatusMessage>>,
    enable_core_scheduling: bool,
    enable_per_vm_core_scheduling: bool,
    cpu_config: Option<CpuConfigArch>,
    vcpu_cgroup_tasks_file: Option<File>,
    #[cfg(target_arch = "x86_64")] bus_lock_ratelimit_ctrl: Arc<Mutex<Ratelimit>>,
    run_mode: VmRunMode,
    boost_uclamp: bool,
) -> Result<JoinHandle<()>>
where
    V: VcpuArch + 'static,
{
    thread::Builder::new()
        .name(format!("crosvm_vcpu{}", cpu_id))
        .spawn(move || {
            // Having a closure returning ExitState guarantees that we
            // send a VmEventType on all code paths after the closure
            // returns.
            let vcpu_fn = || -> ExitState {
                if let Err(e) = set_vcpu_thread_scheduling(
                    vcpu_affinity,
                    enable_core_scheduling,
                    enable_per_vm_core_scheduling,
                    vcpu_cgroup_tasks_file,
                    // RT is applied now only when not deferred; a deferred
                    // request is completed later via VcpuControl::MakeRT.
                    run_rt && !delay_rt,
                    boost_uclamp,
                ) {
                    error!("vcpu thread setup failed: {:#}", e);
                    return ExitState::Stop;
                }

                #[cfg(feature = "gdb")]
                let guest_mem = vm.get_memory().clone();

                let runnable_vcpu = runnable_vcpu(
                    cpu_id,
                    vcpu_id,
                    vcpu,
                    vcpu_init,
                    vm,
                    irq_chip.as_mut(),
                    vcpu_count,
                    cpu_config,
                );

                // Rendezvous with the other parties sharing `start_barrier`
                // before inspecting the setup result, so no thread is left
                // blocked on the barrier if this vcpu failed to set up.
                start_barrier.wait();

                let vcpu = match runnable_vcpu {
                    Ok(v) => v,
                    Err(e) => {
                        error!("failed to start vcpu {}: {:#}", cpu_id, e);
                        return ExitState::Stop;
                    }
                };

                // Publish this vcpu's signal handle so the vCPU kick signal
                // can request an immediate exit from run().
                set_vcpu_thread_local(Some(&vcpu), SIGRTMIN() + 0);

                mmio_bus.set_access_id(cpu_id);
                io_bus.set_access_id(cpu_id);

                let vcpu_exit_state = vcpu_loop(
                    run_mode,
                    cpu_id,
                    vcpu,
                    irq_chip,
                    run_rt,
                    delay_rt,
                    io_bus,
                    mmio_bus,
                    from_main_tube,
                    #[cfg(feature = "gdb")]
                    to_gdb_tube,
                    #[cfg(feature = "gdb")]
                    guest_mem,
                    #[cfg(target_arch = "x86_64")]
                    bus_lock_ratelimit_ctrl,
                );

                // We don't want any more VCPU signals from now until the thread exits.
                let _ = block_signal(SIGRTMIN() + 0);
                set_vcpu_thread_local(None, SIGRTMIN() + 0);

                vcpu_exit_state
            };

            let final_event_data = match vcpu_fn() {
                ExitState::Stop => VmEventType::Exit,
                ExitState::Reset => VmEventType::Reset,
                ExitState::Crash => VmEventType::Crash,
                // vcpu_loop doesn't exit with GuestPanic.
                ExitState::GuestPanic => unreachable!(),
                ExitState::WatchdogReset => VmEventType::WatchdogReset,
            };
            if let Err(e) = vm_evt_wrtube.send::<VmEventType>(&final_event_data) {
                error!(
                    "failed to send final event {:?} on vcpu {}: {}",
                    final_event_data, cpu_id, e
                )
            }
        })
        .context("failed to spawn VCPU thread")
}
612
613 /// Signals all running VCPUs to vmexit, sends VcpuControl message to each VCPU tube, and tells
614 /// `irq_chip` to stop blocking halted VCPUs. The channel message is set first because both the
615 /// signal and the irq_chip kick could cause the VCPU thread to continue through the VCPU run
616 /// loop.
kick_all_vcpus( vcpu_handles: &[(JoinHandle<()>, mpsc::Sender<vm_control::VcpuControl>)], irq_chip: &dyn IrqChip, message: VcpuControl, )617 pub fn kick_all_vcpus(
618 vcpu_handles: &[(JoinHandle<()>, mpsc::Sender<vm_control::VcpuControl>)],
619 irq_chip: &dyn IrqChip,
620 message: VcpuControl,
621 ) {
622 for (handle, tube) in vcpu_handles {
623 if let Err(e) = tube.send(message.clone()) {
624 error!("failed to send VcpuControl: {}", e);
625 }
626 let _ = handle.kill(SIGRTMIN() + 0);
627 }
628 irq_chip.kick_halted_vcpus();
629 }
630
631 /// Signals specific running VCPUs to vmexit, sends VcpuControl message to the VCPU tube, and tells
632 /// `irq_chip` to stop blocking halted VCPUs. The channel message is set first because both the
633 /// signal and the irq_chip kick could cause the VCPU thread to continue through the VCPU run
634 /// loop.
kick_vcpu( vcpu_handle: &Option<&(JoinHandle<()>, mpsc::Sender<vm_control::VcpuControl>)>, irq_chip: &dyn IrqChip, message: VcpuControl, )635 pub fn kick_vcpu(
636 vcpu_handle: &Option<&(JoinHandle<()>, mpsc::Sender<vm_control::VcpuControl>)>,
637 irq_chip: &dyn IrqChip,
638 message: VcpuControl,
639 ) {
640 if let Some((handle, tube)) = vcpu_handle {
641 if let Err(e) = tube.send(message) {
642 error!("failed to send VcpuControl: {}", e);
643 }
644 let _ = handle.kill(SIGRTMIN() + 0);
645 }
646 irq_chip.kick_halted_vcpus();
647 }
648