// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::cmp::min;
use std::cmp::{self, Ord, PartialEq, PartialOrd};
use std::collections::btree_set::BTreeSet;
use std::io::{Read, Write};
use std::mem;
use std::sync::{Arc, RwLock};

use libc::{EINVAL, ENOENT, ENOTTY, EPERM, EPIPE, EPROTO};

use protobuf;
use protobuf::Message;

use assertions::const_assert;
use data_model::DataInit;
use kvm::{CpuId, Vcpu};
use kvm_sys::{
    kvm_debugregs, kvm_fpu, kvm_lapic_state, kvm_mp_state, kvm_msr_entry, kvm_msrs, kvm_regs,
    kvm_sregs, kvm_vcpu_events, kvm_xcrs, KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
};
use protos::plugin::*;
use sync::Mutex;
use sys_util::{error, LayoutAllocation};

use super::*;

/// Identifier for an address space in the VM.
#[derive(Copy, Clone)]
pub enum IoSpace {
    Ioport,
    Mmio,
}
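
// `Range(start, length)` values are ordered and compared by start address only, so the
// `BTreeSet<Range>` collections below can be probed with a `Range(addr, 0)` key to find the
// reservation that begins at or before a given address; overlap checks compare lengths
// explicitly in `SharedVcpuState`.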
#[derive(Debug, Copy, Clone)]
struct Range(u64, u64);

impl Eq for Range {}

impl PartialEq for Range {
    fn eq(&self, other: &Range) -> bool {
        self.0 == other.0
    }
}

impl Ord for Range {
    fn cmp(&self, other: &Range) -> cmp::Ordering {
        self.0.cmp(&other.0)
    }
}

impl PartialOrd for Range {
    fn partial_cmp(&self, other: &Range) -> Option<cmp::Ordering> {
        self.0.partial_cmp(&other.0)
    }
}

// Wrapper types to make the kvm register structs DataInit
#[derive(Copy, Clone)]
struct VcpuRegs(kvm_regs);
unsafe impl DataInit for VcpuRegs {}
#[derive(Copy, Clone)]
struct VcpuSregs(kvm_sregs);
unsafe impl DataInit for VcpuSregs {}
#[derive(Copy, Clone)]
struct VcpuFpu(kvm_fpu);
unsafe impl DataInit for VcpuFpu {}
#[derive(Copy, Clone)]
struct VcpuDebugregs(kvm_debugregs);
unsafe impl DataInit for VcpuDebugregs {}
#[derive(Copy, Clone)]
struct VcpuXcregs(kvm_xcrs);
unsafe impl DataInit for VcpuXcregs {}
#[derive(Copy, Clone)]
struct VcpuLapicState(kvm_lapic_state);
unsafe impl DataInit for VcpuLapicState {}
#[derive(Copy, Clone)]
struct VcpuMpState(kvm_mp_state);
unsafe impl DataInit for VcpuMpState {}
#[derive(Copy, Clone)]
struct VcpuEvents(kvm_vcpu_events);
unsafe impl DataInit for VcpuEvents {}

fn get_vcpu_state(vcpu: &Vcpu, state_set: VcpuRequest_StateSet) -> SysResult<Vec<u8>> {
    Ok(match state_set {
        VcpuRequest_StateSet::REGS => VcpuRegs(vcpu.get_regs()?).as_slice().to_vec(),
        VcpuRequest_StateSet::SREGS => VcpuSregs(vcpu.get_sregs()?).as_slice().to_vec(),
        VcpuRequest_StateSet::FPU => VcpuFpu(vcpu.get_fpu()?).as_slice().to_vec(),
        VcpuRequest_StateSet::DEBUGREGS => VcpuDebugregs(vcpu.get_debugregs()?).as_slice().to_vec(),
        VcpuRequest_StateSet::XCREGS => VcpuXcregs(vcpu.get_xcrs()?).as_slice().to_vec(),
        VcpuRequest_StateSet::LAPIC => VcpuLapicState(vcpu.get_lapic()?).as_slice().to_vec(),
        VcpuRequest_StateSet::MP => VcpuMpState(vcpu.get_mp_state()?).as_slice().to_vec(),
        VcpuRequest_StateSet::EVENTS => VcpuEvents(vcpu.get_vcpu_events()?).as_slice().to_vec(),
    })
}

fn set_vcpu_state(vcpu: &Vcpu, state_set: VcpuRequest_StateSet, state: &[u8]) -> SysResult<()> {
    match state_set {
        VcpuRequest_StateSet::REGS => {
            vcpu.set_regs(&VcpuRegs::from_slice(state).ok_or(SysError::new(EINVAL))?.0)
        }
        VcpuRequest_StateSet::SREGS => {
            vcpu.set_sregs(&VcpuSregs::from_slice(state).ok_or(SysError::new(EINVAL))?.0)
        }
        VcpuRequest_StateSet::FPU => {
            vcpu.set_fpu(&VcpuFpu::from_slice(state).ok_or(SysError::new(EINVAL))?.0)
        }
        VcpuRequest_StateSet::DEBUGREGS => vcpu.set_debugregs(
            &VcpuDebugregs::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        VcpuRequest_StateSet::XCREGS => vcpu.set_xcrs(
            &VcpuXcregs::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        VcpuRequest_StateSet::LAPIC => vcpu.set_lapic(
            &VcpuLapicState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        VcpuRequest_StateSet::MP => vcpu.set_mp_state(
            &VcpuMpState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        VcpuRequest_StateSet::EVENTS => vcpu.set_vcpu_events(
            &VcpuEvents::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
    }
}

/// State shared by every VCPU, grouped together to make edits to the state coherent across VCPUs.
#[derive(Default)]
pub struct SharedVcpuState {
    ioport_regions: BTreeSet<Range>,
    mmio_regions: BTreeSet<Range>,
}

impl SharedVcpuState {
    /// Reserves the given range for handling by the plugin process.
    ///
    /// This will reject any reservation that overlaps with an existing reservation.
    pub fn reserve_range(&mut self, space: IoSpace, start: u64, length: u64) -> SysResult<()> {
        if length == 0 {
            return Err(SysError::new(EINVAL));
        }

        // Reject all cases where this reservation is part of another reservation.
        if self.is_reserved(space, start) {
            return Err(SysError::new(EPERM));
        }

        let last_address = match start.checked_add(length) {
            Some(end) => end - 1,
            None => return Err(SysError::new(EINVAL)),
        };

        let space = match space {
            IoSpace::Ioport => &mut self.ioport_regions,
            IoSpace::Mmio => &mut self.mmio_regions,
        };

        match space.range(..Range(last_address, 0)).next_back().cloned() {
            Some(Range(existing_start, _)) if existing_start >= start => Err(SysError::new(EPERM)),
            _ => {
                space.insert(Range(start, length));
                Ok(())
            }
        }
    }

    /// Releases a reservation previously made at `start` in the given `space`.
    pub fn unreserve_range(&mut self, space: IoSpace, start: u64) -> SysResult<()> {
        let range = Range(start, 0);
        let space = match space {
            IoSpace::Ioport => &mut self.ioport_regions,
            IoSpace::Mmio => &mut self.mmio_regions,
        };
        if space.remove(&range) {
            Ok(())
        } else {
            Err(SysError::new(ENOENT))
        }
    }

    fn is_reserved(&self, space: IoSpace, addr: u64) -> bool {
        if let Some(Range(start, len)) = self.first_before(space, addr) {
            let offset = addr - start;
            if offset < len {
                return true;
            }
        }
        false
    }

    fn first_before(&self, io_space: IoSpace, addr: u64) -> Option<Range> {
        let space = match io_space {
            IoSpace::Ioport => &self.ioport_regions,
            IoSpace::Mmio => &self.mmio_regions,
        };

        match addr.checked_add(1) {
            Some(next_addr) => space.range(..Range(next_addr, 0)).next_back().cloned(),
            None => None,
        }
    }
}

/// State specific to a VCPU, grouped so that each `PluginVcpu` object will share a canonical
/// version.
#[derive(Default)]
pub struct PerVcpuState {
    pause_request: Option<u64>,
}

impl PerVcpuState {
    /// Indicates that a VCPU should wait until the plugin process resumes the VCPU.
    ///
    /// This method will not cause a VCPU to pause immediately. Instead, the VCPU thread will
    /// continue running until it is interrupted, at which point it will check for a pending
    /// pause. If there is another call to `request_pause` for this VCPU before that happens, the
    /// last pause request's `data` will be overwritten with the most recent `data`.
    ///
    /// To get an immediate pause after calling `request_pause`, send a signal (with a registered
    /// handler) to the thread handling the VCPU corresponding to this state. This should interrupt
    /// the running VCPU, which should check for a pause with `PluginVcpu::pre_run`.
    pub fn request_pause(&mut self, data: u64) {
        self.pause_request = Some(data);
    }
}
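
// An illustrative sketch (not part of this module's API surface) of how a VMM thread might use
// `request_pause` together with `PluginVcpu::pre_run`, assuming it holds the shared
// `Arc<Mutex<PerVcpuState>>` and has some way to interrupt the VCPU thread:
//
//     per_vcpu_state.lock().request_pause(data);
//     // Interrupt the VCPU thread (e.g. a signal with a registered handler) so the running
//     // VCPU exits the kernel; it then observes the pending pause in `PluginVcpu::pre_run`
//     // before rerunning the VM.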

enum VcpuRunData<'a> {
    Read(&'a mut [u8]),
    Write(&'a [u8]),
}

impl<'a> VcpuRunData<'a> {
    fn is_write(&self) -> bool {
        match self {
            VcpuRunData::Write(_) => true,
            _ => false,
        }
    }

    fn as_slice(&self) -> &[u8] {
        match self {
            VcpuRunData::Read(s) => s,
            VcpuRunData::Write(s) => s,
        }
    }

    fn copy_from_slice(&mut self, data: &[u8]) {
        if let VcpuRunData::Read(s) = self {
            // `copy_from_slice` panics unless both slices are the same length, so limit the copy
            // to the shorter of the destination and the source.
            let copy_size = min(s.len(), data.len());
            s[..copy_size].copy_from_slice(&data[..copy_size]);
        }
    }
}

/// State object for a VCPU's connection with the plugin process.
///
/// This is used by a VCPU thread to allow the plugin process to handle vmexits. Each method may
/// block indefinitely while the plugin process is handling requests. In order to cleanly shut
/// down during these blocking calls, the `connection` socket should be shut down. This will end
/// the blocking calls.
pub struct PluginVcpu {
    shared_vcpu_state: Arc<RwLock<SharedVcpuState>>,
    per_vcpu_state: Arc<Mutex<PerVcpuState>>,
    read_pipe: File,
    write_pipe: File,
    wait_reason: Cell<Option<VcpuResponse_Wait>>,
    request_buffer: RefCell<Vec<u8>>,
    response_buffer: RefCell<Vec<u8>>,
}

impl PluginVcpu {
    /// Creates the plugin state and connection container for a VCPU thread.
    pub fn new(
        shared_vcpu_state: Arc<RwLock<SharedVcpuState>>,
        per_vcpu_state: Arc<Mutex<PerVcpuState>>,
        read_pipe: File,
        write_pipe: File,
    ) -> PluginVcpu {
        PluginVcpu {
            shared_vcpu_state,
            per_vcpu_state,
            read_pipe,
            write_pipe,
            wait_reason: Default::default(),
            request_buffer: Default::default(),
            response_buffer: Default::default(),
        }
    }

    /// Tells the plugin process to initialize this VCPU.
    ///
    /// This should be called for each VCPU before the first run of any of the VCPUs in the VM.
    pub fn init(&self, vcpu: &Vcpu) -> SysResult<()> {
        let mut wait_reason = VcpuResponse_Wait::new();
        wait_reason.mut_init();
        self.wait_reason.set(Some(wait_reason));
        self.handle_until_resume(vcpu)?;
        Ok(())
    }

    /// The VCPU thread should call this before rerunning a VM in order to handle pending requests
    /// to this VCPU.
    pub fn pre_run(&self, vcpu: &Vcpu) -> SysResult<()> {
        let request = {
            let mut lock = self.per_vcpu_state.lock();
            lock.pause_request.take()
        };

        if let Some(user_data) = request {
            let mut wait_reason = VcpuResponse_Wait::new();
            wait_reason.mut_user().user = user_data;
            self.wait_reason.set(Some(wait_reason));
            self.handle_until_resume(vcpu)?;
        }
        Ok(())
    }

    fn process(&self, io_space: IoSpace, addr: u64, mut data: VcpuRunData, vcpu: &Vcpu) -> bool {
        let vcpu_state_lock = match self.shared_vcpu_state.read() {
            Ok(l) => l,
            Err(e) => {
                error!("error read locking shared cpu state: {}", e);
                return false;
            }
        };

        let first_before_addr = vcpu_state_lock.first_before(io_space, addr);
        // Drop the read lock as soon as possible, to avoid holding it while blocked in
        // `handle_until_resume`.
        drop(vcpu_state_lock);

        match first_before_addr {
            Some(Range(start, len)) => {
                let offset = addr - start;
                if offset >= len {
                    return false;
                }

                let mut wait_reason = VcpuResponse_Wait::new();
                let io = wait_reason.mut_io();
                io.space = match io_space {
                    IoSpace::Ioport => AddressSpace::IOPORT,
                    IoSpace::Mmio => AddressSpace::MMIO,
                };
                io.address = addr;
                io.is_write = data.is_write();
                io.data = data.as_slice().to_vec();

                self.wait_reason.set(Some(wait_reason));
                match self.handle_until_resume(vcpu) {
                    Ok(resume_data) => data.copy_from_slice(&resume_data),
                    Err(e) if e.errno() == EPIPE => {}
                    Err(e) => error!("failed to process vcpu requests: {}", e),
                }
                true
            }
            None => false,
        }
    }

    /// Has the plugin process handle an I/O port read.
    pub fn io_read(&self, addr: u64, data: &mut [u8], vcpu: &Vcpu) -> bool {
        self.process(IoSpace::Ioport, addr, VcpuRunData::Read(data), vcpu)
    }

    /// Has the plugin process handle an I/O port write.
    pub fn io_write(&self, addr: u64, data: &[u8], vcpu: &Vcpu) -> bool {
        self.process(IoSpace::Ioport, addr, VcpuRunData::Write(data), vcpu)
    }

    /// Has the plugin process handle an MMIO read.
    pub fn mmio_read(&self, addr: u64, data: &mut [u8], vcpu: &Vcpu) -> bool {
        self.process(IoSpace::Mmio, addr, VcpuRunData::Read(data), vcpu)
    }

    /// Has the plugin process handle an MMIO write.
    pub fn mmio_write(&self, addr: u64, data: &[u8], vcpu: &Vcpu) -> bool {
        self.process(IoSpace::Mmio, addr, VcpuRunData::Write(data), vcpu)
    }

    fn handle_request(&self, vcpu: &Vcpu) -> SysResult<Option<Vec<u8>>> {
        let mut wait_reason = self.wait_reason.take();
        let mut do_recv = true;
        let mut resume_data = None;
        let mut response = VcpuResponse::new();

        // Typically a response is sent for every request received. The odd (yet common)
        // case is when a resume request is received. This function will skip sending
        // a resume reply, and instead we'll go run the VM and then later reply with a wait
        // response message. This code block handles checking if a wait reason is pending (where
        // the wait reason isn't the first-time init [first time init needs to first
        // receive a wait request from the plugin]) to send it as a reply before doing a recv()
        // for the next request. Note that if a wait reply is pending then this function
        // will send the reply and do nothing else--the expectation is that handle_until_resume()
        // is the only caller of this function, so the function will immediately get called again
        // and this second call will no longer see a pending wait reason and do a recv() for the
        // next message.
        if let Some(reason) = wait_reason {
            if reason.has_init() {
                wait_reason = Some(reason);
            } else {
                response.set_wait(reason);
                do_recv = false;
                wait_reason = None;
            }
        }

        if do_recv {
            let mut request_buffer = self.request_buffer.borrow_mut();
            request_buffer.resize(MAX_VCPU_DATAGRAM_SIZE, 0);

            let mut read_pipe = &self.read_pipe;
            let msg_size = read_pipe.read(&mut request_buffer).map_err(io_to_sys_err)?;

            let mut request =
                protobuf::parse_from_bytes::<VcpuRequest>(&request_buffer[..msg_size])
                    .map_err(proto_to_sys_err)?;

            let res = if request.has_wait() {
                match wait_reason {
                    Some(wait_reason) => {
                        response.set_wait(wait_reason);
                        Ok(())
                    }
                    None => Err(SysError::new(EPROTO)),
                }
            } else if wait_reason.is_some() {
                // Any request other than getting the wait_reason while there is one pending
                // is invalid.
                self.wait_reason.set(wait_reason);
                Err(SysError::new(EPROTO))
            } else if request.has_resume() {
                response.mut_resume();
                resume_data = Some(request.take_resume().take_data());
                Ok(())
            } else if request.has_get_state() {
                let response_state = response.mut_get_state();
                match get_vcpu_state(vcpu, request.get_get_state().set) {
                    Ok(state) => {
                        response_state.state = state;
                        Ok(())
                    }
                    Err(e) => Err(e),
                }
            } else if request.has_set_state() {
                response.mut_set_state();
                let set_state = request.get_set_state();
                set_vcpu_state(vcpu, set_state.set, set_state.get_state())
            } else if request.has_get_msrs() {
                let entry_data = &mut response.mut_get_msrs().entry_data;
                let entry_indices = &request.get_get_msrs().entry_indices;
                let mut msr_entries = Vec::with_capacity(entry_indices.len());
                for &index in entry_indices {
                    msr_entries.push(kvm_msr_entry {
                        index,
                        ..Default::default()
                    });
                }
                match vcpu.get_msrs(&mut msr_entries) {
                    Ok(()) => {
                        for msr_entry in msr_entries {
                            entry_data.push(msr_entry.data);
                        }
                        Ok(())
                    }
                    Err(e) => Err(e),
                }
            } else if request.has_set_msrs() {
                const SIZE_OF_MSRS: usize = mem::size_of::<kvm_msrs>();
                const SIZE_OF_ENTRY: usize = mem::size_of::<kvm_msr_entry>();
                const ALIGN_OF_MSRS: usize = mem::align_of::<kvm_msrs>();
                const ALIGN_OF_ENTRY: usize = mem::align_of::<kvm_msr_entry>();
                const_assert!(ALIGN_OF_MSRS >= ALIGN_OF_ENTRY);

                response.mut_set_msrs();
                let request_entries = &request.get_set_msrs().entries;

                let size = SIZE_OF_MSRS + request_entries.len() * SIZE_OF_ENTRY;
                let layout =
                    Layout::from_size_align(size, ALIGN_OF_MSRS).expect("impossible layout");
                let mut allocation = LayoutAllocation::zeroed(layout);

                // Safe to obtain an exclusive reference because there are no other
                // references to the allocation yet and all-zero is a valid bit
                // pattern.
                let kvm_msrs = unsafe { allocation.as_mut::<kvm_msrs>() };

                unsafe {
                    // Mapping the unsized array to a slice is unsafe because the length isn't
                    // known. Providing the length used to create the struct guarantees the
                    // entire slice is valid.
                    let kvm_msr_entries: &mut [kvm_msr_entry] =
                        kvm_msrs.entries.as_mut_slice(request_entries.len());
                    for (msr_entry, entry) in kvm_msr_entries.iter_mut().zip(request_entries) {
                        msr_entry.index = entry.index;
                        msr_entry.data = entry.data;
                    }
                }
                kvm_msrs.nmsrs = request_entries.len() as u32;
                vcpu.set_msrs(&kvm_msrs)
            } else if request.has_set_cpuid() {
                response.mut_set_cpuid();
                let request_entries = &request.get_set_cpuid().entries;
                let mut cpuid = CpuId::new(request_entries.len());
                let cpuid_entries = cpuid.mut_entries_slice();
                for (request_entry, cpuid_entry) in request_entries.iter().zip(cpuid_entries) {
                    cpuid_entry.function = request_entry.function;
                    if request_entry.has_index {
                        cpuid_entry.index = request_entry.index;
                        cpuid_entry.flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                    }
                    cpuid_entry.eax = request_entry.eax;
                    cpuid_entry.ebx = request_entry.ebx;
                    cpuid_entry.ecx = request_entry.ecx;
                    cpuid_entry.edx = request_entry.edx;
                }
                vcpu.set_cpuid2(&cpuid)
            } else if request.has_shutdown() {
                return Err(SysError::new(EPIPE));
            } else {
                Err(SysError::new(ENOTTY))
            };

            if let Err(e) = res {
                response.errno = e.errno();
            }
        }

        // Send the response, except if it's a resume response (in which case
        // we'll go run the VM and afterwards send a wait response message).
        if !response.has_resume() {
            let mut response_buffer = self.response_buffer.borrow_mut();
            response_buffer.clear();
            response
                .write_to_vec(&mut response_buffer)
                .map_err(proto_to_sys_err)?;
            let mut write_pipe = &self.write_pipe;
            write_pipe
                .write(&response_buffer[..])
                .map_err(io_to_sys_err)?;
        }

        Ok(resume_data)
    }

    fn handle_until_resume(&self, vcpu: &Vcpu) -> SysResult<Vec<u8>> {
        loop {
            if let Some(resume_data) = self.handle_request(vcpu)? {
                return Ok(resume_data);
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn shared_vcpu_reserve() {
        let mut shared_vcpu_state = SharedVcpuState::default();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x10, 0)
            .unwrap_err();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x10, 0x10)
            .unwrap();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x0f, 0x10)
            .unwrap_err();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x10, 0x10)
            .unwrap_err();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x10, 0x15)
            .unwrap_err();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x12, 0x15)
            .unwrap_err();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x12, 0x01)
            .unwrap_err();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x0, 0x20)
            .unwrap_err();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x20, 0x05)
            .unwrap();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x25, 0x05)
            .unwrap();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x0, 0x10)
            .unwrap();
    }
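
    // A minimal additional sketch (not in the original test set) exercising `unreserve_range`:
    // only an exact `start` match releases a reservation, and a released range can be
    // reserved again.
    #[test]
    fn shared_vcpu_unreserve() {
        let mut shared_vcpu_state = SharedVcpuState::default();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x10, 0x10)
            .unwrap();
        // An address inside the region that is not its start does not match the reservation.
        shared_vcpu_state
            .unreserve_range(IoSpace::Ioport, 0x11)
            .unwrap_err();
        shared_vcpu_state
            .unreserve_range(IoSpace::Ioport, 0x10)
            .unwrap();
        // The reservation is gone, so releasing it again fails and re-reserving succeeds.
        shared_vcpu_state
            .unreserve_range(IoSpace::Ioport, 0x10)
            .unwrap_err();
        shared_vcpu_state
            .reserve_range(IoSpace::Ioport, 0x10, 0x10)
            .unwrap();
    }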
}