• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2019 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 use std::cell::RefCell;
6 use std::fs::File;
7 use std::io;
8 use std::rc::Rc;
9 use std::thread;
10 
11 use base::{error, AsRawDescriptor, Event, RawDescriptor, Tube};
12 use base::{Error as SysError, Result as SysResult};
13 use cros_async::{select3, EventAsync, Executor};
14 use data_model::{DataInit, Le32, Le64};
15 use futures::pin_mut;
16 use remain::sorted;
17 use thiserror::Error;
18 use vm_control::{MemSlot, VmMsyncRequest, VmMsyncResponse};
19 use vm_memory::{GuestAddress, GuestMemory};
20 
21 use super::{
22     async_utils, copy_config, DescriptorChain, DescriptorError, Interrupt, Queue, Reader,
23     VirtioDevice, Writer, TYPE_PMEM,
24 };
25 
// A single virtqueue is used for pmem requests.
const QUEUE_SIZE: u16 = 256;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];

// Request type read from the guest and status codes written back to it.
const VIRTIO_PMEM_REQ_TYPE_FLUSH: u32 = 0;
const VIRTIO_PMEM_RESP_TYPE_OK: u32 = 0;
const VIRTIO_PMEM_RESP_TYPE_EIO: u32 = 1;
32 
/// Device config space exposed to the guest: the guest-physical start
/// address and size of the pmem mapping, both little-endian.
/// Copied out via `read_config`.
#[derive(Copy, Clone, Debug, Default)]
#[repr(C)]
struct virtio_pmem_config {
    start_address: Le64,
    size: Le64,
}

// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for virtio_pmem_config {}
42 
/// Response written back to the guest for each request: a single
/// little-endian status code (one of `VIRTIO_PMEM_RESP_TYPE_*`).
#[derive(Copy, Clone, Debug, Default)]
#[repr(C)]
struct virtio_pmem_resp {
    status_code: Le32,
}

// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for virtio_pmem_resp {}
51 
/// Request read from the guest: a single little-endian request type
/// (currently only `VIRTIO_PMEM_REQ_TYPE_FLUSH` is understood).
#[derive(Copy, Clone, Debug, Default)]
#[repr(C)]
struct virtio_pmem_req {
    type_: Le32,
}

// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for virtio_pmem_req {}
60 
/// Errors that can occur while servicing a single pmem virtqueue request.
///
/// Variants are kept in alphabetical order, enforced by `#[sorted]`.
#[sorted]
#[derive(Error, Debug)]
enum Error {
    /// Invalid virtio descriptor chain.
    #[error("virtio descriptor error: {0}")]
    Descriptor(DescriptorError),
    /// Failed to read from virtqueue.
    #[error("failed to read from virtqueue: {0}")]
    ReadQueue(io::Error),
    /// Failed to write to virtqueue.
    #[error("failed to write to virtqueue: {0}")]
    WriteQueue(io::Error),
}

/// Local result alias for this module's `Error`.
type Result<T> = ::std::result::Result<T, Error>;
76 
execute_request( request: virtio_pmem_req, pmem_device_tube: &Tube, mapping_arena_slot: u32, mapping_size: usize, ) -> u3277 fn execute_request(
78     request: virtio_pmem_req,
79     pmem_device_tube: &Tube,
80     mapping_arena_slot: u32,
81     mapping_size: usize,
82 ) -> u32 {
83     match request.type_.to_native() {
84         VIRTIO_PMEM_REQ_TYPE_FLUSH => {
85             let request = VmMsyncRequest::MsyncArena {
86                 slot: mapping_arena_slot,
87                 offset: 0, // The pmem backing file is always at offset 0 in the arena.
88                 size: mapping_size,
89             };
90 
91             if let Err(e) = pmem_device_tube.send(&request) {
92                 error!("failed to send request: {}", e);
93                 return VIRTIO_PMEM_RESP_TYPE_EIO;
94             }
95 
96             match pmem_device_tube.recv() {
97                 Ok(response) => match response {
98                     VmMsyncResponse::Ok => VIRTIO_PMEM_RESP_TYPE_OK,
99                     VmMsyncResponse::Err(e) => {
100                         error!("failed flushing disk image: {}", e);
101                         VIRTIO_PMEM_RESP_TYPE_EIO
102                     }
103                 },
104                 Err(e) => {
105                     error!("failed to receive data: {}", e);
106                     VIRTIO_PMEM_RESP_TYPE_EIO
107                 }
108             }
109         }
110         _ => {
111             error!("unknown request type: {}", request.type_.to_native());
112             VIRTIO_PMEM_RESP_TYPE_EIO
113         }
114     }
115 }
116 
handle_request( mem: &GuestMemory, avail_desc: DescriptorChain, pmem_device_tube: &Tube, mapping_arena_slot: u32, mapping_size: usize, ) -> Result<usize>117 fn handle_request(
118     mem: &GuestMemory,
119     avail_desc: DescriptorChain,
120     pmem_device_tube: &Tube,
121     mapping_arena_slot: u32,
122     mapping_size: usize,
123 ) -> Result<usize> {
124     let mut reader = Reader::new(mem.clone(), avail_desc.clone()).map_err(Error::Descriptor)?;
125     let mut writer = Writer::new(mem.clone(), avail_desc).map_err(Error::Descriptor)?;
126 
127     let status_code = reader
128         .read_obj()
129         .map(|request| execute_request(request, pmem_device_tube, mapping_arena_slot, mapping_size))
130         .map_err(Error::ReadQueue)?;
131 
132     let response = virtio_pmem_resp {
133         status_code: status_code.into(),
134     };
135 
136     writer.write_obj(response).map_err(Error::WriteQueue)?;
137 
138     Ok(writer.bytes_written())
139 }
140 
handle_queue( mem: &GuestMemory, mut queue: Queue, mut queue_event: EventAsync, interrupt: Rc<RefCell<Interrupt>>, pmem_device_tube: Tube, mapping_arena_slot: u32, mapping_size: usize, )141 async fn handle_queue(
142     mem: &GuestMemory,
143     mut queue: Queue,
144     mut queue_event: EventAsync,
145     interrupt: Rc<RefCell<Interrupt>>,
146     pmem_device_tube: Tube,
147     mapping_arena_slot: u32,
148     mapping_size: usize,
149 ) {
150     loop {
151         let avail_desc = match queue.next_async(mem, &mut queue_event).await {
152             Err(e) => {
153                 error!("Failed to read descriptor {}", e);
154                 return;
155             }
156             Ok(d) => d,
157         };
158         let index = avail_desc.index;
159         let written = match handle_request(
160             mem,
161             avail_desc,
162             &pmem_device_tube,
163             mapping_arena_slot,
164             mapping_size,
165         ) {
166             Ok(n) => n,
167             Err(e) => {
168                 error!("pmem: failed to handle request: {}", e);
169                 0
170             }
171         };
172         queue.add_used(mem, index, written as u32);
173         queue.trigger_interrupt(mem, &*interrupt.borrow());
174     }
175 }
176 
run_worker( queue_evt: Event, queue: Queue, pmem_device_tube: Tube, interrupt: Interrupt, kill_evt: Event, mem: GuestMemory, mapping_arena_slot: u32, mapping_size: usize, )177 fn run_worker(
178     queue_evt: Event,
179     queue: Queue,
180     pmem_device_tube: Tube,
181     interrupt: Interrupt,
182     kill_evt: Event,
183     mem: GuestMemory,
184     mapping_arena_slot: u32,
185     mapping_size: usize,
186 ) {
187     // Wrap the interrupt in a `RefCell` so it can be shared between async functions.
188     let interrupt = Rc::new(RefCell::new(interrupt));
189 
190     let ex = Executor::new().unwrap();
191 
192     let queue_evt = EventAsync::new(queue_evt.0, &ex).expect("failed to set up the queue event");
193 
194     // Process requests from the virtio queue.
195     let queue_fut = handle_queue(
196         &mem,
197         queue,
198         queue_evt,
199         interrupt.clone(),
200         pmem_device_tube,
201         mapping_arena_slot,
202         mapping_size,
203     );
204     pin_mut!(queue_fut);
205 
206     // Process any requests to resample the irq value.
207     let resample = async_utils::handle_irq_resample(&ex, interrupt);
208     pin_mut!(resample);
209 
210     // Exit if the kill event is triggered.
211     let kill = async_utils::await_and_exit(&ex, kill_evt);
212     pin_mut!(kill);
213 
214     if let Err(e) = ex.run_until(select3(queue_fut, resample, kill)) {
215         error!("error happened in executor: {}", e);
216     }
217 }
218 
/// A virtio pmem device exposing a host file as guest-visible persistent
/// memory.
pub struct Pmem {
    // Event used by `Drop` to tell the worker thread to exit.
    kill_event: Option<Event>,
    // Handle joined in `Drop`; set once `activate` spawns the worker.
    worker_thread: Option<thread::JoinHandle<()>>,
    // Virtio feature bits advertised to the guest.
    base_features: u64,
    // Backing file; kept only so its descriptor survives sandboxing
    // (see `keep_rds`).
    disk_image: Option<File>,
    // Guest-physical address where the image is mapped.
    mapping_address: GuestAddress,
    // Memory slot of the mapping arena, used for msync requests.
    mapping_arena_slot: MemSlot,
    // Size of the mapping; checked in `new` to fit in `usize`.
    mapping_size: u64,
    // Channel for flush (msync) requests; taken by `activate`.
    pmem_device_tube: Option<Tube>,
}
229 
230 impl Pmem {
new( base_features: u64, disk_image: File, mapping_address: GuestAddress, mapping_arena_slot: MemSlot, mapping_size: u64, pmem_device_tube: Option<Tube>, ) -> SysResult<Pmem>231     pub fn new(
232         base_features: u64,
233         disk_image: File,
234         mapping_address: GuestAddress,
235         mapping_arena_slot: MemSlot,
236         mapping_size: u64,
237         pmem_device_tube: Option<Tube>,
238     ) -> SysResult<Pmem> {
239         if mapping_size > usize::max_value() as u64 {
240             return Err(SysError::new(libc::EOVERFLOW));
241         }
242 
243         Ok(Pmem {
244             kill_event: None,
245             worker_thread: None,
246             base_features,
247             disk_image: Some(disk_image),
248             mapping_address,
249             mapping_arena_slot,
250             mapping_size,
251             pmem_device_tube,
252         })
253     }
254 }
255 
256 impl Drop for Pmem {
drop(&mut self)257     fn drop(&mut self) {
258         if let Some(kill_evt) = self.kill_event.take() {
259             // Ignore the result because there is nothing we can do about it.
260             let _ = kill_evt.write(1);
261         }
262 
263         if let Some(worker_thread) = self.worker_thread.take() {
264             let _ = worker_thread.join();
265         }
266     }
267 }
268 
impl VirtioDevice for Pmem {
    /// Returns the descriptors that must stay open after sandboxing: the
    /// backing image and the msync tube, when present.
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut keep_rds = Vec::new();
        if let Some(disk_image) = &self.disk_image {
            keep_rds.push(disk_image.as_raw_descriptor());
        }

        if let Some(ref pmem_device_tube) = self.pmem_device_tube {
            keep_rds.push(pmem_device_tube.as_raw_descriptor());
        }
        keep_rds
    }

    fn device_type(&self) -> u32 {
        TYPE_PMEM
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    fn features(&self) -> u64 {
        self.base_features
    }

    /// Copies the requested slice of the `virtio_pmem_config` structure
    /// (mapping address and size) into `data`.
    fn read_config(&self, offset: u64, data: &mut [u8]) {
        let config = virtio_pmem_config {
            start_address: Le64::from(self.mapping_address.offset()),
            size: Le64::from(self.mapping_size as u64),
        };
        copy_config(data, 0, config.as_slice(), offset);
    }

    /// Starts the worker thread servicing the single request queue.
    ///
    /// Bails out silently if the queue count is unexpected or if the device
    /// tube has already been taken (e.g. by a previous activation).
    fn activate(
        &mut self,
        memory: GuestMemory,
        interrupt: Interrupt,
        mut queues: Vec<Queue>,
        mut queue_events: Vec<Event>,
    ) {
        // This device exposes exactly one queue (see QUEUE_SIZES).
        if queues.len() != 1 || queue_events.len() != 1 {
            return;
        }

        let queue = queues.remove(0);
        let queue_event = queue_events.remove(0);

        let mapping_arena_slot = self.mapping_arena_slot;
        // We checked that this fits in a usize in `Pmem::new`.
        let mapping_size = self.mapping_size as usize;

        if let Some(pmem_device_tube) = self.pmem_device_tube.take() {
            // Create a pair of events: one kept here so `Drop` can signal
            // the worker, and its clone handed to the worker to wait on.
            let (self_kill_event, kill_event) =
                match Event::new().and_then(|e| Ok((e.try_clone()?, e))) {
                    Ok(v) => v,
                    Err(e) => {
                        error!("failed creating kill Event pair: {}", e);
                        return;
                    }
                };
            self.kill_event = Some(self_kill_event);

            let worker_result = thread::Builder::new()
                .name("virtio_pmem".to_string())
                .spawn(move || {
                    run_worker(
                        queue_event,
                        queue,
                        pmem_device_tube,
                        interrupt,
                        kill_event,
                        memory,
                        mapping_arena_slot,
                        mapping_size,
                    )
                });

            match worker_result {
                Err(e) => {
                    error!("failed to spawn virtio_pmem worker: {}", e);
                    return;
                }
                Ok(join_handle) => {
                    self.worker_thread = Some(join_handle);
                }
            }
        }
    }
}
358