1 // Copyright 2019 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 use std::collections::BTreeMap;
6 use std::fs::File;
7 use std::io;
8
9 use anyhow::anyhow;
10 use anyhow::Context;
11 use base::error;
12 use base::AsRawDescriptor;
13 use base::Error as SysError;
14 use base::Event;
15 use base::RawDescriptor;
16 use base::Result as SysResult;
17 use base::Tube;
18 use base::WorkerThread;
19 use cros_async::select3;
20 use cros_async::EventAsync;
21 use cros_async::Executor;
22 use data_model::Le32;
23 use data_model::Le64;
24 use futures::pin_mut;
25 use remain::sorted;
26 use thiserror::Error;
27 use vm_control::MemSlot;
28 use vm_control::VmMsyncRequest;
29 use vm_control::VmMsyncResponse;
30 use vm_memory::GuestAddress;
31 use vm_memory::GuestMemory;
32 use zerocopy::AsBytes;
33 use zerocopy::FromBytes;
34 use zerocopy::FromZeroes;
35
36 use super::async_utils;
37 use super::copy_config;
38 use super::DescriptorChain;
39 use super::DeviceType;
40 use super::Interrupt;
41 use super::Queue;
42 use super::VirtioDevice;
43
// A single virtqueue of 256 descriptors is used for guest flush requests.
const QUEUE_SIZE: u16 = 256;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];

// Request/response type codes defined by the virtio-pmem specification.
const VIRTIO_PMEM_REQ_TYPE_FLUSH: u32 = 0;
const VIRTIO_PMEM_RESP_TYPE_OK: u32 = 0;
const VIRTIO_PMEM_RESP_TYPE_EIO: u32 = 1;
50
// Device configuration space layout: the guest-physical address and size of
// the pmem region, exposed to the guest via `read_config`.
#[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes)]
#[repr(C)]
struct virtio_pmem_config {
    // Guest physical address at which the backing file is mapped.
    start_address: Le64,
    // Size of the mapped region in bytes.
    size: Le64,
}
57
// Response written back to the guest for each pmem request; carries one of
// the VIRTIO_PMEM_RESP_TYPE_* status codes.
#[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes)]
#[repr(C)]
struct virtio_pmem_resp {
    status_code: Le32,
}
63
// Request read from the guest's virtqueue; `type_` is one of the
// VIRTIO_PMEM_REQ_TYPE_* codes (only FLUSH is currently defined).
#[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes)]
#[repr(C)]
struct virtio_pmem_req {
    type_: Le32,
}
69
// Errors that can occur while moving requests/responses through the
// virtqueue. Variants must stay alphabetically sorted (`#[sorted]`).
#[sorted]
#[derive(Error, Debug)]
enum Error {
    /// Failed to read from virtqueue.
    #[error("failed to read from virtqueue: {0}")]
    ReadQueue(io::Error),
    /// Failed to write to virtqueue.
    #[error("failed to write to virtqueue: {0}")]
    WriteQueue(io::Error),
}

// Local result alias used by the request-handling path.
type Result<T> = ::std::result::Result<T, Error>;
82
execute_request( request: virtio_pmem_req, pmem_device_tube: &Tube, mapping_arena_slot: u32, mapping_size: usize, ) -> u3283 fn execute_request(
84 request: virtio_pmem_req,
85 pmem_device_tube: &Tube,
86 mapping_arena_slot: u32,
87 mapping_size: usize,
88 ) -> u32 {
89 match request.type_.to_native() {
90 VIRTIO_PMEM_REQ_TYPE_FLUSH => {
91 let request = VmMsyncRequest::MsyncArena {
92 slot: mapping_arena_slot,
93 offset: 0, // The pmem backing file is always at offset 0 in the arena.
94 size: mapping_size,
95 };
96
97 if let Err(e) = pmem_device_tube.send(&request) {
98 error!("failed to send request: {}", e);
99 return VIRTIO_PMEM_RESP_TYPE_EIO;
100 }
101
102 match pmem_device_tube.recv() {
103 Ok(response) => match response {
104 VmMsyncResponse::Ok => VIRTIO_PMEM_RESP_TYPE_OK,
105 VmMsyncResponse::Err(e) => {
106 error!("failed flushing disk image: {}", e);
107 VIRTIO_PMEM_RESP_TYPE_EIO
108 }
109 },
110 Err(e) => {
111 error!("failed to receive data: {}", e);
112 VIRTIO_PMEM_RESP_TYPE_EIO
113 }
114 }
115 }
116 _ => {
117 error!("unknown request type: {}", request.type_.to_native());
118 VIRTIO_PMEM_RESP_TYPE_EIO
119 }
120 }
121 }
122
handle_request( avail_desc: &mut DescriptorChain, pmem_device_tube: &Tube, mapping_arena_slot: u32, mapping_size: usize, ) -> Result<usize>123 fn handle_request(
124 avail_desc: &mut DescriptorChain,
125 pmem_device_tube: &Tube,
126 mapping_arena_slot: u32,
127 mapping_size: usize,
128 ) -> Result<usize> {
129 let status_code = avail_desc
130 .reader
131 .read_obj()
132 .map(|request| execute_request(request, pmem_device_tube, mapping_arena_slot, mapping_size))
133 .map_err(Error::ReadQueue)?;
134
135 let response = virtio_pmem_resp {
136 status_code: status_code.into(),
137 };
138
139 avail_desc
140 .writer
141 .write_obj(response)
142 .map_err(Error::WriteQueue)?;
143
144 Ok(avail_desc.writer.bytes_written())
145 }
146
handle_queue( queue: &mut Queue, mut queue_event: EventAsync, interrupt: Interrupt, pmem_device_tube: &Tube, mapping_arena_slot: u32, mapping_size: usize, )147 async fn handle_queue(
148 queue: &mut Queue,
149 mut queue_event: EventAsync,
150 interrupt: Interrupt,
151 pmem_device_tube: &Tube,
152 mapping_arena_slot: u32,
153 mapping_size: usize,
154 ) {
155 loop {
156 let mut avail_desc = match queue.next_async(&mut queue_event).await {
157 Err(e) => {
158 error!("Failed to read descriptor {}", e);
159 return;
160 }
161 Ok(d) => d,
162 };
163
164 let written = match handle_request(
165 &mut avail_desc,
166 pmem_device_tube,
167 mapping_arena_slot,
168 mapping_size,
169 ) {
170 Ok(n) => n,
171 Err(e) => {
172 error!("pmem: failed to handle request: {}", e);
173 0
174 }
175 };
176 queue.add_used(avail_desc, written as u32);
177 queue.trigger_interrupt(&interrupt);
178 }
179 }
180
run_worker( queue: &mut Queue, pmem_device_tube: &Tube, interrupt: Interrupt, kill_evt: Event, mapping_arena_slot: u32, mapping_size: usize, )181 fn run_worker(
182 queue: &mut Queue,
183 pmem_device_tube: &Tube,
184 interrupt: Interrupt,
185 kill_evt: Event,
186 mapping_arena_slot: u32,
187 mapping_size: usize,
188 ) {
189 let ex = Executor::new().unwrap();
190
191 let queue_evt = queue
192 .event()
193 .try_clone()
194 .expect("failed to clone queue event");
195 let queue_evt = EventAsync::new(queue_evt, &ex).expect("failed to set up the queue event");
196
197 // Process requests from the virtio queue.
198 let queue_fut = handle_queue(
199 queue,
200 queue_evt,
201 interrupt.clone(),
202 pmem_device_tube,
203 mapping_arena_slot,
204 mapping_size,
205 );
206 pin_mut!(queue_fut);
207
208 // Process any requests to resample the irq value.
209 let resample = async_utils::handle_irq_resample(&ex, interrupt);
210 pin_mut!(resample);
211
212 // Exit if the kill event is triggered.
213 let kill = async_utils::await_and_exit(&ex, kill_evt);
214 pin_mut!(kill);
215
216 if let Err(e) = ex.run_until(select3(queue_fut, resample, kill)) {
217 error!("error happened in executor: {}", e);
218 }
219 }
220
/// Virtio persistent-memory (pmem) device exposing a host file as a guest
/// memory region, with a single virtqueue for flush requests.
pub struct Pmem {
    // Handle to the request-processing worker; `None` until `activate`.
    // On stop it returns the queue and the msync tube for reuse.
    worker_thread: Option<WorkerThread<(Queue, Tube)>>,
    // Base virtio feature bits advertised to the guest.
    base_features: u64,
    // Backing file for the pmem region; kept so `keep_rds` can preserve it.
    disk_image: Option<File>,
    // Guest physical address where the region is mapped.
    mapping_address: GuestAddress,
    // Memory slot of the arena backing the region (used for msync requests).
    mapping_arena_slot: MemSlot,
    // Size of the mapping in bytes; validated to fit in a usize in `new`.
    mapping_size: u64,
    // Tube for sending msync requests; taken by `activate`, restored on
    // `reset`/`virtio_sleep`.
    pmem_device_tube: Option<Tube>,
}
230
// Serialized device state for snapshot/restore. Only the mapping geometry is
// recorded; `virtio_restore` checks it against the current configuration.
#[derive(serde::Serialize, serde::Deserialize)]
struct PmemSnapshot {
    mapping_address: GuestAddress,
    mapping_size: u64,
}
236
237 impl Pmem {
new( base_features: u64, disk_image: File, mapping_address: GuestAddress, mapping_arena_slot: MemSlot, mapping_size: u64, pmem_device_tube: Tube, ) -> SysResult<Pmem>238 pub fn new(
239 base_features: u64,
240 disk_image: File,
241 mapping_address: GuestAddress,
242 mapping_arena_slot: MemSlot,
243 mapping_size: u64,
244 pmem_device_tube: Tube,
245 ) -> SysResult<Pmem> {
246 if mapping_size > usize::max_value() as u64 {
247 return Err(SysError::new(libc::EOVERFLOW));
248 }
249
250 Ok(Pmem {
251 worker_thread: None,
252 base_features,
253 disk_image: Some(disk_image),
254 mapping_address,
255 mapping_arena_slot,
256 mapping_size,
257 pmem_device_tube: Some(pmem_device_tube),
258 })
259 }
260 }
261
262 impl VirtioDevice for Pmem {
keep_rds(&self) -> Vec<RawDescriptor>263 fn keep_rds(&self) -> Vec<RawDescriptor> {
264 let mut keep_rds = Vec::new();
265 if let Some(disk_image) = &self.disk_image {
266 keep_rds.push(disk_image.as_raw_descriptor());
267 }
268
269 if let Some(ref pmem_device_tube) = self.pmem_device_tube {
270 keep_rds.push(pmem_device_tube.as_raw_descriptor());
271 }
272 keep_rds
273 }
274
device_type(&self) -> DeviceType275 fn device_type(&self) -> DeviceType {
276 DeviceType::Pmem
277 }
278
queue_max_sizes(&self) -> &[u16]279 fn queue_max_sizes(&self) -> &[u16] {
280 QUEUE_SIZES
281 }
282
features(&self) -> u64283 fn features(&self) -> u64 {
284 self.base_features
285 }
286
read_config(&self, offset: u64, data: &mut [u8])287 fn read_config(&self, offset: u64, data: &mut [u8]) {
288 let config = virtio_pmem_config {
289 start_address: Le64::from(self.mapping_address.offset()),
290 size: Le64::from(self.mapping_size),
291 };
292 copy_config(data, 0, config.as_bytes(), offset);
293 }
294
activate( &mut self, _memory: GuestMemory, interrupt: Interrupt, mut queues: BTreeMap<usize, Queue>, ) -> anyhow::Result<()>295 fn activate(
296 &mut self,
297 _memory: GuestMemory,
298 interrupt: Interrupt,
299 mut queues: BTreeMap<usize, Queue>,
300 ) -> anyhow::Result<()> {
301 if queues.len() != 1 {
302 return Err(anyhow!("expected 1 queue, got {}", queues.len()));
303 }
304
305 let mut queue = queues.remove(&0).unwrap();
306
307 let mapping_arena_slot = self.mapping_arena_slot;
308 // We checked that this fits in a usize in `Pmem::new`.
309 let mapping_size = self.mapping_size as usize;
310
311 let pmem_device_tube = self
312 .pmem_device_tube
313 .take()
314 .context("missing pmem device tube")?;
315
316 self.worker_thread = Some(WorkerThread::start("v_pmem", move |kill_event| {
317 run_worker(
318 &mut queue,
319 &pmem_device_tube,
320 interrupt,
321 kill_event,
322 mapping_arena_slot,
323 mapping_size,
324 );
325 (queue, pmem_device_tube)
326 }));
327
328 Ok(())
329 }
330
reset(&mut self) -> anyhow::Result<()>331 fn reset(&mut self) -> anyhow::Result<()> {
332 if let Some(worker_thread) = self.worker_thread.take() {
333 let (_queue, pmem_device_tube) = worker_thread.stop();
334 self.pmem_device_tube = Some(pmem_device_tube);
335 }
336 Ok(())
337 }
338
virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>>339 fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
340 if let Some(worker_thread) = self.worker_thread.take() {
341 let (queue, pmem_device_tube) = worker_thread.stop();
342 self.pmem_device_tube = Some(pmem_device_tube);
343 return Ok(Some(BTreeMap::from([(0, queue)])));
344 }
345 Ok(None)
346 }
347
virtio_wake( &mut self, queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>, ) -> anyhow::Result<()>348 fn virtio_wake(
349 &mut self,
350 queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
351 ) -> anyhow::Result<()> {
352 if let Some((mem, interrupt, queues)) = queues_state {
353 self.activate(mem, interrupt, queues)?;
354 }
355 Ok(())
356 }
357
virtio_snapshot(&mut self) -> anyhow::Result<serde_json::Value>358 fn virtio_snapshot(&mut self) -> anyhow::Result<serde_json::Value> {
359 serde_json::to_value(PmemSnapshot {
360 mapping_address: self.mapping_address,
361 mapping_size: self.mapping_size,
362 })
363 .context("failed to serialize pmem snapshot")
364 }
365
virtio_restore(&mut self, data: serde_json::Value) -> anyhow::Result<()>366 fn virtio_restore(&mut self, data: serde_json::Value) -> anyhow::Result<()> {
367 let snapshot: PmemSnapshot =
368 serde_json::from_value(data).context("failed to deserialize pmem snapshot")?;
369 anyhow::ensure!(
370 snapshot.mapping_address == self.mapping_address
371 && snapshot.mapping_size == self.mapping_size,
372 "pmem snapshot doesn't match config: expected {:?}, got {:?}",
373 (self.mapping_address, self.mapping_size),
374 (snapshot.mapping_address, snapshot.mapping_size),
375 );
376 Ok(())
377 }
378 }
379