• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2021 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! Resource management and resolution for the virtio-video device.
6 
7 use std::convert::TryInto;
8 use std::fmt;
9 
10 use base::{
11     self, FromRawDescriptor, IntoRawDescriptor, MemoryMappingArena, MemoryMappingBuilder,
12     MemoryMappingBuilderUnix, MmapError, SafeDescriptor,
13 };
14 use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
15 
16 use thiserror::Error as ThisError;
17 
18 use crate::virtio::resource_bridge::{self, ResourceBridgeError, ResourceInfo, ResourceRequest};
19 use crate::virtio::video::format::{FramePlane, PlaneFormat};
20 use crate::virtio::video::protocol::{virtio_video_mem_entry, virtio_video_object_entry};
21 
22 /// Defines how resources for a given queue are represented.
/// Defines how resources for a given queue are represented.
///
/// Chosen per-queue; `VirtioObject` is the default (see the `Default` impl below).
#[derive(Clone, Copy, Debug)]
pub enum ResourceType {
    /// Resources are backed by guest memory pages.
    GuestPages,
    /// Resources are backed by virtio objects.
    VirtioObject,
}
30 
31 impl Default for ResourceType {
default() -> Self32     fn default() -> Self {
33         ResourceType::VirtioObject
34     }
35 }
36 
#[repr(C)]
#[derive(Clone, Copy)]
/// A guest resource entry which type is not decided yet.
///
/// Both interpretations of the same bytes are kept available; `#[repr(C)]` guarantees the two
/// fields start at offset 0, and the caller picks which member to read once the queue's
/// resource type is known.
pub union UnresolvedResourceEntry {
    /// Interpretation as a virtio object entry.
    pub object: virtio_video_object_entry,
    /// Interpretation as a guest memory (sg) entry.
    pub guest_mem: virtio_video_mem_entry,
}
// SAFETY: `UnresolvedResourceEntry` is `#[repr(C)]` and both members are made of integers (see
// the `Debug` impl below), so any byte pattern is a valid value of this type.
unsafe impl data_model::DataInit for UnresolvedResourceEntry {}
45 
46 impl fmt::Debug for UnresolvedResourceEntry {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result47     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
48         // Safe because `self.object` and `self.guest_mem` are the same size and both made of
49         // integers, making it safe to display them no matter their value.
50         write!(
51             f,
52             "unresolved {:?} or {:?}",
53             unsafe { self.object },
54             unsafe { self.guest_mem }
55         )
56     }
57 }
58 
/// Trait for types that can serve as video buffer backing memory.
pub trait BufferHandle: Sized {
    /// Try to clone this handle. This must only create a new reference to the same backing memory
    /// and not duplicate the buffer itself.
    fn try_clone(&self) -> Result<Self, base::Error>;

    /// Returns a linear mapping of [`offset`..`offset`+`size`] of the memory backing this buffer.
    ///
    /// The mapping is linear even if the backing memory is scattered (see `GuestMemHandle`).
    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>;
}
68 
/// Linear memory area of a `GuestMemHandle`
#[derive(Clone)]
pub struct GuestMemArea {
    /// Offset within the guest region to the start of the area.
    pub offset: u64,
    /// Length of the area within the memory region.
    pub length: usize,
}
77 
/// Handle to a buffer backed by (possibly scattered) guest memory pages.
pub struct GuestMemHandle {
    /// Descriptor to the guest memory region containing the buffer.
    pub desc: SafeDescriptor,
    /// Memory areas (i.e. sg list) that make the memory buffer.
    /// Areas are relative to `desc` and concatenated in order to form the buffer.
    pub mem_areas: Vec<GuestMemArea>,
}
84 
85 impl BufferHandle for GuestMemHandle {
try_clone(&self) -> Result<Self, base::Error>86     fn try_clone(&self) -> Result<Self, base::Error> {
87         Ok(Self {
88             desc: self.desc.try_clone()?,
89             mem_areas: self.mem_areas.clone(),
90         })
91     }
92 
get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>93     fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
94         let mut arena = MemoryMappingArena::new(size)?;
95         let mut mapped_size = 0;
96         let mut area_iter = self.mem_areas.iter();
97         let mut area_offset = offset;
98         while mapped_size < size {
99             let area = match area_iter.next() {
100                 Some(area) => area,
101                 None => {
102                     return Err(MmapError::InvalidRange(
103                         offset,
104                         size,
105                         self.mem_areas.iter().map(|a| a.length).sum(),
106                     ));
107                 }
108             };
109             if area_offset > area.length {
110                 area_offset -= area.length;
111             } else {
112                 let mapping_length = std::cmp::min(area.length - area_offset, size - mapped_size);
113                 arena.add_fd_offset(mapped_size, mapping_length, &self.desc, area.offset)?;
114                 mapped_size += mapping_length;
115                 area_offset = 0;
116             }
117         }
118         Ok(arena)
119     }
120 }
121 
/// Handle to a buffer backed by a virtio object (resolved through the resource bridge).
pub struct VirtioObjectHandle {
    /// Descriptor for the object.
    pub desc: SafeDescriptor,
    /// Modifier to apply to frame resources.
    pub modifier: u64,
}
128 
129 impl BufferHandle for VirtioObjectHandle {
try_clone(&self) -> Result<Self, base::Error>130     fn try_clone(&self) -> Result<Self, base::Error> {
131         Ok(Self {
132             desc: self.desc.try_clone()?,
133             modifier: self.modifier,
134         })
135     }
136 
get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>137     fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
138         MemoryMappingBuilder::new(size)
139             .from_descriptor(&self.desc)
140             .offset(offset as u64)
141             .build()
142             .map(MemoryMappingArena::from)
143     }
144 }
145 
/// Resolved handle to a guest resource, dispatching to the backing type's implementation.
pub enum GuestResourceHandle {
    /// Buffer backed by guest memory pages.
    GuestPages(GuestMemHandle),
    /// Buffer backed by a virtio object.
    VirtioObject(VirtioObjectHandle),
}
150 
151 impl BufferHandle for GuestResourceHandle {
try_clone(&self) -> Result<Self, base::Error>152     fn try_clone(&self) -> Result<Self, base::Error> {
153         Ok(match self {
154             Self::GuestPages(handle) => Self::GuestPages(handle.try_clone()?),
155             Self::VirtioObject(handle) => Self::VirtioObject(handle.try_clone()?),
156         })
157     }
158 
get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>159     fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
160         match self {
161             GuestResourceHandle::GuestPages(handle) => handle.get_mapping(offset, size),
162             GuestResourceHandle::VirtioObject(handle) => handle.get_mapping(offset, size),
163         }
164     }
165 }
166 
/// A fully resolved guest resource: backing memory plus its frame layout.
pub struct GuestResource {
    /// Handle to the backing memory.
    pub handle: GuestResourceHandle,
    /// Layout of color planes, if the resource will receive frames.
    pub planes: Vec<FramePlane>,
}
173 
174 #[derive(Debug, ThisError)]
175 pub enum GuestMemResourceCreationError {
176     #[error("Provided slice of entries is empty")]
177     NoEntriesProvided,
178     #[error("cannot get shm region: {0}")]
179     CantGetShmRegion(GuestMemoryError),
180     #[error("cannot get shm offset: {0}")]
181     CantGetShmOffset(GuestMemoryError),
182     #[error("error while cloning shm region descriptor: {0}")]
183     DescriptorCloneError(base::Error),
184 }
185 
186 #[derive(Debug, ThisError)]
187 pub enum ObjectResourceCreationError {
188     #[error("uuid {0:08} is larger than 32 bits")]
189     UuidNot32Bits(u128),
190     #[error("resource returned by bridge is not a buffer")]
191     NotABuffer,
192     #[error("resource bridge failure: {0}")]
193     ResourceBridgeFailure(ResourceBridgeError),
194 }
195 
196 impl GuestResource {
197     /// Try to convert an unresolved virtio guest memory entry into a resolved guest memory
198     /// resource.
199     ///
200     /// Convert `mem_entry` into the guest memory resource it represents and resolve it through
201     /// `mem`. `planes_format` describes the format of the individual planes for the buffer.
from_virtio_guest_mem_entry( mem_entries: &[virtio_video_mem_entry], mem: &GuestMemory, planes_format: &[PlaneFormat], ) -> Result<GuestResource, GuestMemResourceCreationError>202     pub fn from_virtio_guest_mem_entry(
203         mem_entries: &[virtio_video_mem_entry],
204         mem: &GuestMemory,
205         planes_format: &[PlaneFormat],
206     ) -> Result<GuestResource, GuestMemResourceCreationError> {
207         let region_desc = match mem_entries.first() {
208             None => return Err(GuestMemResourceCreationError::NoEntriesProvided),
209             Some(entry) => {
210                 let addr: u64 = entry.addr.into();
211 
212                 let guest_region = mem
213                     .shm_region(GuestAddress(addr))
214                     .map_err(GuestMemResourceCreationError::CantGetShmRegion)?;
215                 let desc = base::clone_descriptor(guest_region)
216                     .map_err(GuestMemResourceCreationError::DescriptorCloneError)?;
217                 // Safe because we are the sole owner of the duplicated descriptor.
218                 unsafe { SafeDescriptor::from_raw_descriptor(desc) }
219             }
220         };
221 
222         let mem_areas = mem_entries
223             .iter()
224             .map(|entry| {
225                 let addr: u64 = entry.addr.into();
226                 let length: u32 = entry.length.into();
227                 let region_offset = mem
228                     .offset_from_base(GuestAddress(addr))
229                     .map_err(GuestMemResourceCreationError::CantGetShmOffset)
230                     .unwrap();
231 
232                 GuestMemArea {
233                     offset: region_offset,
234                     length: length as usize,
235                 }
236             })
237             .collect();
238 
239         // The plane information can be computed from the currently set format.
240         let mut buffer_offset = 0;
241         let planes = planes_format
242             .iter()
243             .map(|p| {
244                 let plane_offset = buffer_offset;
245                 buffer_offset += p.plane_size;
246 
247                 FramePlane {
248                     offset: plane_offset as usize,
249                     stride: p.stride as usize,
250                 }
251             })
252             .collect();
253 
254         Ok(GuestResource {
255             handle: GuestResourceHandle::GuestPages(GuestMemHandle {
256                 desc: region_desc,
257                 mem_areas,
258             }),
259             planes,
260         })
261     }
262 
263     /// Try to convert an unresolved virtio object entry into a resolved object resource.
264     ///
265     /// Convert `object` into the object resource it represents and resolve it through `res_bridge`.
266     /// Returns an error if the object's UUID is invalid or cannot be resolved to a buffer object
267     /// by `res_bridge`.
from_virtio_object_entry( object: virtio_video_object_entry, res_bridge: &base::Tube, ) -> Result<GuestResource, ObjectResourceCreationError>268     pub fn from_virtio_object_entry(
269         object: virtio_video_object_entry,
270         res_bridge: &base::Tube,
271     ) -> Result<GuestResource, ObjectResourceCreationError> {
272         // We trust that the caller has chosen the correct object type.
273         let uuid = u128::from_be_bytes(object.uuid);
274 
275         // TODO(stevensd): `Virtio3DBackend::resource_assign_uuid` is currently implemented to use
276         // 32-bits resource_handles as UUIDs. Once it starts using real UUIDs, we need to update
277         // this conversion.
278         let handle = TryInto::<u32>::try_into(uuid)
279             .map_err(|_| ObjectResourceCreationError::UuidNot32Bits(uuid))?;
280 
281         let buffer_info = match resource_bridge::get_resource_info(
282             res_bridge,
283             ResourceRequest::GetBuffer { id: handle },
284         ) {
285             Ok(ResourceInfo::Buffer(buffer_info)) => buffer_info,
286             Ok(_) => return Err(ObjectResourceCreationError::NotABuffer),
287             Err(e) => return Err(ObjectResourceCreationError::ResourceBridgeFailure(e)),
288         };
289 
290         Ok(GuestResource {
291             handle: GuestResourceHandle::VirtioObject(VirtioObjectHandle {
292                 // Safe because `buffer_info.file` is a valid file descriptor and we are stealing
293                 // it.
294                 desc: unsafe {
295                     SafeDescriptor::from_raw_descriptor(buffer_info.file.into_raw_descriptor())
296                 },
297                 modifier: buffer_info.modifier,
298             }),
299             planes: buffer_info
300                 .planes
301                 .iter()
302                 .take_while(|p| p.offset != 0 || p.stride != 0)
303                 .map(|p| FramePlane {
304                     offset: p.offset as usize,
305                     stride: p.stride as usize,
306                 })
307                 .collect(),
308         })
309     }
310 
311     #[cfg(feature = "video-encoder")]
try_clone(&self) -> Result<Self, base::Error>312     pub fn try_clone(&self) -> Result<Self, base::Error> {
313         Ok(Self {
314             handle: self.handle.try_clone()?,
315             planes: self.planes.clone(),
316         })
317     }
318 }
319 
#[cfg(test)]
mod tests {
    use super::*;
    use base::{MappedRegion, SafeDescriptor, SharedMemory};

    /// Creates a sparse guest memory handle using as many pages as there are entries in
    /// `page_order`. The page with index `0` will be the first page, `1` will be the second page,
    /// etc.
    ///
    /// The memory handle is filled with increasing u32s starting from page 0, then page 1, and so
    /// on. Finally the handle is mapped into a linear space and we check that the written integers
    /// appear in the expected order.
    fn check_guest_mem_handle(page_order: &[usize]) {
        const PAGE_SIZE: usize = 0x1000;
        const U32_SIZE: usize = std::mem::size_of::<u32>();
        const ENTRIES_PER_PAGE: usize = PAGE_SIZE as usize / std::mem::size_of::<u32>();

        // Fill a vector of the same size as the handle with u32s of increasing value, following
        // the page layout given as argument.
        let mut data = vec![0u8; PAGE_SIZE * page_order.len()];
        for (page_index, page) in page_order.iter().enumerate() {
            let page_slice =
                &mut data[(page * PAGE_SIZE as usize)..((page + 1) * PAGE_SIZE as usize)];
            for (index, chunk) in page_slice.chunks_exact_mut(4).enumerate() {
                let sized_chunk: &mut [u8; 4] = chunk.try_into().unwrap();
                *sized_chunk = (((page_index * ENTRIES_PER_PAGE) + index) as u32).to_ne_bytes();
            }
        }

        // Copy the initialized vector's content into an anonymous shared memory.
        let mem = SharedMemory::anon(data.len() as u64).unwrap();
        let mapping = MemoryMappingBuilder::new(mem.size() as usize)
            .from_shared_memory(&mem)
            .build()
            .unwrap();
        assert_eq!(mapping.write_slice(&data, 0).unwrap(), data.len());

        // Create the `GuestMemHandle` we will try to map and retrieve the data from.
        let mem_handle = GuestResourceHandle::GuestPages(GuestMemHandle {
            // SAFETY: we are the sole owner of the descriptor just cloned from `mem`.
            desc: unsafe {
                SafeDescriptor::from_raw_descriptor(base::clone_descriptor(&mem).unwrap())
            },
            // One sg entry per page, in the (possibly non-linear) order given by `page_order`.
            mem_areas: page_order
                .iter()
                .map(|&page| GuestMemArea {
                    offset: page as u64 * PAGE_SIZE as u64,
                    length: PAGE_SIZE as usize,
                })
                .collect(),
        });

        // Map the handle into a linear memory area, retrieve its data into a new vector, and check
        // that its u32s appear to increase linearly.
        let mapping = mem_handle.get_mapping(0, mem.size() as usize).unwrap();
        let mut data = vec![0u8; PAGE_SIZE * page_order.len()];
        // SAFETY: the mapping covers `mem.size()` == `data.len()` bytes and the source and
        // destination buffers do not overlap.
        unsafe { std::ptr::copy_nonoverlapping(mapping.as_ptr(), data.as_mut_ptr(), data.len()) };
        for (index, chunk) in data.chunks_exact(U32_SIZE).enumerate() {
            let sized_chunk: &[u8; 4] = chunk.try_into().unwrap();
            assert_eq!(u32::from_ne_bytes(*sized_chunk), index as u32);
        }
    }

    // Fill a guest memory handle with a single memory page.
    // Then check that the data can be properly mapped and appears in the expected order.
    #[test]
    fn test_single_guest_mem_handle() {
        check_guest_mem_handle(&[0])
    }

    // Fill a guest memory handle with 4 memory pages that are contiguous.
    // Then check that the pages appear in the expected order in the mapping.
    #[test]
    fn test_linear_guest_mem_handle() {
        check_guest_mem_handle(&[0, 1, 2, 3])
    }

    // Fill a guest memory handle with 8 pages mapped in non-linear order.
    // Then check that the pages appear in the expected order in the mapping.
    #[test]
    fn test_sparse_guest_mem_handle() {
        check_guest_mem_handle(&[1, 7, 6, 3, 5, 0, 4, 2])
    }
}
403