• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2021 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! Resource management and resolution for the virtio-video device.
6 
7 use std::convert::TryInto;
8 use std::fmt;
9 
10 use base::linux::MemoryMappingBuilderUnix;
11 use base::AsRawDescriptor;
12 use base::FromRawDescriptor;
13 use base::IntoRawDescriptor;
14 use base::MemoryMappingArena;
15 use base::MemoryMappingBuilder;
16 use base::MmapError;
17 use base::SafeDescriptor;
18 use thiserror::Error as ThisError;
19 use vm_memory::GuestAddress;
20 use vm_memory::GuestMemory;
21 use vm_memory::GuestMemoryError;
22 use zerocopy::FromBytes;
23 use zerocopy::Immutable;
24 use zerocopy::IntoBytes;
25 use zerocopy::KnownLayout;
26 
27 use crate::virtio::resource_bridge;
28 use crate::virtio::resource_bridge::ResourceBridgeError;
29 use crate::virtio::resource_bridge::ResourceInfo;
30 use crate::virtio::resource_bridge::ResourceRequest;
31 use crate::virtio::video::format::Format;
32 use crate::virtio::video::format::FramePlane;
33 use crate::virtio::video::params::Params;
34 use crate::virtio::video::protocol::virtio_video_mem_entry;
35 use crate::virtio::video::protocol::virtio_video_object_entry;
36 
/// Defines how resources for a given queue are represented.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum ResourceType {
    /// Resources are backed by guest memory pages.
    GuestPages,
    /// Resources are backed by virtio objects.
    ///
    /// This is the default representation used when the driver does not specify one.
    #[default]
    VirtioObject,
}
46 
/// A guest resource entry whose type has not been decided yet.
///
/// Entries arrive from the driver as raw 16-byte blobs; whether they describe guest
/// memory pages or a virtio object is only known from the queue's `ResourceType`, at
/// which point the bytes can be reinterpreted (see [`UnresolvedResourceEntry::object`]).
#[repr(C)]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct UnresolvedResourceEntry([u8; 16]);
51 
52 impl fmt::Debug for UnresolvedResourceEntry {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result53     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
54         write!(f, "unresolved {:?}", self.0)
55     }
56 }
57 
impl UnresolvedResourceEntry {
    /// Reinterpret this entry as a virtio object entry.
    ///
    /// The caller is responsible for ensuring the queue's resource type is actually
    /// `VirtioObject`; no validation of the content is performed here.
    pub fn object(&self) -> virtio_video_object_entry {
        // `read_from_bytes` only fails on a size mismatch; the entry is assumed to be
        // exactly as large as `virtio_video_object_entry` (16 bytes) — confirm against
        // the protocol definition.
        virtio_video_object_entry::read_from_bytes(&self.0).unwrap()
    }
}
63 
/// Trait for types that can serve as video buffer backing memory.
pub trait BufferHandle: Sized {
    /// Try to clone this handle. This must only create a new reference to the same backing memory
    /// and not duplicate the buffer itself.
    fn try_clone(&self) -> Result<Self, base::Error>;

    /// Returns a linear mapping of [`offset`..`offset`+`size`] of the memory backing this buffer.
    ///
    /// Implementations may need to stitch together several non-contiguous areas to
    /// produce the linear view.
    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>;
}
73 
/// Linear memory area of a `GuestMemHandle`.
#[derive(Clone)]
pub struct GuestMemArea {
    /// Offset within the guest region to the start of the area.
    pub offset: u64,
    /// Length of the area within the memory region.
    pub length: usize,
}
82 
/// Handle to a buffer backed by guest memory pages, possibly scattered across several
/// disjoint areas of a single shared memory region.
pub struct GuestMemHandle {
    /// Descriptor to the guest memory region containing the buffer.
    pub desc: SafeDescriptor,
    /// Memory areas (i.e. sg list) that make the memory buffer.
    pub mem_areas: Vec<GuestMemArea>,
}
89 
90 impl BufferHandle for GuestMemHandle {
try_clone(&self) -> Result<Self, base::Error>91     fn try_clone(&self) -> Result<Self, base::Error> {
92         Ok(Self {
93             desc: self.desc.try_clone()?,
94             mem_areas: self.mem_areas.clone(),
95         })
96     }
97 
get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>98     fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
99         let mut arena = MemoryMappingArena::new(size)?;
100         let mut mapped_size = 0;
101         let mut area_iter = self.mem_areas.iter();
102         let mut area_offset = offset;
103         while mapped_size < size {
104             let area = match area_iter.next() {
105                 Some(area) => area,
106                 None => {
107                     return Err(MmapError::InvalidRange(
108                         offset,
109                         size,
110                         self.mem_areas.iter().map(|a| a.length).sum(),
111                     ));
112                 }
113             };
114             if area_offset > area.length {
115                 area_offset -= area.length;
116             } else {
117                 let mapping_length = std::cmp::min(area.length - area_offset, size - mapped_size);
118                 arena.add_fd_offset(mapped_size, mapping_length, &self.desc, area.offset)?;
119                 mapped_size += mapping_length;
120                 area_offset = 0;
121             }
122         }
123         Ok(arena)
124     }
125 }
126 
/// Handle to a buffer backed by a virtio object (e.g. one exported by the GPU device).
pub struct VirtioObjectHandle {
    /// Descriptor for the object.
    pub desc: SafeDescriptor,
    /// Modifier to apply to frame resources.
    pub modifier: u64,
}
133 
134 impl BufferHandle for VirtioObjectHandle {
try_clone(&self) -> Result<Self, base::Error>135     fn try_clone(&self) -> Result<Self, base::Error> {
136         Ok(Self {
137             desc: self.desc.try_clone()?,
138             modifier: self.modifier,
139         })
140     }
141 
get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>142     fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
143         MemoryMappingBuilder::new(size)
144             .from_descriptor(&self.desc)
145             .offset(offset as u64)
146             .build()
147             .map(MemoryMappingArena::from)
148     }
149 }
150 
/// Handle to the backing memory of a guest resource, resolved to one of the supported
/// resource types.
pub enum GuestResourceHandle {
    /// Resource backed by guest memory pages.
    GuestPages(GuestMemHandle),
    /// Resource backed by a virtio object.
    VirtioObject(VirtioObjectHandle),
}
155 
156 impl BufferHandle for GuestResourceHandle {
try_clone(&self) -> Result<Self, base::Error>157     fn try_clone(&self) -> Result<Self, base::Error> {
158         Ok(match self {
159             Self::GuestPages(handle) => Self::GuestPages(handle.try_clone()?),
160             Self::VirtioObject(handle) => Self::VirtioObject(handle.try_clone()?),
161         })
162     }
163 
get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>164     fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
165         match self {
166             GuestResourceHandle::GuestPages(handle) => handle.get_mapping(offset, size),
167             GuestResourceHandle::VirtioObject(handle) => handle.get_mapping(offset, size),
168         }
169     }
170 }
171 
/// A guest buffer resource that has been resolved into a usable handle plus the frame
/// metadata needed to interpret it.
pub struct GuestResource {
    /// Handle to the backing memory.
    pub handle: GuestResourceHandle,
    /// Layout of color planes, if the resource will receive frames.
    pub planes: Vec<FramePlane>,
    /// Frame width, taken from the stream parameters at resolution time.
    pub width: u32,
    /// Frame height, taken from the stream parameters at resolution time.
    pub height: u32,
    /// Pixel format, taken from the stream parameters at resolution time.
    pub format: Format,
    /// Whether the buffer can be accessed by the guest CPU. This means the host must ensure that
    /// all operations on the buffer are completed before passing it to the guest.
    pub guest_cpu_mappable: bool,
}
184 
/// Errors that can occur while resolving a set of guest memory entries into a
/// [`GuestResource`] (see [`GuestResource::from_virtio_guest_mem_entry`]).
#[derive(Debug, ThisError)]
pub enum GuestMemResourceCreationError {
    #[error("Provided slice of entries is empty")]
    NoEntriesProvided,
    #[error("cannot get shm region: {0}")]
    CantGetShmRegion(GuestMemoryError),
    #[error("cannot get shm offset: {0}")]
    CantGetShmOffset(GuestMemoryError),
    #[error("error while cloning shm region descriptor: {0}")]
    DescriptorCloneError(base::Error),
    // All entries of a resource must resolve to the same shm object, since a
    // `GuestMemHandle` carries a single descriptor.
    #[error("guest memory with multiple shm objects not supported")]
    MultipleShmObjects,
}
198 
/// Errors that can occur while resolving a virtio object entry into a
/// [`GuestResource`] (see [`GuestResource::from_virtio_object_entry`]).
#[derive(Debug, ThisError)]
pub enum ObjectResourceCreationError {
    // The resource bridge currently only understands 32-bit resource handles, so
    // larger UUIDs cannot be resolved.
    #[error("uuid {0:08} is larger than 32 bits")]
    UuidNot32Bits(u128),
    #[error("resource returned by bridge is not a buffer")]
    NotABuffer,
    #[error("resource bridge failure: {0}")]
    ResourceBridgeFailure(ResourceBridgeError),
}
208 
209 impl GuestResource {
210     /// Try to convert an unresolved virtio guest memory entry into a resolved guest memory
211     /// resource.
212     ///
213     /// Convert `mem_entry` into the guest memory resource it represents and resolve it through
214     /// `mem`.
215     /// Width, height and format is set from `params`.
216     ///
217     /// Panics if `params.format` is `None`.
from_virtio_guest_mem_entry( mem_entries: &[virtio_video_mem_entry], mem: &GuestMemory, params: &Params, ) -> Result<GuestResource, GuestMemResourceCreationError>218     pub fn from_virtio_guest_mem_entry(
219         mem_entries: &[virtio_video_mem_entry],
220         mem: &GuestMemory,
221         params: &Params,
222     ) -> Result<GuestResource, GuestMemResourceCreationError> {
223         let region_desc = match mem_entries.first() {
224             None => return Err(GuestMemResourceCreationError::NoEntriesProvided),
225             Some(entry) => {
226                 let addr: u64 = entry.addr.into();
227 
228                 let guest_region = mem
229                     .shm_region(GuestAddress(addr))
230                     .map_err(GuestMemResourceCreationError::CantGetShmRegion)?;
231                 base::clone_descriptor(guest_region)
232                     .map_err(GuestMemResourceCreationError::DescriptorCloneError)?
233             }
234         };
235 
236         let mem_areas = mem_entries
237             .iter()
238             .map(|entry| {
239                 let addr: u64 = entry.addr.into();
240                 let length: u32 = entry.length.into();
241                 let (backing_obj, region_offset) = mem
242                     .offset_from_base(GuestAddress(addr))
243                     .map_err(GuestMemResourceCreationError::CantGetShmOffset)
244                     .unwrap();
245                 if region_desc.as_raw_descriptor() != backing_obj.as_raw_descriptor() {
246                     return Err(GuestMemResourceCreationError::MultipleShmObjects);
247                 }
248 
249                 Ok(GuestMemArea {
250                     offset: region_offset,
251                     length: length as usize,
252                 })
253             })
254             .collect::<Result<_, _>>()?;
255 
256         let handle = GuestResourceHandle::GuestPages(GuestMemHandle {
257             desc: region_desc,
258             mem_areas,
259         });
260 
261         // The plane information can be computed from the currently set format.
262         let mut buffer_offset = 0;
263         let planes = params
264             .plane_formats
265             .iter()
266             .map(|p| {
267                 let plane_offset = buffer_offset;
268                 buffer_offset += p.plane_size;
269 
270                 FramePlane {
271                     offset: plane_offset as usize,
272                     stride: p.stride as usize,
273                     size: p.plane_size as usize,
274                 }
275             })
276             .collect();
277 
278         Ok(GuestResource {
279             handle,
280             planes,
281             width: params.frame_width,
282             height: params.frame_height,
283             format: params.format.unwrap(),
284             guest_cpu_mappable: true,
285         })
286     }
287 
288     /// Try to convert an unresolved virtio object entry into a resolved object resource.
289     ///
290     /// Convert `object` into the object resource it represents and resolve it through `res_bridge`.
291     /// Returns an error if the object's UUID is invalid or cannot be resolved to a buffer object
292     /// by `res_bridge`.
from_virtio_object_entry( object: virtio_video_object_entry, res_bridge: &base::Tube, params: &Params, ) -> Result<GuestResource, ObjectResourceCreationError>293     pub fn from_virtio_object_entry(
294         object: virtio_video_object_entry,
295         res_bridge: &base::Tube,
296         params: &Params,
297     ) -> Result<GuestResource, ObjectResourceCreationError> {
298         // We trust that the caller has chosen the correct object type.
299         let uuid = u128::from_be_bytes(object.uuid);
300 
301         // TODO(stevensd): `Virtio3DBackend::resource_assign_uuid` is currently implemented to use
302         // 32-bits resource_handles as UUIDs. Once it starts using real UUIDs, we need to update
303         // this conversion.
304         let handle = TryInto::<u32>::try_into(uuid)
305             .map_err(|_| ObjectResourceCreationError::UuidNot32Bits(uuid))?;
306 
307         let buffer_info = match resource_bridge::get_resource_info(
308             res_bridge,
309             ResourceRequest::GetBuffer { id: handle },
310         ) {
311             Ok(ResourceInfo::Buffer(buffer_info)) => buffer_info,
312             Ok(_) => return Err(ObjectResourceCreationError::NotABuffer),
313             Err(e) => return Err(ObjectResourceCreationError::ResourceBridgeFailure(e)),
314         };
315 
316         let handle = GuestResourceHandle::VirtioObject(VirtioObjectHandle {
317             // SAFETY:
318             // Safe because `buffer_info.file` is a valid file descriptor and we are stealing
319             // it.
320             desc: unsafe {
321                 SafeDescriptor::from_raw_descriptor(buffer_info.handle.into_raw_descriptor())
322             },
323             modifier: buffer_info.modifier,
324         });
325 
326         // TODO(ishitatsuyuki): Right now, there are two sources of metadata: through the
327         //                      virtio_video_params fields, or through the buffer metadata provided
328         //                      by the VirtioObject backend.
329         //                      Unfortunately neither is sufficient. The virtio_video_params struct
330         //                      lacks the plane offset, while some virtio-gpu backend doesn't
331         //                      have information about the plane size, or in some cases even the
332         //                      overall frame width and height.
333         //                      We will mix-and-match metadata from the more reliable data source
334         //                      below; ideally this should be fixed to use single source of truth.
335         let planes = params
336             .plane_formats
337             .iter()
338             .zip(&buffer_info.planes)
339             .map(|(param, buffer)| FramePlane {
340                 // When the virtio object backend was implemented, the buffer and stride was sourced
341                 // from the object backend's metadata (`buffer`). To lean on the safe side, we'll
342                 // keep using data from `buffer`, even in case of stride it's also provided by
343                 // `param`.
344                 offset: buffer.offset as usize,
345                 stride: buffer.stride as usize,
346                 size: param.plane_size as usize,
347             })
348             .collect();
349 
350         Ok(GuestResource {
351             handle,
352             planes,
353             width: params.frame_width,
354             height: params.frame_height,
355             format: params.format.unwrap(),
356             guest_cpu_mappable: buffer_info.guest_cpu_mappable,
357         })
358     }
359 
360     #[cfg(feature = "video-encoder")]
try_clone(&self) -> Result<Self, base::Error>361     pub fn try_clone(&self) -> Result<Self, base::Error> {
362         Ok(Self {
363             handle: self.handle.try_clone()?,
364             planes: self.planes.clone(),
365             width: self.width,
366             height: self.height,
367             format: self.format,
368             guest_cpu_mappable: self.guest_cpu_mappable,
369         })
370     }
371 }
372 
#[cfg(test)]
mod tests {
    use base::MappedRegion;
    use base::SharedMemory;

    use super::*;

    /// Creates a sparse guest memory handle using as many pages as there are entries in
    /// `page_order`. The page with index `0` will be the first page, `1` will be the second page,
    /// etc.
    ///
    /// The memory handle is filled with increasing u32s starting from page 0, then page 1, and so
    /// on. Finally the handle is mapped into a linear space and we check that the written integers
    /// appear in the expected order.
    fn check_guest_mem_handle(page_order: &[usize]) {
        const PAGE_SIZE: usize = 0x1000;
        const U32_SIZE: usize = std::mem::size_of::<u32>();
        const ENTRIES_PER_PAGE: usize = PAGE_SIZE / U32_SIZE;

        // Fill a vector of the same size as the handle with u32s of increasing value, following
        // the page layout given as argument.
        let mut data = vec![0u8; PAGE_SIZE * page_order.len()];
        for (page_index, page) in page_order.iter().enumerate() {
            let page_slice = &mut data[(page * PAGE_SIZE)..((page + 1) * PAGE_SIZE)];
            for (index, chunk) in page_slice.chunks_exact_mut(U32_SIZE).enumerate() {
                let sized_chunk: &mut [u8; U32_SIZE] = chunk.try_into().unwrap();
                *sized_chunk = (((page_index * ENTRIES_PER_PAGE) + index) as u32).to_ne_bytes();
            }
        }

        // Copy the initialized vector's content into an anonymous shared memory.
        let mem = SharedMemory::new("data-dest", data.len() as u64).unwrap();
        let mapping = MemoryMappingBuilder::new(mem.size() as usize)
            .from_shared_memory(&mem)
            .build()
            .unwrap();
        assert_eq!(mapping.write_slice(&data, 0).unwrap(), data.len());

        // Create the `GuestMemHandle` we will try to map and retrieve the data from.
        let mem_handle = GuestResourceHandle::GuestPages(GuestMemHandle {
            desc: base::clone_descriptor(&mem).unwrap(),
            mem_areas: page_order
                .iter()
                .map(|&page| GuestMemArea {
                    offset: page as u64 * PAGE_SIZE as u64,
                    length: PAGE_SIZE,
                })
                .collect(),
        });

        // Map the handle into a linear memory area, retrieve its data into a new vector, and check
        // that its u32s appear to increase linearly.
        let mapping = mem_handle.get_mapping(0, mem.size() as usize).unwrap();
        let mut data = vec![0u8; PAGE_SIZE * page_order.len()];
        // SAFETY: src and dst are valid and aligned
        unsafe { std::ptr::copy_nonoverlapping(mapping.as_ptr(), data.as_mut_ptr(), data.len()) };
        for (index, chunk) in data.chunks_exact(U32_SIZE).enumerate() {
            let sized_chunk: &[u8; U32_SIZE] = chunk.try_into().unwrap();
            assert_eq!(u32::from_ne_bytes(*sized_chunk), index as u32);
        }
    }

    // Fill a guest memory handle with a single memory page.
    // Then check that the data can be properly mapped and appears in the expected order.
    #[test]
    fn test_single_guest_mem_handle() {
        check_guest_mem_handle(&[0])
    }

    // Fill a guest memory handle with 4 memory pages that are contiguous.
    // Then check that the pages appear in the expected order in the mapping.
    #[test]
    fn test_linear_guest_mem_handle() {
        check_guest_mem_handle(&[0, 1, 2, 3])
    }

    // Fill a guest memory handle with 8 pages mapped in non-linear order.
    // Then check that the pages appear in the expected order in the mapping.
    #[test]
    fn test_sparse_guest_mem_handle() {
        check_guest_mem_handle(&[1, 7, 6, 3, 5, 0, 4, 2])
    }
}
456