• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2021 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! Resource management and resolution for the virtio-video device.
6 
7 use std::convert::TryInto;
8 use std::fmt;
9 
10 use base::FromRawDescriptor;
11 use base::IntoRawDescriptor;
12 use base::MemoryMappingArena;
13 use base::MemoryMappingBuilder;
14 use base::MemoryMappingBuilderUnix;
15 use base::MmapError;
16 use base::SafeDescriptor;
17 use thiserror::Error as ThisError;
18 use vm_memory::GuestAddress;
19 use vm_memory::GuestMemory;
20 use vm_memory::GuestMemoryError;
21 use zerocopy::AsBytes;
22 use zerocopy::FromBytes;
23 
24 use crate::virtio::resource_bridge;
25 use crate::virtio::resource_bridge::ResourceBridgeError;
26 use crate::virtio::resource_bridge::ResourceInfo;
27 use crate::virtio::resource_bridge::ResourceRequest;
28 use crate::virtio::video::format::Format;
29 use crate::virtio::video::format::FramePlane;
30 use crate::virtio::video::params::Params;
31 use crate::virtio::video::protocol::virtio_video_mem_entry;
32 use crate::virtio::video::protocol::virtio_video_object_entry;
33 
/// Defines how resources for a given queue are represented.
///
/// Each virtio-video queue carries one resource type, negotiated with the guest; the two
/// variants correspond to the two kinds of entries that can appear in a resource request
/// (`virtio_video_mem_entry` vs. `virtio_video_object_entry`).
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum ResourceType {
    /// Resources are backed by guest memory pages (scatter-gather lists of guest addresses).
    GuestPages,
    /// Resources are backed by virtio objects, referenced by UUID through the resource bridge.
    /// This is the default resource type.
    #[default]
    VirtioObject,
}
43 
#[repr(C)]
#[derive(Clone, Copy, AsBytes, FromBytes)]
/// A guest resource entry whose type has not been decided yet.
///
/// The wire format is the same size for both entry kinds, so the device reads the raw bytes
/// into this union and interprets them once the queue's [`ResourceType`] is known.
pub union UnresolvedResourceEntry {
    /// Interpretation as a virtio object entry (a 16-byte UUID).
    pub object: virtio_video_object_entry,
    /// Interpretation as a guest memory entry (guest address and length).
    pub guest_mem: virtio_video_mem_entry,
}
51 
52 impl fmt::Debug for UnresolvedResourceEntry {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result53     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
54         // Safe because `self.object` and `self.guest_mem` are the same size and both made of
55         // integers, making it safe to display them no matter their value.
56         write!(
57             f,
58             "unresolved {:?} or {:?}",
59             unsafe { self.object },
60             unsafe { self.guest_mem }
61         )
62     }
63 }
64 
/// Trait for types that can serve as video buffer backing memory.
pub trait BufferHandle: Sized {
    /// Try to clone this handle. This must only create a new reference to the same backing memory
    /// and not duplicate the buffer itself.
    fn try_clone(&self) -> Result<Self, base::Error>;

    /// Returns a linear mapping of [`offset`..`offset`+`size`] of the memory backing this buffer.
    ///
    /// The backing memory itself may be non-contiguous; implementations are expected to stitch
    /// it into a single linear view.
    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>;
}
74 
/// Linear memory area of a `GuestMemHandle`.
///
/// One element of the scatter-gather list describing a guest memory buffer.
#[derive(Clone)]
pub struct GuestMemArea {
    /// Offset within the guest region to the start of the area.
    pub offset: u64,
    /// Length of the area within the memory region, in bytes.
    pub length: usize,
}
83 
/// Handle to a buffer backed by a scatter-gather list of guest memory pages.
pub struct GuestMemHandle {
    /// Descriptor to the guest memory region containing the buffer.
    pub desc: SafeDescriptor,
    /// Memory areas (i.e. sg list) that make the memory buffer, in buffer order.
    pub mem_areas: Vec<GuestMemArea>,
}
90 
91 impl BufferHandle for GuestMemHandle {
try_clone(&self) -> Result<Self, base::Error>92     fn try_clone(&self) -> Result<Self, base::Error> {
93         Ok(Self {
94             desc: self.desc.try_clone()?,
95             mem_areas: self.mem_areas.clone(),
96         })
97     }
98 
get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>99     fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
100         let mut arena = MemoryMappingArena::new(size)?;
101         let mut mapped_size = 0;
102         let mut area_iter = self.mem_areas.iter();
103         let mut area_offset = offset;
104         while mapped_size < size {
105             let area = match area_iter.next() {
106                 Some(area) => area,
107                 None => {
108                     return Err(MmapError::InvalidRange(
109                         offset,
110                         size,
111                         self.mem_areas.iter().map(|a| a.length).sum(),
112                     ));
113                 }
114             };
115             if area_offset > area.length {
116                 area_offset -= area.length;
117             } else {
118                 let mapping_length = std::cmp::min(area.length - area_offset, size - mapped_size);
119                 arena.add_fd_offset(mapped_size, mapping_length, &self.desc, area.offset)?;
120                 mapped_size += mapping_length;
121                 area_offset = 0;
122             }
123         }
124         Ok(arena)
125     }
126 }
127 
/// Handle to a buffer backed by a virtio object (e.g. an exported GPU buffer).
pub struct VirtioObjectHandle {
    /// Descriptor for the object.
    pub desc: SafeDescriptor,
    /// Modifier to apply to frame resources (DRM format modifier).
    pub modifier: u64,
}
134 
135 impl BufferHandle for VirtioObjectHandle {
try_clone(&self) -> Result<Self, base::Error>136     fn try_clone(&self) -> Result<Self, base::Error> {
137         Ok(Self {
138             desc: self.desc.try_clone()?,
139             modifier: self.modifier,
140         })
141     }
142 
get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>143     fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
144         MemoryMappingBuilder::new(size)
145             .from_descriptor(&self.desc)
146             .offset(offset as u64)
147             .build()
148             .map(MemoryMappingArena::from)
149     }
150 }
151 
/// Handle to the backing memory of a guest resource, either kind of backing.
pub enum GuestResourceHandle {
    /// Buffer backed by guest memory pages.
    GuestPages(GuestMemHandle),
    /// Buffer backed by a virtio object.
    VirtioObject(VirtioObjectHandle),
}
156 
157 impl BufferHandle for GuestResourceHandle {
try_clone(&self) -> Result<Self, base::Error>158     fn try_clone(&self) -> Result<Self, base::Error> {
159         Ok(match self {
160             Self::GuestPages(handle) => Self::GuestPages(handle.try_clone()?),
161             Self::VirtioObject(handle) => Self::VirtioObject(handle.try_clone()?),
162         })
163     }
164 
get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>165     fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
166         match self {
167             GuestResourceHandle::GuestPages(handle) => handle.get_mapping(offset, size),
168             GuestResourceHandle::VirtioObject(handle) => handle.get_mapping(offset, size),
169         }
170     }
171 }
172 
/// A fully resolved guest buffer resource, ready to be mapped and used by the device.
pub struct GuestResource {
    /// Handle to the backing memory.
    pub handle: GuestResourceHandle,
    /// Layout of color planes, if the resource will receive frames.
    pub planes: Vec<FramePlane>,
    /// Frame width in pixels, taken from the stream parameters at resolution time.
    pub width: u32,
    /// Frame height in pixels, taken from the stream parameters at resolution time.
    pub height: u32,
    /// Pixel/coded format of the resource, taken from the stream parameters.
    pub format: Format,
}
182 
183 #[derive(Debug, ThisError)]
184 pub enum GuestMemResourceCreationError {
185     #[error("Provided slice of entries is empty")]
186     NoEntriesProvided,
187     #[error("cannot get shm region: {0}")]
188     CantGetShmRegion(GuestMemoryError),
189     #[error("cannot get shm offset: {0}")]
190     CantGetShmOffset(GuestMemoryError),
191     #[error("error while cloning shm region descriptor: {0}")]
192     DescriptorCloneError(base::Error),
193 }
194 
/// Errors that can occur while resolving a virtio-object-backed resource
/// (see `GuestResource::from_virtio_object_entry`).
#[derive(Debug, ThisError)]
pub enum ObjectResourceCreationError {
    /// The object's UUID does not fit in 32 bits, which the resource bridge currently requires.
    // NOTE(review): `{0:08}` formats the u128 as zero-padded *decimal*, width 8 — a hex
    // specifier (e.g. `{0:#x}`) was likely intended for a UUID; confirm before changing.
    #[error("uuid {0:08} is larger than 32 bits")]
    UuidNot32Bits(u128),
    /// The resource bridge resolved the UUID to something other than a buffer.
    #[error("resource returned by bridge is not a buffer")]
    NotABuffer,
    /// Communication with the resource bridge failed.
    #[error("resource bridge failure: {0}")]
    ResourceBridgeFailure(ResourceBridgeError),
}
204 
205 impl GuestResource {
206     /// Try to convert an unresolved virtio guest memory entry into a resolved guest memory
207     /// resource.
208     ///
209     /// Convert `mem_entry` into the guest memory resource it represents and resolve it through
210     /// `mem`.
211     /// Width, height and format is set from `params`.
212     ///
213     /// Panics if `params.format` is `None`.
from_virtio_guest_mem_entry( mem_entries: &[virtio_video_mem_entry], mem: &GuestMemory, params: &Params, ) -> Result<GuestResource, GuestMemResourceCreationError>214     pub fn from_virtio_guest_mem_entry(
215         mem_entries: &[virtio_video_mem_entry],
216         mem: &GuestMemory,
217         params: &Params,
218     ) -> Result<GuestResource, GuestMemResourceCreationError> {
219         let region_desc = match mem_entries.first() {
220             None => return Err(GuestMemResourceCreationError::NoEntriesProvided),
221             Some(entry) => {
222                 let addr: u64 = entry.addr.into();
223 
224                 let guest_region = mem
225                     .shm_region(GuestAddress(addr))
226                     .map_err(GuestMemResourceCreationError::CantGetShmRegion)?;
227                 let desc = base::clone_descriptor(guest_region)
228                     .map_err(GuestMemResourceCreationError::DescriptorCloneError)?;
229                 // Safe because we are the sole owner of the duplicated descriptor.
230                 unsafe { SafeDescriptor::from_raw_descriptor(desc) }
231             }
232         };
233 
234         let mem_areas = mem_entries
235             .iter()
236             .map(|entry| {
237                 let addr: u64 = entry.addr.into();
238                 let length: u32 = entry.length.into();
239                 let region_offset = mem
240                     .offset_from_base(GuestAddress(addr))
241                     .map_err(GuestMemResourceCreationError::CantGetShmOffset)
242                     .unwrap();
243 
244                 GuestMemArea {
245                     offset: region_offset,
246                     length: length as usize,
247                 }
248             })
249             .collect();
250 
251         let handle = GuestResourceHandle::GuestPages(GuestMemHandle {
252             desc: region_desc,
253             mem_areas,
254         });
255 
256         // The plane information can be computed from the currently set format.
257         let mut buffer_offset = 0;
258         let planes = params
259             .plane_formats
260             .iter()
261             .map(|p| {
262                 let plane_offset = buffer_offset;
263                 buffer_offset += p.plane_size;
264 
265                 FramePlane {
266                     offset: plane_offset as usize,
267                     stride: p.stride as usize,
268                     size: p.plane_size as usize,
269                 }
270             })
271             .collect();
272 
273         Ok(GuestResource {
274             handle,
275             planes,
276             width: params.frame_width,
277             height: params.frame_height,
278             format: params.format.unwrap(),
279         })
280     }
281 
282     /// Try to convert an unresolved virtio object entry into a resolved object resource.
283     ///
284     /// Convert `object` into the object resource it represents and resolve it through `res_bridge`.
285     /// Returns an error if the object's UUID is invalid or cannot be resolved to a buffer object
286     /// by `res_bridge`.
from_virtio_object_entry( object: virtio_video_object_entry, res_bridge: &base::Tube, params: &Params, ) -> Result<GuestResource, ObjectResourceCreationError>287     pub fn from_virtio_object_entry(
288         object: virtio_video_object_entry,
289         res_bridge: &base::Tube,
290         params: &Params,
291     ) -> Result<GuestResource, ObjectResourceCreationError> {
292         // We trust that the caller has chosen the correct object type.
293         let uuid = u128::from_be_bytes(object.uuid);
294 
295         // TODO(stevensd): `Virtio3DBackend::resource_assign_uuid` is currently implemented to use
296         // 32-bits resource_handles as UUIDs. Once it starts using real UUIDs, we need to update
297         // this conversion.
298         let handle = TryInto::<u32>::try_into(uuid)
299             .map_err(|_| ObjectResourceCreationError::UuidNot32Bits(uuid))?;
300 
301         let buffer_info = match resource_bridge::get_resource_info(
302             res_bridge,
303             ResourceRequest::GetBuffer { id: handle },
304         ) {
305             Ok(ResourceInfo::Buffer(buffer_info)) => buffer_info,
306             Ok(_) => return Err(ObjectResourceCreationError::NotABuffer),
307             Err(e) => return Err(ObjectResourceCreationError::ResourceBridgeFailure(e)),
308         };
309 
310         let handle = GuestResourceHandle::VirtioObject(VirtioObjectHandle {
311             // Safe because `buffer_info.file` is a valid file descriptor and we are stealing
312             // it.
313             desc: unsafe {
314                 SafeDescriptor::from_raw_descriptor(buffer_info.handle.into_raw_descriptor())
315             },
316             modifier: buffer_info.modifier,
317         });
318 
319         // TODO(ishitatsuyuki): Right now, there are two sources of metadata: through the
320         //                      virtio_video_params fields, or through the buffer metadata provided
321         //                      by the VirtioObject backend.
322         //                      Unfortunately neither is sufficient. The virtio_video_params struct
323         //                      lacks the plane offset, while some virtio-gpu backend doesn't
324         //                      have information about the plane size, or in some cases even the
325         //                      overall frame width and height.
326         //                      We will mix-and-match metadata from the more reliable data source
327         //                      below; ideally this should be fixed to use single source of truth.
328         let planes = params
329             .plane_formats
330             .iter()
331             .zip(&buffer_info.planes)
332             .map(|(param, buffer)| FramePlane {
333                 // When the virtio object backend was implemented, the buffer and stride was sourced
334                 // from the object backend's metadata (`buffer`). To lean on the safe side, we'll
335                 // keep using data from `buffer`, even in case of stride it's also provided by
336                 // `param`.
337                 offset: buffer.offset as usize,
338                 stride: buffer.stride as usize,
339                 size: param.plane_size as usize,
340             })
341             .collect();
342 
343         Ok(GuestResource {
344             handle,
345             planes,
346             width: params.frame_width,
347             height: params.frame_height,
348             format: params.format.unwrap(),
349         })
350     }
351 
352     #[cfg(feature = "video-encoder")]
try_clone(&self) -> Result<Self, base::Error>353     pub fn try_clone(&self) -> Result<Self, base::Error> {
354         Ok(Self {
355             handle: self.handle.try_clone()?,
356             planes: self.planes.clone(),
357             width: self.width,
358             height: self.height,
359             format: self.format,
360         })
361     }
362 }
363 
#[cfg(test)]
mod tests {
    use base::MappedRegion;
    use base::SafeDescriptor;
    use base::SharedMemory;

    use super::*;

    /// Creates a sparse guest memory handle using as many pages as there are entries in
    /// `page_order`. The page with index `0` will be the first page, `1` will be the second page,
    /// etc.
    ///
    /// The memory handle is filled with increasing u32s starting from page 0, then page 1, and so
    /// on. Finally the handle is mapped into a linear space and we check that the written integers
    /// appear in the expected order.
    fn check_guest_mem_handle(page_order: &[usize]) {
        const PAGE_SIZE: usize = 0x1000;
        const U32_SIZE: usize = std::mem::size_of::<u32>();
        const ENTRIES_PER_PAGE: usize = PAGE_SIZE / U32_SIZE;

        // Fill a vector of the same size as the handle with u32s of increasing value, following
        // the page layout given as argument.
        let mut data = vec![0u8; PAGE_SIZE * page_order.len()];
        for (page_index, page) in page_order.iter().enumerate() {
            let page_slice = &mut data[(page * PAGE_SIZE)..((page + 1) * PAGE_SIZE)];
            for (index, chunk) in page_slice.chunks_exact_mut(U32_SIZE).enumerate() {
                let sized_chunk: &mut [u8; U32_SIZE] = chunk.try_into().unwrap();
                *sized_chunk = (((page_index * ENTRIES_PER_PAGE) + index) as u32).to_ne_bytes();
            }
        }

        // Copy the initialized vector's content into an anonymous shared memory.
        let mem = SharedMemory::new("data-dest", data.len() as u64).unwrap();
        let mapping = MemoryMappingBuilder::new(mem.size() as usize)
            .from_shared_memory(&mem)
            .build()
            .unwrap();
        assert_eq!(mapping.write_slice(&data, 0).unwrap(), data.len());

        // Create the `GuestMemHandle` we will try to map and retrieve the data from.
        let mem_handle = GuestResourceHandle::GuestPages(GuestMemHandle {
            // SAFETY: we are the sole owner of the freshly duplicated descriptor.
            desc: unsafe {
                SafeDescriptor::from_raw_descriptor(base::clone_descriptor(&mem).unwrap())
            },
            mem_areas: page_order
                .iter()
                .map(|&page| GuestMemArea {
                    offset: page as u64 * PAGE_SIZE as u64,
                    length: PAGE_SIZE,
                })
                .collect(),
        });

        // Map the handle into a linear memory area, retrieve its data into a new vector, and check
        // that its u32s appear to increase linearly.
        let mapping = mem_handle.get_mapping(0, mem.size() as usize).unwrap();
        let mut data = vec![0u8; PAGE_SIZE * page_order.len()];
        // SAFETY: `mapping` is at least `data.len()` bytes long and both pointers are valid and
        // do not overlap.
        unsafe { std::ptr::copy_nonoverlapping(mapping.as_ptr(), data.as_mut_ptr(), data.len()) };
        for (index, chunk) in data.chunks_exact(U32_SIZE).enumerate() {
            let sized_chunk: &[u8; U32_SIZE] = chunk.try_into().unwrap();
            assert_eq!(u32::from_ne_bytes(*sized_chunk), index as u32);
        }
    }

    // Fill a guest memory handle with a single memory page.
    // Then check that the data can be properly mapped and appears in the expected order.
    #[test]
    fn test_single_guest_mem_handle() {
        check_guest_mem_handle(&[0])
    }

    // Fill a guest memory handle with 4 memory pages that are contiguous.
    // Then check that the pages appear in the expected order in the mapping.
    #[test]
    fn test_linear_guest_mem_handle() {
        check_guest_mem_handle(&[0, 1, 2, 3])
    }

    // Fill a guest memory handle with 8 pages mapped in non-linear order.
    // Then check that the pages appear in the expected order in the mapping.
    #[test]
    fn test_sparse_guest_mem_handle() {
        check_guest_mem_handle(&[1, 7, 6, 3, 5, 0, 4, 2])
    }
}
450