1 // Copyright 2022 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #[cfg(feature = "gpu")]
6 pub(crate) mod gpu;
7
8 use std::path::Path;
9 use std::time::Duration;
10
11 use base::error;
12 use base::AsRawDescriptor;
13 use base::Descriptor;
14 use base::Error as SysError;
15 use base::MemoryMappingArena;
16 use base::MmapError;
17 use base::Protection;
18 use base::SafeDescriptor;
19 use base::Tube;
20 use base::UnixSeqpacket;
21 use hypervisor::MemCacheType;
22 use hypervisor::MemSlot;
23 use hypervisor::Vm;
24 use libc::EINVAL;
25 use libc::ERANGE;
26 use once_cell::sync::Lazy;
27 use resources::Alloc;
28 use resources::SystemAllocator;
29 use serde::Deserialize;
30 use serde::Serialize;
31 use vm_memory::GuestAddress;
32
33 use crate::client::HandleRequestResult;
34 use crate::VmRequest;
35 use crate::VmResponse;
36
handle_request<T: AsRef<Path> + std::fmt::Debug>( request: &VmRequest, socket_path: T, ) -> HandleRequestResult37 pub fn handle_request<T: AsRef<Path> + std::fmt::Debug>(
38 request: &VmRequest,
39 socket_path: T,
40 ) -> HandleRequestResult {
41 handle_request_with_timeout(request, socket_path, None)
42 }
43
handle_request_with_timeout<T: AsRef<Path> + std::fmt::Debug>( request: &VmRequest, socket_path: T, timeout: Option<Duration>, ) -> HandleRequestResult44 pub fn handle_request_with_timeout<T: AsRef<Path> + std::fmt::Debug>(
45 request: &VmRequest,
46 socket_path: T,
47 timeout: Option<Duration>,
48 ) -> HandleRequestResult {
49 match UnixSeqpacket::connect(&socket_path) {
50 Ok(s) => {
51 let socket = Tube::new_from_unix_seqpacket(s).map_err(|_| ())?;
52 if timeout.is_some() {
53 if let Err(e) = socket.set_recv_timeout(timeout) {
54 error!(
55 "failed to set recv timeout on socket at '{:?}': {}",
56 socket_path, e
57 );
58 return Err(());
59 }
60 }
61 if let Err(e) = socket.send(request) {
62 error!(
63 "failed to send request to socket at '{:?}': {}",
64 socket_path, e
65 );
66 return Err(());
67 }
68 match socket.recv() {
69 Ok(response) => Ok(response),
70 Err(e) => {
71 error!(
72 "failed to recv response from socket at '{:?}': {}",
73 socket_path, e
74 );
75 Err(())
76 }
77 }
78 }
79 Err(e) => {
80 error!("failed to connect to socket at '{:?}': {}", socket_path, e);
81 Err(())
82 }
83 }
84 }
85
/// Requests for flushing (msync'ing) guest memory mappings back to their
/// backing files. Sent over a control socket and executed via
/// [`VmMsyncRequest::execute`].
#[derive(Serialize, Deserialize, Debug)]
pub enum VmMsyncRequest {
    /// Flush the content of a memory mapping to its backing file.
    /// `slot` selects the arena (as returned by `Vm::add_mmap_arena`).
    /// `offset` is the offset of the mapping to sync within the arena.
    /// `size` is the size of the mapping to sync within the arena.
    MsyncArena {
        slot: MemSlot,
        offset: usize,
        size: usize,
    },
}
98
/// Outcome of executing a [`VmMsyncRequest`], intended to be sent back over
/// the socket that delivered the request.
#[derive(Serialize, Deserialize, Debug)]
pub enum VmMsyncResponse {
    /// The msync completed successfully.
    Ok,
    /// The msync failed; carries the underlying system error.
    Err(SysError),
}
104
105 impl VmMsyncRequest {
106 /// Executes this request on the given Vm.
107 ///
108 /// # Arguments
109 /// * `vm` - The `Vm` to perform the request on.
110 ///
111 /// This does not return a result, instead encapsulating the success or failure in a
112 /// `VmMsyncResponse` with the intended purpose of sending the response back over the socket
113 /// that received this `VmMsyncResponse`.
execute(&self, vm: &mut impl Vm) -> VmMsyncResponse114 pub fn execute(&self, vm: &mut impl Vm) -> VmMsyncResponse {
115 use self::VmMsyncRequest::*;
116 match *self {
117 MsyncArena { slot, offset, size } => match vm.msync_memory_region(slot, offset, size) {
118 Ok(()) => VmMsyncResponse::Ok,
119 Err(e) => VmMsyncResponse::Err(e),
120 },
121 }
122 }
123 }
124
/// Requests for managing file-backed mappings inside a shared memory region,
/// executed via [`FsMappingRequest::execute`].
#[derive(Serialize, Deserialize, Debug)]
pub enum FsMappingRequest {
    /// Create an anonymous memory mapping that spans the entire region described by `Alloc`.
    AllocateSharedMemoryRegion(Alloc),
    /// Create a memory mapping.
    CreateMemoryMapping {
        /// The slot for a MemoryMappingArena, previously returned by a response to an
        /// `AllocateSharedMemoryRegion` request.
        slot: u32,
        /// The file descriptor that should be mapped.
        fd: SafeDescriptor,
        /// The size of the mapping.
        size: usize,
        /// The offset into the file from where the mapping should start.
        file_offset: u64,
        /// The memory protection to be used for the mapping. Protections other than readable and
        /// writable will be silently dropped.
        prot: Protection,
        /// The offset into the shared memory region where the mapping should be placed.
        mem_offset: usize,
    },
    /// Remove a memory mapping.
    RemoveMemoryMapping {
        /// The slot for a MemoryMappingArena.
        slot: u32,
        /// The offset into the shared memory region.
        offset: usize,
        /// The size of the mapping.
        size: usize,
    },
}
156
prepare_shared_memory_region( vm: &mut dyn Vm, allocator: &mut SystemAllocator, alloc: Alloc, cache: MemCacheType, ) -> Result<(u64, MemSlot), SysError>157 pub fn prepare_shared_memory_region(
158 vm: &mut dyn Vm,
159 allocator: &mut SystemAllocator,
160 alloc: Alloc,
161 cache: MemCacheType,
162 ) -> Result<(u64, MemSlot), SysError> {
163 if !matches!(alloc, Alloc::PciBar { .. }) {
164 return Err(SysError::new(EINVAL));
165 }
166 match allocator.mmio_allocator_any().get(&alloc) {
167 Some((range, _)) => {
168 let size: usize = match range.len().and_then(|x| x.try_into().ok()) {
169 Some(v) => v,
170 None => return Err(SysError::new(ERANGE)),
171 };
172 let arena = match MemoryMappingArena::new(size) {
173 Ok(a) => a,
174 Err(MmapError::SystemCallFailed(e)) => return Err(e),
175 _ => return Err(SysError::new(EINVAL)),
176 };
177
178 match vm.add_memory_region(
179 GuestAddress(range.start),
180 Box::new(arena),
181 false,
182 false,
183 cache,
184 ) {
185 Ok(slot) => Ok((range.start >> 12, slot)),
186 Err(e) => Err(e),
187 }
188 }
189 None => Err(SysError::new(EINVAL)),
190 }
191 }
192
193 static SHOULD_PREPARE_MEMORY_REGION: Lazy<bool> = Lazy::new(|| {
194 if cfg!(target_arch = "x86_64") {
195 // The legacy x86 MMU allocates an rmap and a page tracking array
196 // that take 2.5MiB per 1GiB of user memory region address space,
197 // so avoid mapping the whole shared memory region if we're not
198 // using the tdp mmu.
199 match std::fs::read("/sys/module/kvm/parameters/tdp_mmu") {
200 Ok(bytes) if !bytes.is_empty() => bytes[0] == b'Y',
201 _ => false,
202 }
203 } else if cfg!(target_pointer_width = "64") {
204 true
205 } else {
206 // Not enough address space on 32-bit systems
207 false
208 }
209 });
210
should_prepare_memory_region() -> bool211 pub fn should_prepare_memory_region() -> bool {
212 *SHOULD_PREPARE_MEMORY_REGION
213 }
214
215 impl FsMappingRequest {
execute(&self, vm: &mut dyn Vm, allocator: &mut SystemAllocator) -> VmResponse216 pub fn execute(&self, vm: &mut dyn Vm, allocator: &mut SystemAllocator) -> VmResponse {
217 use self::FsMappingRequest::*;
218 match *self {
219 AllocateSharedMemoryRegion(alloc) => {
220 match prepare_shared_memory_region(
221 vm,
222 allocator,
223 alloc,
224 MemCacheType::CacheCoherent,
225 ) {
226 Ok((pfn, slot)) => VmResponse::RegisterMemory { pfn, slot },
227 Err(e) => VmResponse::Err(e),
228 }
229 }
230 CreateMemoryMapping {
231 slot,
232 ref fd,
233 size,
234 file_offset,
235 prot,
236 mem_offset,
237 } => {
238 let raw_fd: Descriptor = Descriptor(fd.as_raw_descriptor());
239
240 match vm.add_fd_mapping(slot, mem_offset, size, &raw_fd, file_offset, prot) {
241 Ok(()) => VmResponse::Ok,
242 Err(e) => VmResponse::Err(e),
243 }
244 }
245 RemoveMemoryMapping { slot, offset, size } => {
246 match vm.remove_mapping(slot, offset, size) {
247 Ok(()) => VmResponse::Ok,
248 Err(e) => VmResponse::Err(e),
249 }
250 }
251 }
252 }
253 }
254