// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::io::Write;
use std::os::unix::net::UnixStream;
use std::path::Path;

use base::{AsRawDescriptor, Event, Tube};
use vm_memory::GuestMemory;
use vmm_vhost::message::{
    MasterReq, VhostUserConfigFlags, VhostUserProtocolFeatures, VhostUserVirtioFeatures,
};
use vmm_vhost::{
    connection::socket::Endpoint as SocketEndpoint, Master, VhostBackend, VhostUserMaster,
    VhostUserMemoryRegionInfo, VringConfigData,
};

use crate::virtio::vhost::user::vmm::{Error, Result};
use crate::virtio::{Interrupt, Queue};

type SocketMaster = Master<SocketEndpoint<MasterReq>>;

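/// Negotiates the virtio feature set with the backend: acks `avail_features & ack_features`
/// and returns the feature bits that were acked.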
fn set_features(vu: &mut SocketMaster, avail_features: u64, ack_features: u64) -> Result<u64> {
    let features = avail_features & ack_features;
    vu.set_features(features).map_err(Error::SetFeatures)?;
    Ok(features)
}

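/// Handles the vhost-user control connection to a device backend on behalf of the VMM:
/// feature negotiation, sharing the guest memory table, and vring setup/teardown.
///
/// A rough usage sketch; the socket path, queue counts, and feature bits below are
/// illustrative only, not a prescribed configuration:
///
/// ```ignore
/// let mut handler = VhostUserHandler::new_from_path(
///     "/run/vhost-user-backend.sock", // hypothetical backend socket
///     NUM_QUEUES as u64,
///     base_features | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits(),
///     0,
///     VhostUserProtocolFeatures::CONFIG,
/// )?;
/// let queue_sizes = handler.queue_sizes(QUEUE_SIZE, NUM_QUEUES)?;
/// ```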
pub struct VhostUserHandler {
    vu: SocketMaster,
    pub avail_features: u64,
    acked_features: u64,
    protocol_features: VhostUserProtocolFeatures,
}

impl VhostUserHandler {
    /// Creates a `VhostUserHandler` instance attached to the provided UDS path
    /// with features and protocol features initialized.
    pub fn new_from_path<P: AsRef<Path>>(
        path: P,
        max_queue_num: u64,
        allow_features: u64,
        init_features: u64,
        allow_protocol_features: VhostUserProtocolFeatures,
    ) -> Result<Self> {
        Self::new(
            SocketMaster::connect(path, max_queue_num)
                .map_err(Error::SocketConnectOnMasterCreate)?,
            allow_features,
            init_features,
            allow_protocol_features,
        )
    }

    /// Creates a `VhostUserHandler` instance attached to the provided
    /// UnixStream with features and protocol features initialized.
    pub fn new_from_stream(
        sock: UnixStream,
        max_queue_num: u64,
        allow_features: u64,
        init_features: u64,
        allow_protocol_features: VhostUserProtocolFeatures,
    ) -> Result<Self> {
        Self::new(
            SocketMaster::from_stream(sock, max_queue_num),
            allow_features,
            init_features,
            allow_protocol_features,
        )
    }

    /// Creates a `VhostUserHandler` instance with features and protocol features initialized.
    fn new(
        mut vu: SocketMaster,
        allow_features: u64,
        init_features: u64,
        allow_protocol_features: VhostUserProtocolFeatures,
    ) -> Result<Self> {
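        // Mark this connection as the session owner before starting feature negotiation.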
        vu.set_owner().map_err(Error::SetOwner)?;

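        // Virtio feature negotiation: limit the backend's features to those we allow, then ack
        // the caller's initial feature set.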
        let avail_features = allow_features & vu.get_features().map_err(Error::GetFeatures)?;
        let acked_features = set_features(&mut vu, avail_features, init_features)?;

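        // If VHOST_USER_F_PROTOCOL_FEATURES was acked, also negotiate the vhost-user protocol
        // feature set, limited to what the caller allows.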
        let mut protocol_features = VhostUserProtocolFeatures::empty();
        if acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() != 0 {
            let avail_protocol_features = vu
                .get_protocol_features()
                .map_err(Error::GetProtocolFeatures)?;
            protocol_features = allow_protocol_features & avail_protocol_features;
            vu.set_protocol_features(protocol_features)
                .map_err(Error::SetProtocolFeatures)?;
        }

        Ok(VhostUserHandler {
            vu,
            avail_features,
            acked_features,
            protocol_features,
        })
    }

    /// Returns a vector containing the size of each queue.
    pub fn queue_sizes(&mut self, queue_size: u16, default_queues_num: usize) -> Result<Vec<u16>> {
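        // With the MQ protocol feature the backend reports how many queues it supports;
        // otherwise fall back to the device's default queue count.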
        let queues_num = if self
            .protocol_features
            .contains(VhostUserProtocolFeatures::MQ)
        {
            self.vu.get_queue_num().map_err(Error::GetQueueNum)? as usize
        } else {
            default_queues_num
        };
        Ok(vec![queue_size; queues_num])
    }

    /// Enables a set of features.
    pub fn ack_features(&mut self, ack_features: u64) -> Result<()> {
        let features = set_features(
            &mut self.vu,
            self.avail_features,
            self.acked_features | ack_features,
        )?;
        self.acked_features = features;
        Ok(())
    }

    /// Reads the device configuration space at `offset` and copies it into `data`.
    pub fn read_config<T>(&mut self, offset: u64, mut data: &mut [u8]) -> Result<()> {
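        // Reject reads that would run past the end of the device's config space.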
        let config_len = std::mem::size_of::<T>() as u64;
        let data_len = data.len() as u64;
        offset
            .checked_add(data_len)
            .and_then(|l| if l <= config_len { Some(()) } else { None })
            .ok_or(Error::InvalidConfigOffset {
                data_len,
                offset,
                config_len,
            })?;

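        // Fetch the entire config space from the backend, then copy out the requested window.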
        let buf = vec![0u8; config_len as usize];
        let (_, config) = self
            .vu
            .get_config(0, config_len as u32, VhostUserConfigFlags::WRITABLE, &buf)
            .map_err(Error::GetConfig)?;

        data.write_all(
            &config[offset as usize..std::cmp::min(data_len + offset, config_len) as usize],
        )
        .map_err(Error::CopyConfig)
    }

    /// Writes `data` into the device configuration space at `offset`.
    pub fn write_config<T>(&mut self, offset: u64, data: &[u8]) -> Result<()> {
        let config_len = std::mem::size_of::<T>() as u64;
        let data_len = data.len() as u64;
        offset
            .checked_add(data_len)
            .and_then(|l| if l <= config_len { Some(()) } else { None })
            .ok_or(Error::InvalidConfigOffset {
                data_len,
                offset,
                config_len,
            })?;

        self.vu
            .set_config(offset as u32, VhostUserConfigFlags::empty(), data)
            .map_err(Error::SetConfig)
    }

    /// Sets the channel for device-specific messages.
    pub fn set_device_request_channel(&mut self, channel: Tube) -> Result<()> {
        self.vu
            .set_slave_request_fd(&channel)
            .map_err(Error::SetDeviceRequestChannel)
    }

    /// Sets the memory map regions so the backend can translate the vring addresses.
    pub fn set_mem_table(&mut self, mem: &GuestMemory) -> Result<()> {
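        // Describe every guest memory region (guest physical address, size, the VMM's
        // userspace address, and the backing descriptor) so the backend can map it.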
        let mut regions: Vec<VhostUserMemoryRegionInfo> = Vec::new();
        mem.with_regions::<_, ()>(
            |_idx, guest_phys_addr, memory_size, userspace_addr, mmap, mmap_offset| {
                let region = VhostUserMemoryRegionInfo {
                    guest_phys_addr: guest_phys_addr.0,
                    memory_size: memory_size as u64,
                    userspace_addr: userspace_addr as u64,
                    mmap_offset,
                    mmap_handle: mmap.as_raw_descriptor(),
                };
                regions.push(region);
                Ok(())
            },
        )
        .unwrap(); // the closure above never fails

        self.vu
            .set_mem_table(regions.as_slice())
            .map_err(Error::SetMemTable)?;

        Ok(())
    }

    /// Activates a vring for the given `queue`.
    pub fn activate_vring(
        &mut self,
        mem: &GuestMemory,
        queue_index: usize,
        queue: &Queue,
        queue_evt: &Event,
        irqfd: &Event,
    ) -> Result<()> {
        self.vu
            .set_vring_num(queue_index, queue.actual_size())
            .map_err(Error::SetVringNum)?;

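        // The ring addresses are passed as the VMM's userspace addresses; the backend maps
        // them back to its own mappings using the memory table shared in `set_mem_table`.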
        let config_data = VringConfigData {
            queue_max_size: queue.max_size,
            queue_size: queue.actual_size(),
            flags: 0u32,
            desc_table_addr: mem
                .get_host_address(queue.desc_table)
                .map_err(Error::GetHostAddress)? as u64,
            used_ring_addr: mem
                .get_host_address(queue.used_ring)
                .map_err(Error::GetHostAddress)? as u64,
            avail_ring_addr: mem
                .get_host_address(queue.avail_ring)
                .map_err(Error::GetHostAddress)? as u64,
            log_addr: None,
        };
        self.vu
            .set_vring_addr(queue_index, &config_data)
            .map_err(Error::SetVringAddr)?;

        self.vu
            .set_vring_base(queue_index, 0)
            .map_err(Error::SetVringBase)?;

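        // Hook up the notification eventfds (`irqfd` signals used-buffer interrupts to the
        // guest, `queue_evt` delivers kicks from the guest), then enable the ring.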
        self.vu
            .set_vring_call(queue_index, irqfd)
            .map_err(Error::SetVringCall)?;
        self.vu
            .set_vring_kick(queue_index, queue_evt)
            .map_err(Error::SetVringKick)?;
        self.vu
            .set_vring_enable(queue_index, true)
            .map_err(Error::SetVringEnable)?;

        Ok(())
    }

    /// Activates vrings.
    pub fn activate(
        &mut self,
        mem: &GuestMemory,
        interrupt: &Interrupt,
        queues: &[Queue],
        queue_evts: &[Event],
    ) -> Result<()> {
        self.set_mem_table(mem)?;

        let msix_config_opt = interrupt
            .get_msix_config()
            .as_ref()
            .ok_or(Error::MsixConfigUnavailable)?;
        let msix_config = msix_config_opt.lock();

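        // Use the irqfd registered for each queue's MSI-X vector when one exists; otherwise
        // fall back to the device's shared interrupt event.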
        for (queue_index, queue) in queues.iter().enumerate() {
            let queue_evt = &queue_evts[queue_index];
            let irqfd = msix_config
                .get_irqfd(queue.vector as usize)
                .unwrap_or_else(|| interrupt.get_interrupt_evt());
            self.activate_vring(mem, queue_index, queue, queue_evt, irqfd)?;
        }

        Ok(())
    }

    /// Deactivates all vrings.
    pub fn reset(&mut self, queues_num: usize) -> Result<()> {
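        // Disable each ring, then fetch its base index; per the vhost-user protocol,
        // GET_VRING_BASE also signals the backend to stop processing that ring.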
        for queue_index in 0..queues_num {
            self.vu
                .set_vring_enable(queue_index, false)
                .map_err(Error::SetVringEnable)?;
            self.vu
                .get_vring_base(queue_index)
                .map_err(Error::GetVringBase)?;
        }
        Ok(())
    }
}