use std::ffi::CString;
use std::fs::File;
use std::io::Result;
use std::os::unix::io::AsRawFd;
use std::os::unix::net::UnixStream;
use std::path::Path;
use std::sync::{Arc, Barrier, Mutex};
use std::thread;

use vhost::vhost_user::message::{
    VhostUserConfigFlags, VhostUserHeaderFlag, VhostUserInflight, VhostUserProtocolFeatures,
};
use vhost::vhost_user::{Backend, Frontend, Listener, VhostUserFrontend};
use vhost::{VhostBackend, VhostUserMemoryRegionInfo, VringConfigData};
use vhost_user_backend::{VhostUserBackendMut, VhostUserDaemon, VringRwLock};
use vm_memory::{
    FileOffset, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
};
use vmm_sys_util::epoll::EventSet;
use vmm_sys_util::eventfd::EventFd;

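/// Minimal mock back-end used by these integration tests: it counts handled
/// events and records the event-idx and acked-features state negotiated by the
/// front-end.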
struct MockVhostBackend {
    events: u64,
    event_idx: bool,
    acked_features: u64,
}

impl MockVhostBackend {
    const SUPPORTED_FEATURES: u64 = 0xffff_ffff_ffff_ffff;

    fn new() -> Self {
        MockVhostBackend {
            events: 0,
            event_idx: false,
            acked_features: 0,
        }
    }
}

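// The `VhostUserBackendMut` implementation below asserts on the exact values
// the test front-ends send (config offset 0x200, an 8-byte payload, a memory
// region at guest address 0x100000), so the mock and the client helpers must
// stay in sync.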
impl VhostUserBackendMut for MockVhostBackend {
    type Bitmap = ();
    type Vring = VringRwLock;

    fn num_queues(&self) -> usize {
        2
    }

    fn max_queue_size(&self) -> usize {
        256
    }

    fn features(&self) -> u64 {
        Self::SUPPORTED_FEATURES
    }

    fn acked_features(&mut self, features: u64) {
        self.acked_features = features;
    }

    fn protocol_features(&self) -> VhostUserProtocolFeatures {
        VhostUserProtocolFeatures::all()
    }

    fn reset_device(&mut self) {
        self.events = 0;
        self.event_idx = false;
        self.acked_features = 0;
    }

    fn set_event_idx(&mut self, enabled: bool) {
        self.event_idx = enabled;
    }

    fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
        assert_eq!(offset, 0x200);
        assert_eq!(size, 8);

        vec![0xa5u8; 8]
    }

    fn set_config(&mut self, offset: u32, buf: &[u8]) -> Result<()> {
        assert_eq!(offset, 0x200);
        assert_eq!(buf, &[0xa5u8; 8]);

        Ok(())
    }

    fn update_memory(&mut self, atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>) -> Result<()> {
        let mem = atomic_mem.memory();
        let region = mem.find_region(GuestAddress(0x100000)).unwrap();
        assert_eq!(region.size(), 0x100000);
        Ok(())
    }

    fn set_backend_req_fd(&mut self, _backend: Backend) {}

    fn queues_per_thread(&self) -> Vec<u64> {
        vec![1, 1]
    }

    fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
        let event_fd = EventFd::new(0).unwrap();

        Some(event_fd)
    }

    fn handle_event(
        &mut self,
        _device_event: u16,
        _evset: EventSet,
        _vrings: &[VringRwLock],
        _thread_id: usize,
    ) -> Result<()> {
        self.events += 1;

        Ok(())
    }
}

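/// Connect a `Frontend` to the daemon socket at `path` and run the feature
/// negotiation shared by the tests below. The barrier waits pair with those in
/// `vhost_user_server_with_fn()`: the first ensures the listener socket exists
/// before connecting, the second ensures the daemon is serving requests before
/// any are issued.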
fn setup_frontend(path: &Path, barrier: Arc<Barrier>) -> Frontend {
    barrier.wait();
    let mut frontend = Frontend::connect(path, 1).unwrap();
    frontend.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
    // Wait before issuing service requests.
    barrier.wait();

    let features = frontend.get_features().unwrap();
    let proto = frontend.get_protocol_features().unwrap();
    frontend.set_features(features).unwrap();
    frontend.set_protocol_features(proto).unwrap();
    assert!(proto.contains(VhostUserProtocolFeatures::REPLY_ACK));

    frontend
}

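/// Exercise the full front-end request flow against the mock back-end: feature
/// and protocol-feature negotiation, ownership, memory table setup, vring
/// configuration, config-space access, and memory-region add/remove.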
fn vhost_user_client(path: &Path, barrier: Arc<Barrier>) {
    barrier.wait();
    let mut frontend = Frontend::connect(path, 1).unwrap();
    frontend.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
    // Wait before issuing service requests.
    barrier.wait();

    let features = frontend.get_features().unwrap();
    let proto = frontend.get_protocol_features().unwrap();
    frontend.set_features(features).unwrap();
    frontend.set_protocol_features(proto).unwrap();
    assert!(proto.contains(VhostUserProtocolFeatures::REPLY_ACK));

    let queue_num = frontend.get_queue_num().unwrap();
    assert_eq!(queue_num, 2);

    frontend.set_owner().unwrap();
    //frontend.set_owner().unwrap_err();
    frontend.reset_owner().unwrap();
    frontend.reset_owner().unwrap();
    frontend.set_owner().unwrap();

    frontend.set_features(features).unwrap();
    frontend.set_protocol_features(proto).unwrap();
    assert!(proto.contains(VhostUserProtocolFeatures::REPLY_ACK));

    // Back a 1 MiB guest memory region with a memfd so its fd can be shared
    // with the back-end.
    let memfd = nix::sys::memfd::memfd_create(
        &CString::new("test").unwrap(),
        nix::sys::memfd::MemFdCreateFlag::empty(),
    )
    .unwrap();
    let file = File::from(memfd);
    file.set_len(0x100000).unwrap();
    let file_offset = FileOffset::new(file, 0);
    let mem = GuestMemoryMmap::<()>::from_ranges_with_files(&[(
        GuestAddress(0x100000),
        0x100000,
        Some(file_offset),
    )])
    .unwrap();
    let addr = mem.get_host_address(GuestAddress(0x100000)).unwrap() as u64;
    let reg = mem.find_region(GuestAddress(0x100000)).unwrap();
    let fd = reg.file_offset().unwrap();
    let regions = [VhostUserMemoryRegionInfo::new(
        0x100000,
        0x100000,
        addr,
        0,
        fd.file().as_raw_fd(),
    )];
    frontend.set_mem_table(&regions).unwrap();

    frontend.set_vring_num(0, 256).unwrap();

    let config = VringConfigData {
        queue_max_size: 256,
        queue_size: 256,
        flags: 0,
        desc_table_addr: addr,
        used_ring_addr: addr + 0x10000,
        avail_ring_addr: addr + 0x20000,
        log_addr: None,
    };
    frontend.set_vring_addr(0, &config).unwrap();

    let eventfd = EventFd::new(0).unwrap();
    frontend.set_vring_kick(0, &eventfd).unwrap();
    frontend.set_vring_call(0, &eventfd).unwrap();
    frontend.set_vring_err(0, &eventfd).unwrap();
    frontend.set_vring_enable(0, true).unwrap();

    let buf = [0u8; 8];
    let (_cfg, data) = frontend
        .get_config(0x200, 8, VhostUserConfigFlags::empty(), &buf)
        .unwrap();
    assert_eq!(&data, &[0xa5u8; 8]);
    frontend
        .set_config(0x200, VhostUserConfigFlags::empty(), &data)
        .unwrap();

    let (tx, _rx) = UnixStream::pair().unwrap();
    frontend.set_backend_request_fd(&tx).unwrap();

    let state = frontend.get_vring_base(0).unwrap();
    frontend.set_vring_base(0, state as u16).unwrap();

    // Add an extra memory region and then remove it again.
    assert_eq!(frontend.get_max_mem_slots().unwrap(), 509);
    let region = VhostUserMemoryRegionInfo::new(0x800000, 0x100000, addr, 0, fd.file().as_raw_fd());
    frontend.add_mem_region(&region).unwrap();
    frontend.remove_mem_region(&region).unwrap();
}

/// Provide a vhost-user back-end for front-end testing.
///
/// Set up a `MockVhostBackend` vhost-user back-end and run `cb` in a thread, passing the
/// vhost-user socket's path and a barrier to await request processing. `cb` is supposed to run
/// the front-end tests.
///
/// After request processing has begun, run `server_fn`, passing both a reference to the back-end
/// and the same barrier as given to `cb`. `server_fn` may perform additional back-end tests while
/// `cb` is still running in its thread.
///
/// After `server_fn` is done, await `cb` (joining its thread), and return.
fn vhost_user_server_with_fn<F: FnOnce(Arc<Mutex<MockVhostBackend>>, Arc<Barrier>)>(
    cb: fn(&Path, Arc<Barrier>),
    server_fn: F,
) {
    let mem = GuestMemoryAtomic::new(GuestMemoryMmap::<()>::new());
    let backend = Arc::new(Mutex::new(MockVhostBackend::new()));
    let mut daemon = VhostUserDaemon::new("test".to_owned(), backend.clone(), mem).unwrap();

    let barrier = Arc::new(Barrier::new(2));
    let tmpdir = tempfile::tempdir().unwrap();
    let mut path = tmpdir.path().to_path_buf();
    path.push("socket");

    let barrier2 = barrier.clone();
    let path1 = path.clone();
    let thread = thread::spawn(move || cb(&path1, barrier2));

    let listener = Listener::new(&path, false).unwrap();
    barrier.wait();
    daemon.start(listener).unwrap();
    barrier.wait();

    server_fn(backend, barrier);

    // Handle service requests from clients.
    thread.join().unwrap();
}

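/// Convenience wrapper around `vhost_user_server_with_fn()` for tests that do
/// not need to inspect the back-end from the server side.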
fn vhost_user_server(cb: fn(&Path, Arc<Barrier>)) {
    vhost_user_server_with_fn(cb, |_, _| {})
}

#[test]
fn test_vhost_user_server() {
    vhost_user_server(vhost_user_client);
}

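/// Claiming ownership twice must fail: the second `set_owner()` is expected to
/// be rejected by the back-end.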
fn vhost_user_enable(path: &Path, barrier: Arc<Barrier>) {
    let frontend = setup_frontend(path, barrier);
    frontend.set_owner().unwrap();
    frontend.set_owner().unwrap_err();
}

#[test]
fn test_vhost_user_enable() {
    vhost_user_server(vhost_user_enable);
}

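/// The daemon has no inflight-fd support yet, so `set_inflight_fd()` is
/// expected to fail.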
fn vhost_user_set_inflight(path: &Path, barrier: Arc<Barrier>) {
    let mut frontend = setup_frontend(path, barrier);
    let eventfd = EventFd::new(0).unwrap();
    // No implementation for inflight_fd yet.
    let inflight = VhostUserInflight {
        mmap_size: 0x100000,
        mmap_offset: 0,
        num_queues: 1,
        queue_size: 256,
    };
    frontend
        .set_inflight_fd(&inflight, eventfd.as_raw_fd())
        .unwrap_err();
}

#[test]
fn test_vhost_user_set_inflight() {
    vhost_user_server(vhost_user_set_inflight);
}

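/// As with `set_inflight_fd()`, `get_inflight_fd()` is expected to fail while
/// inflight I/O tracking is unimplemented.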
fn vhost_user_get_inflight(path: &Path, barrier: Arc<Barrier>) {
    let mut frontend = setup_frontend(path, barrier);
    // No implementation for inflight_fd yet.
    let inflight = VhostUserInflight {
        mmap_size: 0x100000,
        mmap_offset: 0,
        num_queues: 1,
        queue_size: 256,
    };
    assert!(frontend.get_inflight_fd(&inflight).is_err());
}

#[test]
fn test_vhost_user_get_inflight() {
    vhost_user_server(vhost_user_get_inflight);
}

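// The postcopy helpers below walk the advise -> listen -> end sequence one
// step further each; they require access to `/dev/userfaultfd` (see the note
// on the test below).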
#[cfg(feature = "postcopy")]
fn vhost_user_postcopy_advise(path: &Path, barrier: Arc<Barrier>) {
    let mut frontend = setup_frontend(path, barrier);
    let _uffd_file = frontend.postcopy_advise().unwrap();
}

#[cfg(feature = "postcopy")]
fn vhost_user_postcopy_listen(path: &Path, barrier: Arc<Barrier>) {
    let mut frontend = setup_frontend(path, barrier);
    let _uffd_file = frontend.postcopy_advise().unwrap();
    frontend.postcopy_listen().unwrap();
}

#[cfg(feature = "postcopy")]
fn vhost_user_postcopy_end(path: &Path, barrier: Arc<Barrier>) {
    let mut frontend = setup_frontend(path, barrier);
    let _uffd_file = frontend.postcopy_advise().unwrap();
    frontend.postcopy_listen().unwrap();
    frontend.postcopy_end().unwrap();
}

// These tests need access to `/dev/userfaultfd` in order to pass.
#[cfg(feature = "postcopy")]
#[test]
fn test_vhost_user_postcopy() {
    vhost_user_server(vhost_user_postcopy_advise);
    vhost_user_server(vhost_user_postcopy_listen);
    vhost_user_server(vhost_user_postcopy_end);
}

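/// Drive `reset_device()` from the front-end side; `test_vhost_user_reset_device()`
/// below uses the extra barrier waits to inspect the back-end state before and
/// after the reset.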
fn vhost_user_reset_device(path: &Path, barrier: Arc<Barrier>) {
    let mut frontend = setup_frontend(path, barrier.clone());

    // Signal that we are about to reset.
    barrier.wait();
    // Wait until the server has checked the non-reset state.
    barrier.wait();

    frontend.reset_device().unwrap();

    // Signal that the reset is done.
    barrier.wait();
}

#[test]
fn test_vhost_user_reset_device() {
    vhost_user_server_with_fn(vhost_user_reset_device, |backend, barrier| {
        // Wait until `vhost_user_reset_device()` is about to reset.
        barrier.wait();
        // Check the non-reset state.
        assert!(backend.lock().unwrap().acked_features == MockVhostBackend::SUPPORTED_FEATURES);
        // Set up some arbitrary internal state.
        backend.lock().unwrap().events = 42;

        // Allow the reset.
        barrier.wait();
        // Wait for the reset to be done.
        barrier.wait();

        // Check the reset state.
        assert!(backend.lock().unwrap().acked_features == 0);
        assert!(backend.lock().unwrap().events == 0);
    });
}