// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

mod sys;

use std::cell::RefCell;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;

use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use argh::FromArgs;
use base::error;
use base::warn;
use base::AsRawDescriptors;
use base::RawDescriptor;
use base::Tube;
use cros_async::EventAsync;
use cros_async::Executor;
use data_model::Le32;
use fuse::Server;
use hypervisor::ProtectionType;
use sync::Mutex;
pub use sys::start_device as run_fs_device;
use virtio_sys::virtio_fs::virtio_fs_config;
use vm_memory::GuestMemory;
use vmm_vhost::message::VhostUserProtocolFeatures;
use vmm_vhost::VHOST_USER_F_PROTOCOL_FEATURES;
use zerocopy::AsBytes;

use crate::virtio;
use crate::virtio::copy_config;
use crate::virtio::device_constants::fs::FS_MAX_TAG_LEN;
use crate::virtio::fs::passthrough::PassthroughFs;
use crate::virtio::fs::process_fs_queue;
use crate::virtio::fs::Config;
use crate::virtio::vhost::user::device::handler::Error as DeviceError;
use crate::virtio::vhost::user::device::handler::VhostUserDevice;
use crate::virtio::vhost::user::device::handler::WorkerState;
use crate::virtio::Interrupt;
use crate::virtio::Queue;

const MAX_QUEUE_NUM: usize = 2; /* worker queue and high priority queue */

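/// Services one virtio-fs queue: waits for the guest to signal the kick event,
/// then hands the queue to `process_fs_queue` until an error is encountered.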
async fn handle_fs_queue(
    queue: Rc<RefCell<virtio::Queue>>,
    doorbell: Interrupt,
    kick_evt: EventAsync,
    server: Arc<fuse::Server<PassthroughFs>>,
    tube: Arc<Mutex<Tube>>,
) {
    // Slot is always going to be 0 because we do not support DAX
    let slot: u32 = 0;

    loop {
        if let Err(e) = kick_evt.next_val().await {
            error!("Failed to read kick event for fs queue: {}", e);
            break;
        }
        if let Err(e) = process_fs_queue(&doorbell, &mut queue.borrow_mut(), &server, &tube, slot) {
            error!("Process FS queue failed: {}", e);
            break;
        }
    }
}

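/// vhost-user fs device backend that serves requests through an in-process
/// FUSE `Server` wrapping a `PassthroughFs` filesystem.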
struct FsBackend {
    ex: Executor,
    server: Arc<fuse::Server<PassthroughFs>>,
    tag: [u8; FS_MAX_TAG_LEN],
    avail_features: u64,
    acked_features: u64,
    acked_protocol_features: VhostUserProtocolFeatures,
    workers: [Option<WorkerState<Rc<RefCell<Queue>>, ()>>; MAX_QUEUE_NUM],
    keep_rds: Vec<RawDescriptor>,
}

impl FsBackend {
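    /// Creates an `FsBackend` for the given virtio-fs `tag`, wrapping a
    /// `PassthroughFs` in a FUSE `Server` and collecting the raw descriptors
    /// (stdio, filesystem, and executor) that must be kept open.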
    pub fn new(ex: &Executor, tag: &str, cfg: Option<Config>) -> anyhow::Result<Self> {
        if tag.len() > FS_MAX_TAG_LEN {
            bail!(
                "fs tag is too long: {} (max supported: {})",
                tag.len(),
                FS_MAX_TAG_LEN
            );
        }
        let mut fs_tag = [0u8; FS_MAX_TAG_LEN];
        fs_tag[..tag.len()].copy_from_slice(tag.as_bytes());

        let avail_features = virtio::base_features(ProtectionType::Unprotected)
            | 1 << VHOST_USER_F_PROTOCOL_FEATURES;

        // Use default passthroughfs config
        let fs = PassthroughFs::new(tag, cfg.unwrap_or_default())?;

        let mut keep_rds: Vec<RawDescriptor> = [0, 1, 2].to_vec();
        keep_rds.append(&mut fs.keep_rds());

        let ex = ex.clone();
        keep_rds.extend(ex.as_raw_descriptors());

        let server = Arc::new(Server::new(fs));

        Ok(FsBackend {
            ex,
            server,
            tag: fs_tag,
            avail_features,
            acked_features: 0,
            acked_protocol_features: VhostUserProtocolFeatures::empty(),
            workers: Default::default(),
            keep_rds,
        })
    }
}

impl VhostUserDevice for FsBackend {
    fn max_queue_num(&self) -> usize {
        MAX_QUEUE_NUM
    }

    fn features(&self) -> u64 {
        self.avail_features
    }

    fn ack_features(&mut self, value: u64) -> anyhow::Result<()> {
        let unrequested_features = value & !self.avail_features;
        if unrequested_features != 0 {
            bail!("invalid features are given: {:#x}", unrequested_features);
        }

        self.acked_features |= value;

        Ok(())
    }

    fn acked_features(&self) -> u64 {
        self.acked_features
    }

    fn protocol_features(&self) -> VhostUserProtocolFeatures {
        VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ
    }

    fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> {
        let features = VhostUserProtocolFeatures::from_bits(features)
            .ok_or_else(|| anyhow!("invalid protocol features are given: {:#x}", features))?;
        let supported = self.protocol_features();
        self.acked_protocol_features = features & supported;
        Ok(())
    }

    fn acked_protocol_features(&self) -> u64 {
        self.acked_protocol_features.bits()
    }

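    // The config space exposes the fs tag and a single request queue.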
    fn read_config(&self, offset: u64, data: &mut [u8]) {
        let config = virtio_fs_config {
            tag: self.tag,
            num_request_queues: Le32::from(1),
        };
        copy_config(data, 0, config.as_bytes(), offset);
    }

    fn reset(&mut self) {
        for worker in self.workers.iter_mut().filter_map(Option::take) {
            let _ = self.ex.run_until(worker.queue_task.cancel());
        }
    }

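    // Spawns a local task that services the queue and stashes it in `workers`
    // so `stop_queue`/`reset` can cancel it and recover the queue later.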
    fn start_queue(
        &mut self,
        idx: usize,
        queue: virtio::Queue,
        _mem: GuestMemory,
        doorbell: Interrupt,
    ) -> anyhow::Result<()> {
        if self.workers[idx].is_some() {
            warn!("Starting new queue handler without stopping old handler");
            self.stop_queue(idx)?;
        }

        let kick_evt = queue
            .event()
            .try_clone()
            .context("failed to clone queue event")?;
        let kick_evt = EventAsync::new(kick_evt, &self.ex)
            .context("failed to create EventAsync for kick_evt")?;
        let (_, fs_device_tube) = Tube::pair()?;

        let queue = Rc::new(RefCell::new(queue));
        let queue_task = self.ex.spawn_local(handle_fs_queue(
            queue.clone(),
            doorbell,
            kick_evt,
            self.server.clone(),
            Arc::new(Mutex::new(fs_device_tube)),
        ));

        self.workers[idx] = Some(WorkerState { queue_task, queue });
        Ok(())
    }

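    // Cancels the queue's worker task and hands the queue back to the caller;
    // returns `WorkerNotFound` if the queue was never started.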
    fn stop_queue(&mut self, idx: usize) -> anyhow::Result<virtio::Queue> {
        if let Some(worker) = self.workers.get_mut(idx).and_then(Option::take) {
            // Wait for queue_task to be aborted.
            let _ = self.ex.run_until(worker.queue_task.cancel());

            let queue = match Rc::try_unwrap(worker.queue) {
                Ok(queue_cell) => queue_cell.into_inner(),
                Err(_) => panic!("failed to recover queue from worker"),
            };

            Ok(queue)
        } else {
            Err(anyhow::Error::new(DeviceError::WorkerNotFound))
        }
    }
}

#[derive(FromArgs)]
#[argh(subcommand, name = "fs")]
/// FS Device
pub struct Options {
    #[argh(option, arg_name = "PATH")]
    /// path to a vhost-user socket
    socket: String,
    #[argh(option, arg_name = "TAG")]
    /// the virtio-fs tag
    tag: String,
    #[argh(option, arg_name = "DIR")]
    /// path to a directory to share
    shared_dir: PathBuf,
    #[argh(option, arg_name = "UIDMAP")]
    /// uid map to use
    uid_map: Option<String>,
    #[argh(option, arg_name = "GIDMAP")]
    /// gid map to use
    gid_map: Option<String>,
    #[argh(option, arg_name = "CFG")]
    /// colon-separated options for configuring a directory to be
    /// shared with the VM through virtio-fs. The format is the same as the
    /// `crosvm run --shared-dir` flag, except only the keys related to
    /// virtio-fs are valid here.
    cfg: Option<Config>,
    #[argh(option, arg_name = "UID", default = "0")]
    /// uid of the device process in the new user namespace created by minijail.
    /// These two options (uid/gid) are useful when the crosvm process cannot
    /// get CAP_SETGID/CAP_SETUID but an identity mapping of the current
    /// user/group between the VM and the host is required.
    /// For example, if the current user and the crosvm process have uid 5000,
    /// a user can pass "uid=5000" and "uidmap=5000 5000 1" so that files owned
    /// by user 5000 still appear to be owned by user 5000 in the VM. These two
    /// options are only useful when a single user in the VM accesses the
    /// shared files. If multiple users need access, these options do not help;
    /// it is better to create a new user namespace and grant
    /// CAP_SETUID/CAP_SETGID to the crosvm process instead.
    /// Default: 0.
    uid: u32,
    #[argh(option, arg_name = "GID", default = "0")]
    /// gid of the device process in the new user namespace created by minijail.
    /// Default: 0.
    gid: u32,
}