1 // Copyright 2019 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 use std::cmp::max;
6 use std::cmp::min;
7 use std::convert::TryInto;
8 use std::ffi::CStr;
9 use std::io;
10 use std::mem::size_of;
11 use std::mem::MaybeUninit;
12 use std::os::unix::io::AsRawFd;
13 use std::time::Duration;
14
15 use base::error;
16 use base::pagesize;
17 use base::Protection;
18 use zerocopy::AsBytes;
19 use zerocopy::FromBytes;
20 use zerocopy::FromZeroes;
21
22 use crate::filesystem::Context;
23 use crate::filesystem::DirEntry;
24 use crate::filesystem::DirectoryIterator;
25 use crate::filesystem::Entry;
26 use crate::filesystem::FileSystem;
27 use crate::filesystem::GetxattrReply;
28 use crate::filesystem::IoctlReply;
29 use crate::filesystem::ListxattrReply;
30 use crate::filesystem::ZeroCopyReader;
31 use crate::filesystem::ZeroCopyWriter;
32 use crate::sys::*;
33 use crate::Error;
34 use crate::Result;
35
36 const DIRENT_PADDING: [u8; 8] = [0; 8];
37
38 const SELINUX_XATTR_CSTR: &[u8] = b"security.selinux\0";
39
40 /// A trait for reading from the underlying FUSE endpoint.
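/// # Example
///
/// A minimal sketch (not part of the server) showing how `read_struct` decodes a fixed-size
/// FUSE structure from a byte buffer. `CursorReader` is a hypothetical adapter over
/// `std::io::Cursor`, used here only for illustration.
///
/// ```ignore
/// use std::io::{self, Cursor, Read};
///
/// struct CursorReader(Cursor<Vec<u8>>);
///
/// impl Read for CursorReader {
///     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
///         self.0.read(buf)
///     }
/// }
///
/// // `Reader` only requires `io::Read`; `read_struct` is a provided method.
/// impl Reader for CursorReader {}
///
/// fn parse_in_header(bytes: Vec<u8>) -> Result<InHeader> {
///     let mut r = CursorReader(Cursor::new(bytes));
///     // Reads exactly `size_of::<InHeader>()` bytes and reinterprets them as an `InHeader`.
///     r.read_struct::<InHeader>()
/// }
/// ```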
41 pub trait Reader: io::Read {
42 fn read_struct<T: AsBytes + FromBytes + FromZeroes>(&mut self) -> Result<T> {
43 let mut out = T::new_zeroed();
44 self.read_exact(out.as_bytes_mut())
45 .map_err(Error::DecodeMessage)?;
46 Ok(out)
47 }
48 }
49
50 impl<R: Reader> Reader for &'_ mut R {}
51
52 /// A trait for writing to the underlying FUSE endpoint. The FUSE device expects the write
53 /// operation to happen in one write transaction. Since there are cases where the data needs to
54 /// be generated before the header, the writer implementation is expected to keep an internal
55 /// buffer. The buffer can then be flushed once both the header and the data are prepared.
56 pub trait Writer: io::Write {
57 /// The type passed in to the closure in `write_at`. For most implementations, this should be
58 /// `Self`.
59 type ClosureWriter: Writer + ZeroCopyWriter;
60
61 /// Allows a closure to generate and write data at the current writer's offset. The current
62 /// writer is passed as a mutable reference to the closure. As an example, this provides an
63 /// adapter for the read implementation of a filesystem to write directly to the final buffer
64 /// without generating the FUSE header first.
65 ///
66 /// Note: An alternative implementation would be to return a separate writer positioned at the
67 /// offset for the API client to write to. However, since the API may be called more than once,
68 /// that approach imposes extra complexity around borrowing and mutability. The current approach
69 /// simply does not create a second writer, so there is no need to deal with the mentioned
70 /// complexity.
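///
/// # Example
///
/// A minimal sketch (mirroring the `read` handler below) of the intended call pattern: the
/// reply payload is generated first at an offset past the header, and the header is written
/// afterwards once the payload length is known. `payload` is a hypothetical byte slice.
///
/// ```ignore
/// fn reply_with_payload<W: Writer>(mut w: W, unique: u64, payload: &[u8]) -> Result<usize> {
///     // Write the data first, leaving room for the header at the front of the buffer.
///     let count = w
///         .write_at(size_of::<OutHeader>(), |writer| writer.write(payload))
///         .map_err(Error::EncodeMessage)?;
///
///     // Now that the final length is known, fill in the header.
///     let out = OutHeader {
///         len: (size_of::<OutHeader>() + count) as u32,
///         error: 0,
///         unique,
///     };
///     w.write_all(out.as_bytes()).map_err(Error::EncodeMessage)?;
///     w.flush().map_err(Error::FlushMessage)?;
///     Ok(out.len as usize)
/// }
/// ```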
71 fn write_at<F>(&mut self, offset: usize, f: F) -> io::Result<usize>
72 where
73 F: Fn(&mut Self::ClosureWriter) -> io::Result<usize>;
74
75 /// Checks if the writer can still accept a certain amount of data.
76 fn has_sufficient_buffer(&self, size: u32) -> bool;
77 }
78
79 impl<W: Writer> Writer for &'_ mut W {
80 type ClosureWriter = W::ClosureWriter;
81
82 fn write_at<F>(&mut self, offset: usize, f: F) -> io::Result<usize>
83 where
84 F: Fn(&mut Self::ClosureWriter) -> io::Result<usize>,
85 {
86 (**self).write_at(offset, f)
87 }
88
89 fn has_sufficient_buffer(&self, size: u32) -> bool {
90 (**self).has_sufficient_buffer(size)
91 }
92 }
93
94 /// A trait for memory mapping for DAX.
95 ///
96 /// For some transports (like virtio) it may be possible to share a region of memory with the
97 /// FUSE kernel driver so that it can access file contents directly without issuing read or
98 /// write requests. In this case the driver will instead send requests to map a section of a
99 /// file into the shared memory region.
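///
/// # Example
///
/// A minimal sketch (not part of the server) of a transport without a shared memory region:
/// it satisfies the trait by rejecting every mapping request, so `SetUpMapping` and
/// `RemoveMapping` requests fail cleanly with an error reply.
///
/// ```ignore
/// struct NoDaxMapper;
///
/// impl Mapper for NoDaxMapper {
///     fn map(
///         &self,
///         _mem_offset: u64,
///         _size: usize,
///         _fd: &dyn AsRawFd,
///         _file_offset: u64,
///         _prot: Protection,
///     ) -> io::Result<()> {
///         // No DAX window is available on this transport.
///         Err(io::Error::from_raw_os_error(libc::ENOSYS))
///     }
///
///     fn unmap(&self, _offset: u64, _size: u64) -> io::Result<()> {
///         Err(io::Error::from_raw_os_error(libc::ENOSYS))
///     }
/// }
/// ```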
100 pub trait Mapper {
101 /// Maps `size` bytes starting at `file_offset` bytes from within the given `fd` at `mem_offset`
102 /// bytes from the start of the memory region with `prot` protections. `mem_offset` must be
103 /// page aligned.
104 ///
105 /// # Arguments
106 /// * `mem_offset` - Page aligned offset into the memory region in bytes.
107 /// * `size` - Size of memory region in bytes.
108 /// * `fd` - File descriptor to mmap from.
109 /// * `file_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
110 /// * `prot` - Protection of the memory region.
111 fn map(
112 &self,
113 mem_offset: u64,
114 size: usize,
115 fd: &dyn AsRawFd,
116 file_offset: u64,
117 prot: Protection,
118 ) -> io::Result<()>;
119
120 /// Unmaps `size` bytes at `offset` bytes from the start of the memory region. `offset` must be
121 /// page aligned.
122 ///
123 /// # Arguments
124 /// * `offset` - Page aligned offset into the arena in bytes.
125 /// * `size` - Size of memory region in bytes.
126 fn unmap(&self, offset: u64, size: u64) -> io::Result<()>;
127 }
128
129 impl<'a, M: Mapper> Mapper for &'a M {
130 fn map(
131 &self,
132 mem_offset: u64,
133 size: usize,
134 fd: &dyn AsRawFd,
135 file_offset: u64,
136 prot: Protection,
137 ) -> io::Result<()> {
138 (**self).map(mem_offset, size, fd, file_offset, prot)
139 }
140
141 fn unmap(&self, offset: u64, size: u64) -> io::Result<()> {
142 (**self).unmap(offset, size)
143 }
144 }
145
146 pub struct Server<F: FileSystem + Sync> {
147 fs: F,
148 }
149
150 impl<F: FileSystem + Sync> Server<F> {
151 pub fn new(fs: F) -> Server<F> {
152 Server { fs }
153 }
154
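/// Reads a single request from `r`, dispatches it to the wrapped `FileSystem` based on its
/// opcode, and writes the reply (if any) to `w`. Returns the number of reply bytes written
/// (0 for requests such as `Forget` that have no reply).
///
/// # Example
///
/// A minimal sketch of a serve loop; `next_request()` is a hypothetical function that yields
/// the reader/writer halves and the mapper for one request on the chosen transport.
///
/// ```ignore
/// fn serve<F: FileSystem + Sync>(server: &Server<F>) -> Result<()> {
///     loop {
///         // Each iteration handles exactly one FUSE request.
///         let (r, w, mapper) = next_request();
///         server.handle_message(r, w, mapper)?;
///     }
/// }
/// ```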
155 pub fn handle_message<R: Reader + ZeroCopyReader, W: Writer + ZeroCopyWriter, M: Mapper>(
156 &self,
157 mut r: R,
158 w: W,
159 mapper: M,
160 ) -> Result<usize> {
161 let in_header: InHeader = r.read_struct()?;
162 cros_tracing::trace_simple_print!("fuse server: handle_message: in_header={:?}", in_header);
163
164 if in_header.len
165 > size_of::<InHeader>() as u32 + size_of::<WriteIn>() as u32 + self.fs.max_buffer_size()
166 {
167 return reply_error(
168 io::Error::from_raw_os_error(libc::ENOMEM),
169 in_header.unique,
170 w,
171 );
172 }
173 match Opcode::n(in_header.opcode) {
174 Some(Opcode::Lookup) => self.lookup(in_header, r, w),
175 Some(Opcode::Forget) => self.forget(in_header, r), // No reply.
176 Some(Opcode::Getattr) => self.getattr(in_header, r, w),
177 Some(Opcode::Setattr) => self.setattr(in_header, r, w),
178 Some(Opcode::Readlink) => self.readlink(in_header, w),
179 Some(Opcode::Symlink) => self.symlink(in_header, r, w),
180 Some(Opcode::Mknod) => self.mknod(in_header, r, w),
181 Some(Opcode::Mkdir) => self.mkdir(in_header, r, w),
182 Some(Opcode::Unlink) => self.unlink(in_header, r, w),
183 Some(Opcode::Rmdir) => self.rmdir(in_header, r, w),
184 Some(Opcode::Rename) => self.rename(in_header, r, w),
185 Some(Opcode::Link) => self.link(in_header, r, w),
186 Some(Opcode::Open) => self.open(in_header, r, w),
187 Some(Opcode::Read) => self.read(in_header, r, w),
188 Some(Opcode::Write) => self.write(in_header, r, w),
189 Some(Opcode::Statfs) => self.statfs(in_header, w),
190 Some(Opcode::Release) => self.release(in_header, r, w),
191 Some(Opcode::Fsync) => self.fsync(in_header, r, w),
192 Some(Opcode::Setxattr) => self.setxattr(in_header, r, w),
193 Some(Opcode::Getxattr) => self.getxattr(in_header, r, w),
194 Some(Opcode::Listxattr) => self.listxattr(in_header, r, w),
195 Some(Opcode::Removexattr) => self.removexattr(in_header, r, w),
196 Some(Opcode::Flush) => self.flush(in_header, r, w),
197 Some(Opcode::Init) => self.init(in_header, r, w),
198 Some(Opcode::Opendir) => self.opendir(in_header, r, w),
199 Some(Opcode::Readdir) => self.readdir(in_header, r, w),
200 Some(Opcode::Releasedir) => self.releasedir(in_header, r, w),
201 Some(Opcode::Fsyncdir) => self.fsyncdir(in_header, r, w),
202 Some(Opcode::Getlk) => self.getlk(in_header, r, w),
203 Some(Opcode::Setlk) => self.setlk(in_header, r, w),
204 Some(Opcode::Setlkw) => self.setlkw(in_header, r, w),
205 Some(Opcode::Access) => self.access(in_header, r, w),
206 Some(Opcode::Create) => self.create(in_header, r, w),
207 Some(Opcode::Interrupt) => self.interrupt(in_header),
208 Some(Opcode::Bmap) => self.bmap(in_header, r, w),
209 Some(Opcode::Destroy) => self.destroy(),
210 Some(Opcode::Ioctl) => self.ioctl(in_header, r, w),
211 Some(Opcode::Poll) => self.poll(in_header, r, w),
212 Some(Opcode::NotifyReply) => self.notify_reply(in_header, r, w),
213 Some(Opcode::BatchForget) => self.batch_forget(in_header, r, w),
214 Some(Opcode::Fallocate) => self.fallocate(in_header, r, w),
215 Some(Opcode::Readdirplus) => self.readdirplus(in_header, r, w),
216 Some(Opcode::Rename2) => self.rename2(in_header, r, w),
217 Some(Opcode::Lseek) => self.lseek(in_header, r, w),
218 Some(Opcode::CopyFileRange) => self.copy_file_range(in_header, r, w),
219 Some(Opcode::ChromeOsTmpfile) => self.chromeos_tmpfile(in_header, r, w),
220 Some(Opcode::SetUpMapping) => self.set_up_mapping(in_header, r, w, mapper),
221 Some(Opcode::RemoveMapping) => self.remove_mapping(in_header, r, w, mapper),
222 Some(Opcode::OpenAtomic) => self.open_atomic(in_header, r, w),
223 None => reply_error(
224 io::Error::from_raw_os_error(libc::ENOSYS),
225 in_header.unique,
226 w,
227 ),
228 }
229 }
230
231 fn lookup<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
232 let namelen = (in_header.len as usize)
233 .checked_sub(size_of::<InHeader>())
234 .ok_or(Error::InvalidHeaderLength)?;
235
236 let mut buf = vec![0; namelen];
237
238 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
239
240 let name = bytes_to_cstr(&buf)?;
241
242 match self
243 .fs
244 .lookup(Context::from(in_header), in_header.nodeid.into(), name)
245 {
246 Ok(entry) => {
247 let out = EntryOut::from(entry);
248
249 reply_ok(Some(out), None, in_header.unique, w)
250 }
251 Err(e) => reply_error(e, in_header.unique, w),
252 }
253 }
254
255 fn forget<R: Reader>(&self, in_header: InHeader, mut r: R) -> Result<usize> {
256 let ForgetIn { nlookup } = r.read_struct()?;
257
258 self.fs
259 .forget(Context::from(in_header), in_header.nodeid.into(), nlookup);
260
261 // There is no reply for forget messages.
262 Ok(0)
263 }
264
265 fn getattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
266 let GetattrIn {
267 flags,
268 dummy: _,
269 fh,
270 } = r.read_struct()?;
271
272 let handle = if (flags & GETATTR_FH) != 0 {
273 Some(fh.into())
274 } else {
275 None
276 };
277
278 match self
279 .fs
280 .getattr(Context::from(in_header), in_header.nodeid.into(), handle)
281 {
282 Ok((st, timeout)) => {
283 let out = AttrOut {
284 attr_valid: timeout.as_secs(),
285 attr_valid_nsec: timeout.subsec_nanos(),
286 dummy: 0,
287 attr: st.into(),
288 };
289 reply_ok(Some(out), None, in_header.unique, w)
290 }
291 Err(e) => reply_error(e, in_header.unique, w),
292 }
293 }
294
295 fn setattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
296 let setattr_in: SetattrIn = r.read_struct()?;
297
298 let handle = if setattr_in.valid & FATTR_FH != 0 {
299 Some(setattr_in.fh.into())
300 } else {
301 None
302 };
303
304 let valid = SetattrValid::from_bits_truncate(setattr_in.valid);
305
306 let st: libc::stat64 = setattr_in.into();
307
308 match self.fs.setattr(
309 Context::from(in_header),
310 in_header.nodeid.into(),
311 st,
312 handle,
313 valid,
314 ) {
315 Ok((st, timeout)) => {
316 let out = AttrOut {
317 attr_valid: timeout.as_secs(),
318 attr_valid_nsec: timeout.subsec_nanos(),
319 dummy: 0,
320 attr: st.into(),
321 };
322 reply_ok(Some(out), None, in_header.unique, w)
323 }
324 Err(e) => reply_error(e, in_header.unique, w),
325 }
326 }
327
328 fn readlink<W: Writer>(&self, in_header: InHeader, w: W) -> Result<usize> {
329 match self
330 .fs
331 .readlink(Context::from(in_header), in_header.nodeid.into())
332 {
333 Ok(linkname) => {
334 // We need to disambiguate the option type here even though it is `None`.
335 reply_ok(None::<u8>, Some(&linkname), in_header.unique, w)
336 }
337 Err(e) => reply_error(e, in_header.unique, w),
338 }
339 }
340
341 fn symlink<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
342 // Unfortunately the name and linkname are encoded one after another and
343 // separated by a nul character.
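// For example, creating `link -> target` (i.e. `ln -s target link`) yields a buffer of
// `b"link\0target\0"`, optionally followed by a security context extension that is parsed by
// `parse_selinux_xattr` below.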
344 let len = (in_header.len as usize)
345 .checked_sub(size_of::<InHeader>())
346 .ok_or(Error::InvalidHeaderLength)?;
347 let mut buf = vec![0; len];
348
349 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
350
351 let mut iter = buf.split_inclusive(|&c| c == b'\0');
352 let name = iter
353 .next()
354 .ok_or(Error::MissingParameter)
355 .and_then(bytes_to_cstr)?;
356 let linkname = iter
357 .next()
358 .ok_or(Error::MissingParameter)
359 .and_then(bytes_to_cstr)?;
360
361 let split_pos = name.to_bytes_with_nul().len() + linkname.to_bytes_with_nul().len();
362 let security_ctx = parse_selinux_xattr(&buf[split_pos..])?;
363
364 match self.fs.symlink(
365 Context::from(in_header),
366 linkname,
367 in_header.nodeid.into(),
368 name,
369 security_ctx,
370 ) {
371 Ok(entry) => {
372 let out = EntryOut::from(entry);
373
374 reply_ok(Some(out), None, in_header.unique, w)
375 }
376 Err(e) => reply_error(e, in_header.unique, w),
377 }
378 }
379
380 fn mknod<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
381 let MknodIn {
382 mode, rdev, umask, ..
383 } = r.read_struct()?;
384
385 let buflen = (in_header.len as usize)
386 .checked_sub(size_of::<InHeader>())
387 .and_then(|l| l.checked_sub(size_of::<MknodIn>()))
388 .ok_or(Error::InvalidHeaderLength)?;
389 let mut buf = vec![0; buflen];
390
391 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
392
393 let mut iter = buf.split_inclusive(|&c| c == b'\0');
394 let name = iter
395 .next()
396 .ok_or(Error::MissingParameter)
397 .and_then(bytes_to_cstr)?;
398
399 let split_pos = name.to_bytes_with_nul().len();
400 let security_ctx = parse_selinux_xattr(&buf[split_pos..])?;
401
402 match self.fs.mknod(
403 Context::from(in_header),
404 in_header.nodeid.into(),
405 name,
406 mode,
407 rdev,
408 umask,
409 security_ctx,
410 ) {
411 Ok(entry) => {
412 let out = EntryOut::from(entry);
413
414 reply_ok(Some(out), None, in_header.unique, w)
415 }
416 Err(e) => reply_error(e, in_header.unique, w),
417 }
418 }
419
420 fn mkdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
421 let MkdirIn { mode, umask } = r.read_struct()?;
422
423 let buflen = (in_header.len as usize)
424 .checked_sub(size_of::<InHeader>())
425 .and_then(|l| l.checked_sub(size_of::<MkdirIn>()))
426 .ok_or(Error::InvalidHeaderLength)?;
427 let mut buf = vec![0; buflen];
428
429 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
430
431 let mut iter = buf.split_inclusive(|&c| c == b'\0');
432 let name = iter
433 .next()
434 .ok_or(Error::MissingParameter)
435 .and_then(bytes_to_cstr)?;
436
437 let split_pos = name.to_bytes_with_nul().len();
438 let security_ctx = parse_selinux_xattr(&buf[split_pos..])?;
439
440 match self.fs.mkdir(
441 Context::from(in_header),
442 in_header.nodeid.into(),
443 name,
444 mode,
445 umask,
446 security_ctx,
447 ) {
448 Ok(entry) => {
449 let out = EntryOut::from(entry);
450
451 reply_ok(Some(out), None, in_header.unique, w)
452 }
453 Err(e) => reply_error(e, in_header.unique, w),
454 }
455 }
456
457 fn chromeos_tmpfile<R: Reader, W: Writer>(
458 &self,
459 in_header: InHeader,
460 mut r: R,
461 w: W,
462 ) -> Result<usize> {
463 let ChromeOsTmpfileIn { mode, umask } = r.read_struct()?;
464
465 let len = (in_header.len as usize)
466 .checked_sub(size_of::<InHeader>())
467 .and_then(|l| l.checked_sub(size_of::<ChromeOsTmpfileIn>()))
468 .ok_or(Error::InvalidHeaderLength)?;
469 let mut buf = vec![0; len];
470
471 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
472
473 let security_ctx = parse_selinux_xattr(&buf)?;
474
475 match self.fs.chromeos_tmpfile(
476 Context::from(in_header),
477 in_header.nodeid.into(),
478 mode,
479 umask,
480 security_ctx,
481 ) {
482 Ok(entry) => {
483 let out = EntryOut::from(entry);
484
485 reply_ok(Some(out), None, in_header.unique, w)
486 }
487 Err(e) => reply_error(e, in_header.unique, w),
488 }
489 }
490
491 fn unlink<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
492 let namelen = (in_header.len as usize)
493 .checked_sub(size_of::<InHeader>())
494 .ok_or(Error::InvalidHeaderLength)?;
495 let mut name = vec![0; namelen];
496
497 r.read_exact(&mut name).map_err(Error::DecodeMessage)?;
498
499 match self.fs.unlink(
500 Context::from(in_header),
501 in_header.nodeid.into(),
502 bytes_to_cstr(&name)?,
503 ) {
504 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
505 Err(e) => reply_error(e, in_header.unique, w),
506 }
507 }
508
509 fn rmdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
510 let namelen = (in_header.len as usize)
511 .checked_sub(size_of::<InHeader>())
512 .ok_or(Error::InvalidHeaderLength)?;
513 let mut name = vec![0; namelen];
514
515 r.read_exact(&mut name).map_err(Error::DecodeMessage)?;
516
517 match self.fs.rmdir(
518 Context::from(in_header),
519 in_header.nodeid.into(),
520 bytes_to_cstr(&name)?,
521 ) {
522 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
523 Err(e) => reply_error(e, in_header.unique, w),
524 }
525 }
526
527 fn do_rename<R: Reader, W: Writer>(
528 &self,
529 in_header: InHeader,
530 msg_size: usize,
531 newdir: u64,
532 flags: u32,
533 mut r: R,
534 w: W,
535 ) -> Result<usize> {
536 let buflen = (in_header.len as usize)
537 .checked_sub(size_of::<InHeader>())
538 .and_then(|l| l.checked_sub(msg_size))
539 .ok_or(Error::InvalidHeaderLength)?;
540 let mut buf = vec![0; buflen];
541
542 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
543
544 // We want to include the '\0' byte in the first slice.
545 let split_pos = buf
546 .iter()
547 .position(|c| *c == b'\0')
548 .map(|p| p + 1)
549 .ok_or(Error::MissingParameter)?;
550
551 let (oldname, newname) = buf.split_at(split_pos);
552
553 match self.fs.rename(
554 Context::from(in_header),
555 in_header.nodeid.into(),
556 bytes_to_cstr(oldname)?,
557 newdir.into(),
558 bytes_to_cstr(newname)?,
559 flags,
560 ) {
561 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
562 Err(e) => reply_error(e, in_header.unique, w),
563 }
564 }
565
566 fn rename<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
567 let RenameIn { newdir } = r.read_struct()?;
568
569 self.do_rename(in_header, size_of::<RenameIn>(), newdir, 0, r, w)
570 }
571
572 fn rename2<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
573 let Rename2In { newdir, flags, .. } = r.read_struct()?;
574
575 #[allow(clippy::unnecessary_cast)]
576 let flags = flags & (libc::RENAME_EXCHANGE | libc::RENAME_NOREPLACE) as u32;
577
578 self.do_rename(in_header, size_of::<Rename2In>(), newdir, flags, r, w)
579 }
580
581 fn link<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
582 let LinkIn { oldnodeid } = r.read_struct()?;
583
584 let namelen = (in_header.len as usize)
585 .checked_sub(size_of::<InHeader>())
586 .and_then(|l| l.checked_sub(size_of::<LinkIn>()))
587 .ok_or(Error::InvalidHeaderLength)?;
588 let mut name = vec![0; namelen];
589
590 r.read_exact(&mut name).map_err(Error::DecodeMessage)?;
591
592 match self.fs.link(
593 Context::from(in_header),
594 oldnodeid.into(),
595 in_header.nodeid.into(),
596 bytes_to_cstr(&name)?,
597 ) {
598 Ok(entry) => {
599 let out = EntryOut::from(entry);
600
601 reply_ok(Some(out), None, in_header.unique, w)
602 }
603 Err(e) => reply_error(e, in_header.unique, w),
604 }
605 }
606
607 fn open<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
608 let OpenIn { flags, .. } = r.read_struct()?;
609
610 match self
611 .fs
612 .open(Context::from(in_header), in_header.nodeid.into(), flags)
613 {
614 Ok((handle, opts)) => {
615 let out = OpenOut {
616 fh: handle.map(Into::into).unwrap_or(0),
617 open_flags: opts.bits(),
618 ..Default::default()
619 };
620
621 reply_ok(Some(out), None, in_header.unique, w)
622 }
623 Err(e) => reply_error(e, in_header.unique, w),
624 }
625 }
626
627 fn read<R: Reader, W: ZeroCopyWriter + Writer>(
628 &self,
629 in_header: InHeader,
630 mut r: R,
631 mut w: W,
632 ) -> Result<usize> {
633 let ReadIn {
634 fh,
635 offset,
636 size,
637 read_flags,
638 lock_owner,
639 flags,
640 ..
641 } = r.read_struct()?;
642
643 if size > self.fs.max_buffer_size() {
644 return reply_error(
645 io::Error::from_raw_os_error(libc::ENOMEM),
646 in_header.unique,
647 w,
648 );
649 }
650
651 let owner = if read_flags & READ_LOCKOWNER != 0 {
652 Some(lock_owner)
653 } else {
654 None
655 };
656
657 // Skip past the header so that the data is written first; the header is filled in below.
658 match w.write_at(size_of::<OutHeader>(), |writer| {
659 self.fs.read(
660 Context::from(in_header),
661 in_header.nodeid.into(),
662 fh.into(),
663 writer,
664 size,
665 offset,
666 owner,
667 flags,
668 )
669 }) {
670 Ok(count) => {
671 // Don't use `reply_ok` because we need to set a custom length in the
672 // header.
673 let out = OutHeader {
674 len: (size_of::<OutHeader>() + count) as u32,
675 error: 0,
676 unique: in_header.unique,
677 };
678
679 w.write_all(out.as_bytes()).map_err(Error::EncodeMessage)?;
680 w.flush().map_err(Error::FlushMessage)?;
681 Ok(out.len as usize)
682 }
683 Err(e) => reply_error(e, in_header.unique, w),
684 }
685 }
686
687 fn write<R: Reader + ZeroCopyReader, W: Writer>(
688 &self,
689 in_header: InHeader,
690 mut r: R,
691 w: W,
692 ) -> Result<usize> {
693 let WriteIn {
694 fh,
695 offset,
696 size,
697 write_flags,
698 lock_owner,
699 flags,
700 ..
701 } = r.read_struct()?;
702
703 if size > self.fs.max_buffer_size() {
704 return reply_error(
705 io::Error::from_raw_os_error(libc::ENOMEM),
706 in_header.unique,
707 w,
708 );
709 }
710
711 let owner = if write_flags & WRITE_LOCKOWNER != 0 {
712 Some(lock_owner)
713 } else {
714 None
715 };
716
717 let delayed_write = write_flags & WRITE_CACHE != 0;
718
719 match self.fs.write(
720 Context::from(in_header),
721 in_header.nodeid.into(),
722 fh.into(),
723 r,
724 size,
725 offset,
726 owner,
727 delayed_write,
728 flags,
729 ) {
730 Ok(count) => {
731 let out = WriteOut {
732 size: count as u32,
733 ..Default::default()
734 };
735
736 reply_ok(Some(out), None, in_header.unique, w)
737 }
738 Err(e) => reply_error(e, in_header.unique, w),
739 }
740 }
741
742 fn statfs<W: Writer>(&self, in_header: InHeader, w: W) -> Result<usize> {
743 match self
744 .fs
745 .statfs(Context::from(in_header), in_header.nodeid.into())
746 {
747 Ok(st) => reply_ok(Some(Kstatfs::from(st)), None, in_header.unique, w),
748 Err(e) => reply_error(e, in_header.unique, w),
749 }
750 }
751
752 fn release<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
753 let ReleaseIn {
754 fh,
755 flags,
756 release_flags,
757 lock_owner,
758 } = r.read_struct()?;
759
760 let flush = release_flags & RELEASE_FLUSH != 0;
761 let flock_release = release_flags & RELEASE_FLOCK_UNLOCK != 0;
762 let lock_owner = if flush || flock_release {
763 Some(lock_owner)
764 } else {
765 None
766 };
767
768 match self.fs.release(
769 Context::from(in_header),
770 in_header.nodeid.into(),
771 flags,
772 fh.into(),
773 flush,
774 flock_release,
775 lock_owner,
776 ) {
777 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
778 Err(e) => reply_error(e, in_header.unique, w),
779 }
780 }
781
782 fn fsync<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
783 let FsyncIn {
784 fh, fsync_flags, ..
785 } = r.read_struct()?;
786 let datasync = fsync_flags & 0x1 != 0;
787
788 match self.fs.fsync(
789 Context::from(in_header),
790 in_header.nodeid.into(),
791 datasync,
792 fh.into(),
793 ) {
794 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
795 Err(e) => reply_error(e, in_header.unique, w),
796 }
797 }
798
799 fn setxattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
800 let SetxattrIn { size, flags } = r.read_struct()?;
801
802 // The name and value are encoded one after another and separated by a '\0' character.
803 let len = (in_header.len as usize)
804 .checked_sub(size_of::<InHeader>())
805 .and_then(|l| l.checked_sub(size_of::<SetxattrIn>()))
806 .ok_or(Error::InvalidHeaderLength)?;
807 let mut buf = vec![0; len];
808
809 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
810
811 // We want to include the '\0' byte in the first slice.
812 let split_pos = buf
813 .iter()
814 .position(|c| *c == b'\0')
815 .map(|p| p + 1)
816 .ok_or(Error::MissingParameter)?;
817
818 let (name, value) = buf.split_at(split_pos);
819
820 if size != value.len() as u32 {
821 return Err(Error::InvalidXattrSize(size, value.len()));
822 }
823
824 match self.fs.setxattr(
825 Context::from(in_header),
826 in_header.nodeid.into(),
827 bytes_to_cstr(name)?,
828 value,
829 flags,
830 ) {
831 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
832 Err(e) => reply_error(e, in_header.unique, w),
833 }
834 }
835
836 fn getxattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
837 let GetxattrIn { size, .. } = r.read_struct()?;
838
839 let namelen = (in_header.len as usize)
840 .checked_sub(size_of::<InHeader>())
841 .and_then(|l| l.checked_sub(size_of::<GetxattrIn>()))
842 .ok_or(Error::InvalidHeaderLength)?;
843 let mut name = vec![0; namelen];
844
845 r.read_exact(&mut name).map_err(Error::DecodeMessage)?;
846
847 if size > self.fs.max_buffer_size() {
848 return reply_error(
849 io::Error::from_raw_os_error(libc::ENOMEM),
850 in_header.unique,
851 w,
852 );
853 }
854
855 match self.fs.getxattr(
856 Context::from(in_header),
857 in_header.nodeid.into(),
858 bytes_to_cstr(&name)?,
859 size,
860 ) {
861 Ok(GetxattrReply::Value(val)) => reply_ok(None::<u8>, Some(&val), in_header.unique, w),
862 Ok(GetxattrReply::Count(count)) => {
863 let out = GetxattrOut {
864 size: count,
865 ..Default::default()
866 };
867
868 reply_ok(Some(out), None, in_header.unique, w)
869 }
870 Err(e) => reply_error(e, in_header.unique, w),
871 }
872 }
873
874 fn listxattr<R: Reader, W: Writer>(
875 &self,
876 in_header: InHeader,
877 mut r: R,
878 w: W,
879 ) -> Result<usize> {
880 let GetxattrIn { size, .. } = r.read_struct()?;
881
882 if size > self.fs.max_buffer_size() {
883 return reply_error(
884 io::Error::from_raw_os_error(libc::ENOMEM),
885 in_header.unique,
886 w,
887 );
888 }
889
890 match self
891 .fs
892 .listxattr(Context::from(in_header), in_header.nodeid.into(), size)
893 {
894 Ok(ListxattrReply::Names(val)) => reply_ok(None::<u8>, Some(&val), in_header.unique, w),
895 Ok(ListxattrReply::Count(count)) => {
896 let out = GetxattrOut {
897 size: count,
898 ..Default::default()
899 };
900
901 reply_ok(Some(out), None, in_header.unique, w)
902 }
903 Err(e) => reply_error(e, in_header.unique, w),
904 }
905 }
906
907 fn removexattr<R: Reader, W: Writer>(
908 &self,
909 in_header: InHeader,
910 mut r: R,
911 w: W,
912 ) -> Result<usize> {
913 let namelen = (in_header.len as usize)
914 .checked_sub(size_of::<InHeader>())
915 .ok_or(Error::InvalidHeaderLength)?;
916
917 let mut buf = vec![0; namelen];
918
919 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
920
921 let name = bytes_to_cstr(&buf)?;
922
923 match self
924 .fs
925 .removexattr(Context::from(in_header), in_header.nodeid.into(), name)
926 {
927 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
928 Err(e) => reply_error(e, in_header.unique, w),
929 }
930 }
931
932 fn flush<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
933 let FlushIn {
934 fh,
935 unused: _,
936 padding: _,
937 lock_owner,
938 } = r.read_struct()?;
939
940 match self.fs.flush(
941 Context::from(in_header),
942 in_header.nodeid.into(),
943 fh.into(),
944 lock_owner,
945 ) {
946 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
947 Err(e) => reply_error(e, in_header.unique, w),
948 }
949 }
950
951 fn init<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
952 cros_tracing::trace_simple_print!("fuse server: init: in_header={:?}", in_header);
953 let InitIn {
954 major,
955 minor,
956 max_readahead,
957 flags,
958 } = r.read_struct()?;
959
960 if major < KERNEL_VERSION {
961 error!("Unsupported fuse protocol version: {}.{}", major, minor);
962 return reply_error(
963 io::Error::from_raw_os_error(libc::EPROTO),
964 in_header.unique,
965 w,
966 );
967 }
968
969 if major > KERNEL_VERSION {
970 // Wait for the kernel to reply with a 7.X version.
971 let out = InitOut {
972 major: KERNEL_VERSION,
973 minor: KERNEL_MINOR_VERSION,
974 ..Default::default()
975 };
976
977 return reply_ok(Some(out), None, in_header.unique, w);
978 }
979
980 if minor < OLDEST_SUPPORTED_KERNEL_MINOR_VERSION {
981 error!(
982 "Unsupported fuse protocol minor version: {}.{}",
983 major, minor
984 );
985 return reply_error(
986 io::Error::from_raw_os_error(libc::EPROTO),
987 in_header.unique,
988 w,
989 );
990 }
991
992 let InitInExt { flags2, .. } =
993 if (FsOptions::from_bits_truncate(u64::from(flags)) & FsOptions::INIT_EXT).is_empty() {
994 InitInExt::default()
995 } else {
996 r.read_struct()?
997 };
998
999 // These fuse features are supported by this server by default.
1000 let supported = FsOptions::ASYNC_READ
1001 | FsOptions::PARALLEL_DIROPS
1002 | FsOptions::BIG_WRITES
1003 | FsOptions::AUTO_INVAL_DATA
1004 | FsOptions::HANDLE_KILLPRIV
1005 | FsOptions::ASYNC_DIO
1006 | FsOptions::HAS_IOCTL_DIR
1007 | FsOptions::DO_READDIRPLUS
1008 | FsOptions::READDIRPLUS_AUTO
1009 | FsOptions::ATOMIC_O_TRUNC
1010 | FsOptions::MAX_PAGES
1011 | FsOptions::MAP_ALIGNMENT
1012 | FsOptions::INIT_EXT;
1013
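// The FUSE_INIT flags are a 64-bit set split across two 32-bit fields: `flags` carries the
// low bits and, when the kernel negotiated `INIT_EXT`, `flags2` carries the high bits (it is
// zero otherwise; see the `InitInExt` handling above).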
1014 let capable = FsOptions::from_bits_truncate(u64::from(flags) | u64::from(flags2) << 32);
1015
1016 match self.fs.init(capable) {
1017 Ok(want) => {
1018 let mut enabled = capable & (want | supported);
1019
1020 // HANDLE_KILLPRIV doesn't work correctly when writeback caching is enabled so turn
1021 // it off.
1022 if enabled.contains(FsOptions::WRITEBACK_CACHE) {
1023 enabled.remove(FsOptions::HANDLE_KILLPRIV);
1024 }
1025
1026 // ATOMIC_O_TRUNC doesn't work with ZERO_MESSAGE_OPEN.
1027 if enabled.contains(FsOptions::ZERO_MESSAGE_OPEN) {
1028 enabled.remove(FsOptions::ATOMIC_O_TRUNC);
1029 }
1030
1031 let max_write = self.fs.max_buffer_size();
1032 let max_pages = min(
1033 max(max_readahead, max_write) / pagesize() as u32,
1034 u16::MAX as u32,
1035 ) as u16;
1036 let out = InitOut {
1037 major: KERNEL_VERSION,
1038 minor: KERNEL_MINOR_VERSION,
1039 max_readahead,
1040 flags: enabled.bits() as u32,
1041 max_background: ::std::u16::MAX,
1042 congestion_threshold: (::std::u16::MAX / 4) * 3,
1043 max_write,
1044 time_gran: 1, // nanoseconds
1045 max_pages,
1046 map_alignment: pagesize().trailing_zeros() as u16,
1047 flags2: (enabled.bits() >> 32) as u32,
1048 ..Default::default()
1049 };
1050
1051 reply_ok(Some(out), None, in_header.unique, w)
1052 }
1053 Err(e) => reply_error(e, in_header.unique, w),
1054 }
1055 }
1056
1057 fn opendir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
1058 let OpenIn { flags, .. } = r.read_struct()?;
1059
1060 match self
1061 .fs
1062 .opendir(Context::from(in_header), in_header.nodeid.into(), flags)
1063 {
1064 Ok((handle, opts)) => {
1065 let out = OpenOut {
1066 fh: handle.map(Into::into).unwrap_or(0),
1067 open_flags: opts.bits(),
1068 ..Default::default()
1069 };
1070
1071 reply_ok(Some(out), None, in_header.unique, w)
1072 }
1073 Err(e) => reply_error(e, in_header.unique, w),
1074 }
1075 }
1076
1077 fn readdir<R: Reader, W: Writer>(
1078 &self,
1079 in_header: InHeader,
1080 mut r: R,
1081 mut w: W,
1082 ) -> Result<usize> {
1083 let ReadIn {
1084 fh, offset, size, ..
1085 } = r.read_struct()?;
1086
1087 if size > self.fs.max_buffer_size() {
1088 return reply_error(
1089 io::Error::from_raw_os_error(libc::ENOMEM),
1090 in_header.unique,
1091 w,
1092 );
1093 }
1094
1095 if !w.has_sufficient_buffer(size) {
1096 return reply_error(
1097 io::Error::from_raw_os_error(libc::ENOMEM),
1098 in_header.unique,
1099 w,
1100 );
1101 }
1102
1103 // Skip over enough bytes for the header.
1104 let unique = in_header.unique;
1105 let result = w.write_at(size_of::<OutHeader>(), |cursor| {
1106 match self.fs.readdir(
1107 Context::from(in_header),
1108 in_header.nodeid.into(),
1109 fh.into(),
1110 size,
1111 offset,
1112 ) {
1113 Ok(mut entries) => {
1114 let mut total_written = 0;
1115 while let Some(dirent) = entries.next() {
1116 let remaining = (size as usize).saturating_sub(total_written);
1117 match add_dirent(cursor, remaining, &dirent, None) {
1118 // No more space left in the buffer.
1119 Ok(0) => break,
1120 Ok(bytes_written) => {
1121 total_written += bytes_written;
1122 }
1123 Err(e) => return Err(e),
1124 }
1125 }
1126 Ok(total_written)
1127 }
1128 Err(e) => Err(e),
1129 }
1130 });
1131
1132 match result {
1133 Ok(total_written) => reply_readdir(total_written, unique, w),
1134 Err(e) => reply_error(e, unique, w),
1135 }
1136 }
1137
1138 fn lookup_dirent_attribute(
1139 &self,
1140 in_header: &InHeader,
1141 dir_entry: &DirEntry,
1142 ) -> io::Result<Entry> {
1143 let parent = in_header.nodeid.into();
1144 let name = dir_entry.name.to_bytes();
1145 let entry = if name == b"." || name == b".." {
1146 // Don't do lookups on the current directory or the parent directory.
1147 // SAFETY: struct only contains integer fields and any value is valid.
1148 let mut attr = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
1149 attr.st_ino = dir_entry.ino;
1150 attr.st_mode = dir_entry.type_;
1151
1152 // We use 0 for the inode value to indicate a negative entry.
1153 Entry {
1154 inode: 0,
1155 generation: 0,
1156 attr,
1157 attr_timeout: Duration::from_secs(0),
1158 entry_timeout: Duration::from_secs(0),
1159 }
1160 } else {
1161 self.fs
1162 .lookup(Context::from(*in_header), parent, dir_entry.name)?
1163 };
1164
1165 Ok(entry)
1166 }
1167
1168 fn readdirplus<R: Reader, W: Writer>(
1169 &self,
1170 in_header: InHeader,
1171 mut r: R,
1172 mut w: W,
1173 ) -> Result<usize> {
1174 cros_tracing::trace_simple_print!("fuse server: readdirplus: in_header={:?}", in_header);
1175 let ReadIn {
1176 fh, offset, size, ..
1177 } = r.read_struct()?;
1178
1179 if size > self.fs.max_buffer_size() {
1180 return reply_error(
1181 io::Error::from_raw_os_error(libc::ENOMEM),
1182 in_header.unique,
1183 w,
1184 );
1185 }
1186
1187 if !w.has_sufficient_buffer(size) {
1188 return reply_error(
1189 io::Error::from_raw_os_error(libc::ENOMEM),
1190 in_header.unique,
1191 w,
1192 );
1193 }
1194
1195 // Skip over enough bytes for the header.
1196 let unique = in_header.unique;
1197 let result = w.write_at(size_of::<OutHeader>(), |cursor| {
1198 match self.fs.readdir(
1199 Context::from(in_header),
1200 in_header.nodeid.into(),
1201 fh.into(),
1202 size,
1203 offset,
1204 ) {
1205 Ok(mut entries) => {
1206 let mut total_written = 0;
1207 while let Some(dirent) = entries.next() {
1208 let mut entry_inode = None;
1209 match self
1210 .lookup_dirent_attribute(&in_header, &dirent)
1211 .and_then(|e| {
1212 entry_inode = Some(e.inode);
1213 let remaining = (size as usize).saturating_sub(total_written);
1214 add_dirent(cursor, remaining, &dirent, Some(e))
1215 }) {
1216 Ok(0) => {
1217 // No more space left in the buffer but we need to undo the lookup
1218 // that created the Entry or we will end up with mismatched lookup
1219 // counts.
1220 if let Some(inode) = entry_inode {
1221 self.fs.forget(Context::from(in_header), inode.into(), 1);
1222 }
1223 break;
1224 }
1225 Ok(bytes_written) => {
1226 total_written += bytes_written;
1227 }
1228 Err(e) => {
1229 if let Some(inode) = entry_inode {
1230 self.fs.forget(Context::from(in_header), inode.into(), 1);
1231 }
1232
1233 if total_written == 0 {
1234 // We haven't filled any entries yet so we can just propagate
1235 // the error.
1236 return Err(e);
1237 }
1238
1239 // We already filled in some entries. Returning an error now will
1240 // cause lookup count mismatches for those entries so just return
1241 // whatever we already have.
1242 break;
1243 }
1244 }
1245 }
1246 Ok(total_written)
1247 }
1248 Err(e) => Err(e),
1249 }
1250 });
1251
1252 match result {
1253 Ok(total_written) => reply_readdir(total_written, unique, w),
1254 Err(e) => reply_error(e, unique, w),
1255 }
1256 }
1257
1258 fn releasedir<R: Reader, W: Writer>(
1259 &self,
1260 in_header: InHeader,
1261 mut r: R,
1262 w: W,
1263 ) -> Result<usize> {
1264 let ReleaseIn { fh, flags, .. } = r.read_struct()?;
1265
1266 match self.fs.releasedir(
1267 Context::from(in_header),
1268 in_header.nodeid.into(),
1269 flags,
1270 fh.into(),
1271 ) {
1272 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
1273 Err(e) => reply_error(e, in_header.unique, w),
1274 }
1275 }
1276
1277 fn fsyncdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
1278 let FsyncIn {
1279 fh, fsync_flags, ..
1280 } = r.read_struct()?;
1281 let datasync = fsync_flags & 0x1 != 0;
1282
1283 match self.fs.fsyncdir(
1284 Context::from(in_header),
1285 in_header.nodeid.into(),
1286 datasync,
1287 fh.into(),
1288 ) {
1289 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
1290 Err(e) => reply_error(e, in_header.unique, w),
1291 }
1292 }
1293
1294 fn getlk<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1295 if let Err(e) = self.fs.getlk() {
1296 reply_error(e, in_header.unique, w)
1297 } else {
1298 Ok(0)
1299 }
1300 }
1301
1302 fn setlk<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1303 if let Err(e) = self.fs.setlk() {
1304 reply_error(e, in_header.unique, w)
1305 } else {
1306 Ok(0)
1307 }
1308 }
1309
1310 fn setlkw<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1311 if let Err(e) = self.fs.setlkw() {
1312 reply_error(e, in_header.unique, w)
1313 } else {
1314 Ok(0)
1315 }
1316 }
1317
1318 fn access<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
1319 let AccessIn { mask, .. } = r.read_struct()?;
1320
1321 match self
1322 .fs
1323 .access(Context::from(in_header), in_header.nodeid.into(), mask)
1324 {
1325 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
1326 Err(e) => reply_error(e, in_header.unique, w),
1327 }
1328 }
1329
1330 fn create<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
1331 let CreateIn {
1332 flags, mode, umask, ..
1333 } = r.read_struct()?;
1334
1335 let buflen = (in_header.len as usize)
1336 .checked_sub(size_of::<InHeader>())
1337 .and_then(|l| l.checked_sub(size_of::<CreateIn>()))
1338 .ok_or(Error::InvalidHeaderLength)?;
1339
1340 let mut buf = vec![0; buflen];
1341
1342 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
1343
1344 let mut iter = buf.split_inclusive(|&c| c == b'\0');
1345 let name = iter
1346 .next()
1347 .ok_or(Error::MissingParameter)
1348 .and_then(bytes_to_cstr)?;
1349
1350 let split_pos = name.to_bytes_with_nul().len();
1351 let security_ctx = parse_selinux_xattr(&buf[split_pos..])?;
1352
1353 match self.fs.create(
1354 Context::from(in_header),
1355 in_header.nodeid.into(),
1356 name,
1357 mode,
1358 flags,
1359 umask,
1360 security_ctx,
1361 ) {
1362 Ok((entry, handle, opts)) => {
1363 let entry_out = EntryOut {
1364 nodeid: entry.inode,
1365 generation: entry.generation,
1366 entry_valid: entry.entry_timeout.as_secs(),
1367 attr_valid: entry.attr_timeout.as_secs(),
1368 entry_valid_nsec: entry.entry_timeout.subsec_nanos(),
1369 attr_valid_nsec: entry.attr_timeout.subsec_nanos(),
1370 attr: entry.attr.into(),
1371 };
1372 let open_out = OpenOut {
1373 fh: handle.map(Into::into).unwrap_or(0),
1374 open_flags: opts.bits(),
1375 ..Default::default()
1376 };
1377
1378 // Kind of a hack to write both structs.
1379 reply_ok(
1380 Some(entry_out),
1381 Some(open_out.as_bytes()),
1382 in_header.unique,
1383 w,
1384 )
1385 }
1386 Err(e) => reply_error(e, in_header.unique, w),
1387 }
1388 }
1389
1390 #[allow(clippy::unnecessary_wraps)]
1391 fn interrupt(&self, _in_header: InHeader) -> Result<usize> {
1392 Ok(0)
1393 }
1394
1395 fn bmap<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1396 if let Err(e) = self.fs.bmap() {
1397 reply_error(e, in_header.unique, w)
1398 } else {
1399 Ok(0)
1400 }
1401 }
1402
1403 #[allow(clippy::unnecessary_wraps)]
1404 fn destroy(&self) -> Result<usize> {
1405 // There is no reply for this message.
1406 self.fs.destroy();
1407
1408 Ok(0)
1409 }
1410
1411 fn ioctl<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
1412 let IoctlIn {
1413 fh,
1414 flags,
1415 cmd,
1416 arg,
1417 in_size,
1418 out_size,
1419 } = r.read_struct()?;
1420
1421 let res = self.fs.ioctl(
1422 in_header.into(),
1423 in_header.nodeid.into(),
1424 fh.into(),
1425 IoctlFlags::from_bits_truncate(flags),
1426 cmd,
1427 arg,
1428 in_size,
1429 out_size,
1430 r,
1431 );
1432
1433 match res {
1434 Ok(reply) => match reply {
1435 IoctlReply::Retry { input, output } => {
1436 retry_ioctl(in_header.unique, input, output, w)
1437 }
1438 IoctlReply::Done(res) => finish_ioctl(in_header.unique, res, w),
1439 },
1440 Err(e) => reply_error(e, in_header.unique, w),
1441 }
1442 }
1443
1444 fn poll<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1445 if let Err(e) = self.fs.poll() {
1446 reply_error(e, in_header.unique, w)
1447 } else {
1448 Ok(0)
1449 }
1450 }
1451
1452 fn notify_reply<R: Reader, W: Writer>(
1453 &self,
1454 in_header: InHeader,
1455 mut _r: R,
1456 w: W,
1457 ) -> Result<usize> {
1458 if let Err(e) = self.fs.notify_reply() {
1459 reply_error(e, in_header.unique, w)
1460 } else {
1461 Ok(0)
1462 }
1463 }
1464
1465 fn batch_forget<R: Reader, W: Writer>(
1466 &self,
1467 in_header: InHeader,
1468 mut r: R,
1469 w: W,
1470 ) -> Result<usize> {
1471 let BatchForgetIn { count, .. } = r.read_struct()?;
1472
1473 if let Some(size) = (count as usize).checked_mul(size_of::<ForgetOne>()) {
1474 if size > self.fs.max_buffer_size() as usize {
1475 return reply_error(
1476 io::Error::from_raw_os_error(libc::ENOMEM),
1477 in_header.unique,
1478 w,
1479 );
1480 }
1481 } else {
1482 return reply_error(
1483 io::Error::from_raw_os_error(libc::EOVERFLOW),
1484 in_header.unique,
1485 w,
1486 );
1487 }
1488
1489 let mut requests = Vec::with_capacity(count as usize);
1490 for _ in 0..count {
1491 let f: ForgetOne = r.read_struct()?;
1492 requests.push((f.nodeid.into(), f.nlookup));
1493 }
1494
1495 self.fs.batch_forget(Context::from(in_header), requests);
1496
1497 // No reply for forget messages.
1498 Ok(0)
1499 }
1500
1501 fn fallocate<R: Reader, W: Writer>(
1502 &self,
1503 in_header: InHeader,
1504 mut r: R,
1505 w: W,
1506 ) -> Result<usize> {
1507 let FallocateIn {
1508 fh,
1509 offset,
1510 length,
1511 mode,
1512 ..
1513 } = r.read_struct()?;
1514
1515 match self.fs.fallocate(
1516 Context::from(in_header),
1517 in_header.nodeid.into(),
1518 fh.into(),
1519 mode,
1520 offset,
1521 length,
1522 ) {
1523 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
1524 Err(e) => reply_error(e, in_header.unique, w),
1525 }
1526 }
1527
1528 fn lseek<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1529 if let Err(e) = self.fs.lseek() {
1530 reply_error(e, in_header.unique, w)
1531 } else {
1532 Ok(0)
1533 }
1534 }
1535
1536 fn copy_file_range<R: Reader, W: Writer>(
1537 &self,
1538 in_header: InHeader,
1539 mut r: R,
1540 w: W,
1541 ) -> Result<usize> {
1542 let CopyFileRangeIn {
1543 fh_src,
1544 off_src,
1545 nodeid_dst,
1546 fh_dst,
1547 off_dst,
1548 len,
1549 flags,
1550 } = r.read_struct()?;
1551
1552 match self.fs.copy_file_range(
1553 Context::from(in_header),
1554 in_header.nodeid.into(),
1555 fh_src.into(),
1556 off_src,
1557 nodeid_dst.into(),
1558 fh_dst.into(),
1559 off_dst,
1560 len,
1561 flags,
1562 ) {
1563 Ok(count) => {
1564 let out = WriteOut {
1565 size: count as u32,
1566 ..Default::default()
1567 };
1568
1569 reply_ok(Some(out), None, in_header.unique, w)
1570 }
1571 Err(e) => reply_error(e, in_header.unique, w),
1572 }
1573 }
1574
1575 fn set_up_mapping<R, W, M>(
1576 &self,
1577 in_header: InHeader,
1578 mut r: R,
1579 w: W,
1580 mapper: M,
1581 ) -> Result<usize>
1582 where
1583 R: Reader,
1584 W: Writer,
1585 M: Mapper,
1586 {
1587 let SetUpMappingIn {
1588 fh,
1589 foffset,
1590 len,
1591 flags,
1592 moffset,
1593 } = r.read_struct()?;
1594 let flags = SetUpMappingFlags::from_bits_truncate(flags);
1595
1596 let mut prot = 0;
1597 if flags.contains(SetUpMappingFlags::READ) {
1598 prot |= libc::PROT_READ as u32;
1599 }
1600 if flags.contains(SetUpMappingFlags::WRITE) {
1601 prot |= libc::PROT_WRITE as u32;
1602 }
1603
1604 let size = if let Ok(s) = len.try_into() {
1605 s
1606 } else {
1607 return reply_error(
1608 io::Error::from_raw_os_error(libc::EOVERFLOW),
1609 in_header.unique,
1610 w,
1611 );
1612 };
1613
1614 match self.fs.set_up_mapping(
1615 Context::from(in_header),
1616 in_header.nodeid.into(),
1617 fh.into(),
1618 foffset,
1619 moffset,
1620 size,
1621 prot,
1622 mapper,
1623 ) {
1624 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
1625 Err(e) => {
1626 error!("set_up_mapping failed: {}", e);
1627 reply_error(e, in_header.unique, w)
1628 }
1629 }
1630 }
1631
1632 fn remove_mapping<R, W, M>(
1633 &self,
1634 in_header: InHeader,
1635 mut r: R,
1636 w: W,
1637 mapper: M,
1638 ) -> Result<usize>
1639 where
1640 R: Reader,
1641 W: Writer,
1642 M: Mapper,
1643 {
1644 let RemoveMappingIn { count } = r.read_struct()?;
1645
1646 // `FUSE_REMOVEMAPPING_MAX_ENTRY` is defined as
1647 // `PAGE_SIZE / sizeof(struct fuse_removemapping_one)` in /kernel/include/uapi/linux/fuse.h.
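// With 4 KiB pages and a 16-byte `RemoveMappingOne` (two u64 fields) this works out to
// 256 entries.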
1648 let max_entry = pagesize() / std::mem::size_of::<RemoveMappingOne>();
1649
1650 if max_entry < count as usize {
1651 return reply_error(
1652 io::Error::from_raw_os_error(libc::EINVAL),
1653 in_header.unique,
1654 w,
1655 );
1656 }
1657
1658 let mut msgs = Vec::with_capacity(count as usize);
1659 for _ in 0..(count as usize) {
1660 let msg: RemoveMappingOne = r.read_struct()?;
1661 msgs.push(msg);
1662 }
1663
1664 match self.fs.remove_mapping(&msgs, mapper) {
1665 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
1666 Err(e) => reply_error(e, in_header.unique, w),
1667 }
1668 }
1669
1670 fn open_atomic<R: Reader, W: Writer>(
1671 &self,
1672 in_header: InHeader,
1673 mut r: R,
1674 w: W,
1675 ) -> Result<usize> {
1676 let CreateIn {
1677 flags, mode, umask, ..
1678 } = r.read_struct()?;
1679
1680 let buflen = (in_header.len as usize)
1681 .checked_sub(size_of::<InHeader>())
1682 .and_then(|l| l.checked_sub(size_of::<CreateIn>()))
1683 .ok_or(Error::InvalidHeaderLength)?;
1684
1685 let mut buf = vec![0; buflen];
1686
1687 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
1688
1689 let mut iter = buf.split_inclusive(|&c| c == b'\0');
1690 let name = iter
1691 .next()
1692 .ok_or(Error::MissingParameter)
1693 .and_then(bytes_to_cstr)?;
1694
1695 let split_pos = name.to_bytes_with_nul().len();
1696 let security_ctx = parse_selinux_xattr(&buf[split_pos..])?;
1697
1698 match self.fs.atomic_open(
1699 Context::from(in_header),
1700 in_header.nodeid.into(),
1701 name,
1702 mode,
1703 flags,
1704 umask,
1705 security_ctx,
1706 ) {
1707 Ok((entry, handle, opts)) => {
1708 let entry_out = EntryOut {
1709 nodeid: entry.inode,
1710 generation: entry.generation,
1711 entry_valid: entry.entry_timeout.as_secs(),
1712 attr_valid: entry.attr_timeout.as_secs(),
1713 entry_valid_nsec: entry.entry_timeout.subsec_nanos(),
1714 attr_valid_nsec: entry.attr_timeout.subsec_nanos(),
1715 attr: entry.attr.into(),
1716 };
1717 let open_out = OpenOut {
1718 fh: handle.map(Into::into).unwrap_or(0),
1719 open_flags: opts.bits(),
1720 ..Default::default()
1721 };
1722
1723 // `open_out` is passed through the `data` argument, but the two out structs are independent.
1724 // This is a hack to return two out structs in one FUSE reply.
1725 reply_ok(
1726 Some(entry_out),
1727 Some(open_out.as_bytes()),
1728 in_header.unique,
1729 w,
1730 )
1731 }
1732 Err(e) => reply_error(e, in_header.unique, w),
1733 }
1734 }
1735 }
1736
1737 fn retry_ioctl<W: Writer>(
1738 unique: u64,
1739 input: Vec<IoctlIovec>,
1740 output: Vec<IoctlIovec>,
1741 mut w: W,
1742 ) -> Result<usize> {
1743 // We don't need to check for overflow here because if adding these 2 values caused an overflow
1744 // we would have run out of memory before reaching this point.
1745 if input.len() + output.len() > IOCTL_MAX_IOV {
1746 return Err(Error::TooManyIovecs(
1747 input.len() + output.len(),
1748 IOCTL_MAX_IOV,
1749 ));
1750 }
1751
1752 let len = size_of::<OutHeader>()
1753 + size_of::<IoctlOut>()
1754 + (input.len() * size_of::<IoctlIovec>())
1755 + (output.len() * size_of::<IoctlIovec>());
1756 let header = OutHeader {
1757 len: len as u32,
1758 error: 0,
1759 unique,
1760 };
1761 let out = IoctlOut {
1762 result: 0,
1763 flags: IoctlFlags::RETRY.bits(),
1764 in_iovs: input.len() as u32,
1765 out_iovs: output.len() as u32,
1766 };
1767
1768 let mut total_bytes = size_of::<OutHeader>() + size_of::<IoctlOut>();
1769 w.write_all(header.as_bytes())
1770 .map_err(Error::EncodeMessage)?;
1771 w.write_all(out.as_bytes()).map_err(Error::EncodeMessage)?;
1772 for i in input.into_iter().chain(output.into_iter()) {
1773 total_bytes += i.as_bytes().len();
1774 w.write_all(i.as_bytes()).map_err(Error::EncodeMessage)?;
1775 }
1776
1777 w.flush().map_err(Error::FlushMessage)?;
1778 debug_assert_eq!(len, total_bytes);
1779 Ok(len)
1780 }
1781
1782 fn finish_ioctl<W: Writer>(unique: u64, res: io::Result<Vec<u8>>, w: W) -> Result<usize> {
1783 let (out, data) = match res {
1784 Ok(data) => {
1785 let out = IoctlOut {
1786 result: 0,
1787 ..Default::default()
1788 };
1789 (out, Some(data))
1790 }
1791 Err(e) => {
1792 let out = IoctlOut {
1793 result: -e.raw_os_error().unwrap_or(libc::EIO),
1794 ..Default::default()
1795 };
1796 (out, None)
1797 }
1798 };
1799 reply_ok(Some(out), data.as_ref().map(|d| &d[..]), unique, w)
1800 }
1801
1802 fn reply_readdir<W: Writer>(len: usize, unique: u64, mut w: W) -> Result<usize> {
1803 let out = OutHeader {
1804 len: (size_of::<OutHeader>() + len) as u32,
1805 error: 0,
1806 unique,
1807 };
1808
1809 w.write_all(out.as_bytes()).map_err(Error::EncodeMessage)?;
1810 w.flush().map_err(Error::FlushMessage)?;
1811 Ok(out.len as usize)
1812 }
1813
1814 fn reply_ok<T: AsBytes, W: Writer>(
1815 out: Option<T>,
1816 data: Option<&[u8]>,
1817 unique: u64,
1818 mut w: W,
1819 ) -> Result<usize> {
1820 let mut len = size_of::<OutHeader>();
1821
1822 if out.is_some() {
1823 len += size_of::<T>();
1824 }
1825
1826 if let Some(data) = data {
1827 len += data.len();
1828 }
1829
1830 let header = OutHeader {
1831 len: len as u32,
1832 error: 0,
1833 unique,
1834 };
1835
1836 let mut total_bytes = size_of::<OutHeader>();
1837 w.write_all(header.as_bytes())
1838 .map_err(Error::EncodeMessage)?;
1839
1840 if let Some(out) = out {
1841 total_bytes += out.as_bytes().len();
1842 w.write_all(out.as_bytes()).map_err(Error::EncodeMessage)?;
1843 }
1844
1845 if let Some(data) = data {
1846 total_bytes += data.len();
1847 w.write_all(data).map_err(Error::EncodeMessage)?;
1848 }
1849
1850 w.flush().map_err(Error::FlushMessage)?;
1851 debug_assert_eq!(len, total_bytes);
1852 Ok(len)
1853 }
1854
reply_error<W: Writer>(e: io::Error, unique: u64, mut w: W) -> Result<usize>1855 fn reply_error<W: Writer>(e: io::Error, unique: u64, mut w: W) -> Result<usize> {
1856 let header = OutHeader {
1857 len: size_of::<OutHeader>() as u32,
1858 error: -e.raw_os_error().unwrap_or(libc::EIO),
1859 unique,
1860 };
1861
1862 w.write_all(header.as_bytes())
1863 .map_err(Error::EncodeMessage)?;
1864 w.flush().map_err(Error::FlushMessage)?;
1865
1866 Ok(header.len as usize)
1867 }
1868
bytes_to_cstr(buf: &[u8]) -> Result<&CStr>1869 fn bytes_to_cstr(buf: &[u8]) -> Result<&CStr> {
1870 // Convert to a `CStr` first so that we can drop the '\0' byte at the end
1871 // and make sure there are no interior '\0' bytes.
1872 CStr::from_bytes_with_nul(buf).map_err(Error::InvalidCString)
1873 }
1874
add_dirent<W: Writer>( cursor: &mut W, max: usize, d: &DirEntry, entry: Option<Entry>, ) -> io::Result<usize>1875 fn add_dirent<W: Writer>(
1876 cursor: &mut W,
1877 max: usize,
1878 d: &DirEntry,
1879 entry: Option<Entry>,
1880 ) -> io::Result<usize> {
1881 // Strip the trailing '\0'.
1882 let name = d.name.to_bytes();
1883 if name.len() > ::std::u32::MAX as usize {
1884 return Err(io::Error::from_raw_os_error(libc::EOVERFLOW));
1885 }
1886
1887 let dirent_len = size_of::<Dirent>()
1888 .checked_add(name.len())
1889 .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
1890
1891 // Directory entries must be padded to 8-byte alignment. If adding 7 causes
1892 // an overflow then this dirent cannot be properly padded.
1893 let padded_dirent_len = dirent_len
1894 .checked_add(7)
1895 .map(|l| l & !7)
1896 .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
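    // For example, a `dirent_len` of 27 is padded up to a `padded_dirent_len` of 32, while a
    // `dirent_len` that is already a multiple of 8 is left unchanged.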

    let total_len = if entry.is_some() {
        padded_dirent_len
            .checked_add(size_of::<EntryOut>())
            .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?
    } else {
        padded_dirent_len
    };

    if max < total_len {
        Ok(0)
    } else {
        if let Some(entry) = entry {
            cursor.write_all(EntryOut::from(entry).as_bytes())?;
        }

        let dirent = Dirent {
            ino: d.ino,
            off: d.offset,
            namelen: name.len() as u32,
            type_: d.type_,
        };

        cursor.write_all(dirent.as_bytes())?;
        cursor.write_all(name)?;

        // We know that `dirent_len` <= `padded_dirent_len` due to the check above
        // so there's no need for checked arithmetic.
        let padding = padded_dirent_len - dirent_len;
        if padding > 0 {
            cursor.write_all(&DIRENT_PADDING[..padding])?;
        }

        Ok(total_len)
    }
}

/// Parses the value of the `security.selinux` attribute from the FUSE request input and returns it
/// as `Ok(Some(value))`. Returns `Ok(None)` if `buf` is empty or appears to be a valid request
/// extension that does not contain any security context information.
///
/// # Arguments
///
/// * `buf` - a byte array that contains the contents following any expected byte string parameters
///   of the FUSE request received by the server. It begins with a `SecctxHeader` struct, and each
///   subsequent entry is a `Secctx` struct followed by a nul-terminated string with the xattr name
///   and then another nul-terminated string with the value for that xattr.
///
/// # Errors
///
/// * `Error::InvalidHeaderLength` - indicates that there is an inconsistency between the size of
///   the data read from `buf` and the stated `size` of the `SecctxHeader`, the respective `Secctx`
///   struct, or `buf` itself.
/// * `Error::DecodeMessage` - indicates that the expected structs cannot be read from `buf`.
/// * `Error::MissingParameter` - indicates that either a security context `name` or `value` is
///   missing from a security context entry.
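///
/// # Layout
///
/// An illustrative sketch of the expected layout of `buf` (not normative; the exact field
/// definitions come from the `SecctxHeader` and `Secctx` structs):
///
/// ```text
/// SecctxHeader { size, nr_secctx }
/// Secctx { size, padding }  "<xattr name>\0"  "<xattr value>\0"
/// ... repeated for each of the `nr_secctx` entries ...
/// <padding up to the 8-byte-aligned total recorded in SecctxHeader.size>
/// ```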
fn parse_selinux_xattr(buf: &[u8]) -> Result<Option<&CStr>> {
    // Return early if request was not followed by context information
    if buf.is_empty() {
        return Ok(None);
    } else if buf.len() < size_of::<SecctxHeader>() {
        return Err(Error::InvalidHeaderLength);
    }

    // Because the security context data block may have been preceded by variable-length strings,
    // `SecctxHeader` and the subsequent `Secctx` structs may not be correctly byte-aligned
    // within `buf`.
    let secctx_header = SecctxHeader::read_from_prefix(buf).ok_or(Error::DecodeMessage(
        io::Error::from_raw_os_error(libc::EINVAL),
    ))?;

    // FUSE 7.38 introduced a generic request extension with the same structure as `SecctxHeader`.
    // A `nr_secctx` value above `MAX_NR_SECCTX` indicates that this data block does not contain
    // any security context information.
    if secctx_header.nr_secctx > MAX_NR_SECCTX {
        return Ok(None);
    }

    let mut cur_secctx_pos = size_of::<SecctxHeader>();
    for _ in 0..secctx_header.nr_secctx {
        // `SecctxHeader.size` denotes the total size for the `SecctxHeader`, each of the
        // `nr_secctx` `Secctx` structs along with the corresponding context name and value,
        // and any additional padding.
        if (cur_secctx_pos + size_of::<Secctx>()) > buf.len()
            || (cur_secctx_pos + size_of::<Secctx>()) > secctx_header.size as usize
        {
            return Err(Error::InvalidHeaderLength);
        }

        let secctx =
            Secctx::read_from(&buf[cur_secctx_pos..(cur_secctx_pos + size_of::<Secctx>())]).ok_or(
                Error::DecodeMessage(io::Error::from_raw_os_error(libc::EINVAL)),
            )?;

        cur_secctx_pos += size_of::<Secctx>();

        let secctx_data = &buf[cur_secctx_pos..]
            .split_inclusive(|&c| c == b'\0')
            .take(2)
            .map(bytes_to_cstr)
            .collect::<Result<Vec<&CStr>>>()?;

        if secctx_data.len() != 2 {
            return Err(Error::MissingParameter);
        }

        let name = secctx_data[0];
        let value = secctx_data[1];

        cur_secctx_pos += name.to_bytes_with_nul().len() + value.to_bytes_with_nul().len();
        if cur_secctx_pos > secctx_header.size as usize {
            return Err(Error::InvalidHeaderLength);
        }

        // `Secctx.size` contains the size of the security context value (not including the
        // corresponding context name).
        if value.to_bytes_with_nul().len() as u32 != secctx.size {
            return Err(Error::InvalidHeaderLength);
        }

        if name.to_bytes_with_nul() == SELINUX_XATTR_CSTR {
            return Ok(Some(value));
        }
    }

    // `SecctxHeader.size` is always the total size of the security context data padded to an
    // 8-byte alignment. If adding 7 causes an overflow, then the `size` field of our header
    // is invalid, so we should return an error.
    let padded_secctx_size = cur_secctx_pos
        .checked_add(7)
        .map(|l| l & !7)
        .ok_or_else(|| Error::InvalidHeaderLength)?;
    if padded_secctx_size != secctx_header.size as usize {
        return Err(Error::InvalidHeaderLength);
    }

    // None of the `nr_secctx` attributes we parsed had a `name` matching `SELINUX_XATTR_CSTR`.
    // Return `Ok(None)` to indicate that the security context data block was valid but there was
    // no specified selinux label attached to this request.
    Ok(None)
}

#[cfg(test)]
mod tests {
    use super::*;

    fn create_secctx(ctxs: &[(&[u8], &[u8])], size_truncation: u32) -> Vec<u8> {
        let nr_secctx = ctxs.len();
        let total_size = (size_of::<SecctxHeader>() as u32
            + (size_of::<Secctx>() * nr_secctx) as u32
            + ctxs
                .iter()
                .fold(0, |s, &(n, v)| s + n.len() as u32 + v.len() as u32))
        .checked_add(7)
        .map(|l| l & !7)
        .expect("total_size padded to 8-byte boundary")
        .checked_sub(size_truncation)
        .expect("size truncated by bytes < total_size");

        let ctx_data: Vec<_> = ctxs
            .iter()
            .map(|(n, v)| {
                [
                    Secctx {
                        size: v.len() as u32,
                        padding: 0,
                    }
                    .as_bytes(),
                    n,
                    v,
                ]
                .concat()
            })
            .collect::<Vec<_>>()
            .concat();

        [
            SecctxHeader {
                size: total_size,
                nr_secctx: nr_secctx as u32,
            }
            .as_bytes(),
            ctx_data.as_slice(),
        ]
        .concat()
    }

    #[test]
    fn parse_selinux_xattr_empty() {
        let v: Vec<u8> = vec![];
        let res = parse_selinux_xattr(&v);
        assert_eq!(res.unwrap(), None);
    }

    #[test]
    fn parse_selinux_xattr_basic() {
        let sec_value = CStr::from_bytes_with_nul(b"user_u:object_r:security_type:s0\0").unwrap();
        let v = create_secctx(&[(SELINUX_XATTR_CSTR, sec_value.to_bytes_with_nul())], 0);

        let res = parse_selinux_xattr(&v);
        assert_eq!(res.unwrap(), Some(sec_value));
    }

    #[test]
    fn parse_selinux_xattr_find_attr() {
        let foo_value: &CStr =
            CStr::from_bytes_with_nul(b"user_foo:object_foo:foo_type:s0\0").unwrap();
        let sec_value: &CStr =
            CStr::from_bytes_with_nul(b"user_u:object_r:security_type:s0\0").unwrap();
        let v = create_secctx(
            &[
                (b"foo\0", foo_value.to_bytes_with_nul()),
                (SELINUX_XATTR_CSTR, sec_value.to_bytes_with_nul()),
            ],
            0,
        );

        let res = parse_selinux_xattr(&v);
        assert_eq!(res.unwrap(), Some(sec_value));
    }

    #[test]
    fn parse_selinux_xattr_wrong_attr() {
        // Test with an xattr name that looks similar to security.selinux, but has extra
        // characters to ensure that `parse_selinux_xattr` will not return the associated
        // context value to the caller.
        let invalid_selinux_value: &CStr =
            CStr::from_bytes_with_nul(b"user_invalid:object_invalid:invalid_type:s0\0").unwrap();
        let v = create_secctx(
            &[(
                b"invalid.security.selinux\0",
                invalid_selinux_value.to_bytes_with_nul(),
            )],
            0,
        );

        let res = parse_selinux_xattr(&v);
        assert_eq!(res.unwrap(), None);
    }

    #[test]
    fn parse_selinux_xattr_too_short() {
        // Test that parse_selinux_xattr will return an `Error::InvalidHeaderLength` when
        // the total size in the `SecctxHeader` does not encompass the entirety of the
        // associated data.
        let foo_value: &CStr =
            CStr::from_bytes_with_nul(b"user_foo:object_foo:foo_type:s0\0").unwrap();
        let sec_value: &CStr =
            CStr::from_bytes_with_nul(b"user_u:object_r:security_type:s0\0").unwrap();
        let v = create_secctx(
            &[
                (b"foo\0", foo_value.to_bytes_with_nul()),
                (SELINUX_XATTR_CSTR, sec_value.to_bytes_with_nul()),
            ],
            8,
        );

        let res = parse_selinux_xattr(&v);
        assert!(matches!(res, Err(Error::InvalidHeaderLength)));
    }
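
    // A minimal illustrative sketch of the FUSE 7.38 generic-extension path handled in
    // `parse_selinux_xattr` (added example): a header whose `nr_secctx` exceeds `MAX_NR_SECCTX`
    // carries no security context information, so parsing should yield `Ok(None)` rather than
    // an error.
    #[test]
    fn parse_selinux_xattr_generic_extension() {
        let v = SecctxHeader {
            size: size_of::<SecctxHeader>() as u32,
            nr_secctx: MAX_NR_SECCTX + 1,
        }
        .as_bytes()
        .to_vec();

        let res = parse_selinux_xattr(&v);
        assert_eq!(res.unwrap(), None);
    }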
}