1 // Copyright 2019 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 use std::cmp::max;
6 use std::cmp::min;
7 use std::convert::TryInto;
8 use std::ffi::CStr;
9 use std::io;
10 use std::mem::size_of;
11 use std::mem::MaybeUninit;
12 use std::os::unix::io::AsRawFd;
13 use std::time::Duration;
14
15 use base::error;
16 use base::pagesize;
17 use data_model::zerocopy_from_reader;
18 use zerocopy::AsBytes;
19
20 use crate::filesystem::Context;
21 use crate::filesystem::DirEntry;
22 use crate::filesystem::DirectoryIterator;
23 use crate::filesystem::Entry;
24 use crate::filesystem::FileSystem;
25 use crate::filesystem::GetxattrReply;
26 use crate::filesystem::IoctlReply;
27 use crate::filesystem::ListxattrReply;
28 use crate::filesystem::ZeroCopyReader;
29 use crate::filesystem::ZeroCopyWriter;
30 use crate::sys::*;
31 use crate::Error;
32 use crate::Result;
33
34 const DIRENT_PADDING: [u8; 8] = [0; 8];
35
/// A trait for reading from the underlying FUSE endpoint.
///
/// This is a marker on top of [`io::Read`]; implementations supply the raw
/// bytes of a single FUSE request to the server.
pub trait Reader: io::Read {}
38
39 impl<R: Reader> Reader for &'_ mut R {}
40
/// A trait for writing to the underlying FUSE endpoint. The FUSE device expects the write
/// operation to happen in one write transaction. Since there are cases when data needs to be
/// generated earlier than the header, it implies the writer implementation to keep an internal
/// buffer. The buffer then can be flushed once header and data are both prepared.
pub trait Writer: io::Write {
    /// The type passed in to the closure in `write_at`. For most implementations, this should be
    /// `Self`.
    type ClosureWriter: Writer + ZeroCopyWriter;

    /// Allows a closure to generate and write data at the current writer's offset. The current
    /// writer is passed as a mutable reference to the closure. As an example, this provides an
    /// adapter for the read implementation of a filesystem to write directly to the final buffer
    /// without generating the FUSE header first.
    ///
    /// Notes: An alternative implementation would be to return a slightly different writer for the
    /// API client to write to the offset. Since the API needs to be called more than one time,
    /// it imposes some complexity to deal with borrowing and mutability. The current approach
    /// simply does not need to create a different writer, thus no need to deal with the mentioned
    /// complexity.
    fn write_at<F>(&mut self, offset: usize, f: F) -> io::Result<usize>
    where
        F: Fn(&mut Self::ClosureWriter) -> io::Result<usize>;

    /// Checks if the writer can still accept a certain amount of data.
    fn has_sufficient_buffer(&self, size: u32) -> bool;
}
67
// A mutable reference to a `Writer` is itself a `Writer`; all methods simply
// forward to the referent.
impl<W: Writer> Writer for &'_ mut W {
    type ClosureWriter = W::ClosureWriter;

    fn write_at<F>(&mut self, offset: usize, f: F) -> io::Result<usize>
    where
        F: Fn(&mut Self::ClosureWriter) -> io::Result<usize>,
    {
        (**self).write_at(offset, f)
    }

    fn has_sufficient_buffer(&self, size: u32) -> bool {
        (**self).has_sufficient_buffer(size)
    }
}
82
/// A trait for memory mapping for DAX.
///
/// For some transports (like virtio) it may be possible to share a region of memory with the
/// FUSE kernel driver so that it can access file contents directly without issuing read or
/// write requests. In this case the driver will instead send requests to map a section of a
/// file into the shared memory region.
pub trait Mapper {
    /// Maps `size` bytes starting at `file_offset` bytes from within the given `fd` at `mem_offset`
    /// bytes from the start of the memory region with `prot` protections. `mem_offset` must be
    /// page aligned.
    ///
    /// # Arguments
    /// * `mem_offset` - Page aligned offset into the memory region in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `file_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. `libc::PROT_READ`) of the memory region.
    fn map(
        &self,
        mem_offset: u64,
        size: usize,
        fd: &dyn AsRawFd,
        file_offset: u64,
        prot: u32,
    ) -> io::Result<()>;

    /// Unmaps `size` bytes at `offset` bytes from the start of the memory region. `offset` must be
    /// page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    fn unmap(&self, offset: u64, size: u64) -> io::Result<()>;
}
117
// A shared reference to a `Mapper` is itself a `Mapper`; both methods forward
// to the referent.
impl<'a, M: Mapper> Mapper for &'a M {
    fn map(
        &self,
        mem_offset: u64,
        size: usize,
        fd: &dyn AsRawFd,
        file_offset: u64,
        prot: u32,
    ) -> io::Result<()> {
        (**self).map(mem_offset, size, fd, file_offset, prot)
    }

    fn unmap(&self, offset: u64, size: u64) -> io::Result<()> {
        (**self).unmap(offset, size)
    }
}
134
/// Decodes FUSE requests from a [`Reader`], dispatches them to the wrapped
/// [`FileSystem`] implementation, and encodes the replies to a [`Writer`].
pub struct Server<F: FileSystem + Sync> {
    // The filesystem implementation that services each decoded request.
    fs: F,
}
138
139 impl<F: FileSystem + Sync> Server<F> {
    /// Creates a new `Server` that dispatches FUSE requests to `fs`.
    pub fn new(fs: F) -> Server<F> {
        Server { fs }
    }
143
    /// Reads a single FUSE request from `r`, dispatches it to the matching
    /// `FileSystem` method, and writes the reply (if the opcode produces one)
    /// to `w`.
    ///
    /// `mapper` is only used by the DAX opcodes (`SetUpMapping` /
    /// `RemoveMapping`). Returns the number of bytes written as the reply.
    pub fn handle_message<R: Reader + ZeroCopyReader, W: Writer + ZeroCopyWriter, M: Mapper>(
        &self,
        mut r: R,
        w: W,
        mapper: M,
    ) -> Result<usize> {
        let in_header: InHeader = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
        cros_tracing::trace_simple_print!("fuse server: handle_message: in_header={:?}", in_header);

        // Reject requests whose declared length exceeds what the filesystem is
        // willing to buffer (request header + write header + payload).
        if in_header.len
            > size_of::<InHeader>() as u32 + size_of::<WriteIn>() as u32 + self.fs.max_buffer_size()
        {
            return reply_error(
                io::Error::from_raw_os_error(libc::ENOMEM),
                in_header.unique,
                w,
            );
        }
        match Opcode::n(in_header.opcode) {
            Some(Opcode::Lookup) => self.lookup(in_header, r, w),
            Some(Opcode::Forget) => self.forget(in_header, r), // No reply.
            Some(Opcode::Getattr) => self.getattr(in_header, r, w),
            Some(Opcode::Setattr) => self.setattr(in_header, r, w),
            Some(Opcode::Readlink) => self.readlink(in_header, w),
            Some(Opcode::Symlink) => self.symlink(in_header, r, w),
            Some(Opcode::Mknod) => self.mknod(in_header, r, w),
            Some(Opcode::Mkdir) => self.mkdir(in_header, r, w),
            Some(Opcode::Unlink) => self.unlink(in_header, r, w),
            Some(Opcode::Rmdir) => self.rmdir(in_header, r, w),
            Some(Opcode::Rename) => self.rename(in_header, r, w),
            Some(Opcode::Link) => self.link(in_header, r, w),
            Some(Opcode::Open) => self.open(in_header, r, w),
            Some(Opcode::Read) => self.read(in_header, r, w),
            Some(Opcode::Write) => self.write(in_header, r, w),
            Some(Opcode::Statfs) => self.statfs(in_header, w),
            Some(Opcode::Release) => self.release(in_header, r, w),
            Some(Opcode::Fsync) => self.fsync(in_header, r, w),
            Some(Opcode::Setxattr) => self.setxattr(in_header, r, w),
            Some(Opcode::Getxattr) => self.getxattr(in_header, r, w),
            Some(Opcode::Listxattr) => self.listxattr(in_header, r, w),
            Some(Opcode::Removexattr) => self.removexattr(in_header, r, w),
            Some(Opcode::Flush) => self.flush(in_header, r, w),
            Some(Opcode::Init) => self.init(in_header, r, w),
            Some(Opcode::Opendir) => self.opendir(in_header, r, w),
            Some(Opcode::Readdir) => self.readdir(in_header, r, w),
            Some(Opcode::Releasedir) => self.releasedir(in_header, r, w),
            Some(Opcode::Fsyncdir) => self.fsyncdir(in_header, r, w),
            Some(Opcode::Getlk) => self.getlk(in_header, r, w),
            Some(Opcode::Setlk) => self.setlk(in_header, r, w),
            Some(Opcode::Setlkw) => self.setlkw(in_header, r, w),
            Some(Opcode::Access) => self.access(in_header, r, w),
            Some(Opcode::Create) => self.create(in_header, r, w),
            Some(Opcode::Interrupt) => self.interrupt(in_header),
            Some(Opcode::Bmap) => self.bmap(in_header, r, w),
            Some(Opcode::Destroy) => self.destroy(),
            Some(Opcode::Ioctl) => self.ioctl(in_header, r, w),
            Some(Opcode::Poll) => self.poll(in_header, r, w),
            Some(Opcode::NotifyReply) => self.notify_reply(in_header, r, w),
            Some(Opcode::BatchForget) => self.batch_forget(in_header, r, w),
            Some(Opcode::Fallocate) => self.fallocate(in_header, r, w),
            Some(Opcode::Readdirplus) => self.readdirplus(in_header, r, w),
            Some(Opcode::Rename2) => self.rename2(in_header, r, w),
            Some(Opcode::Lseek) => self.lseek(in_header, r, w),
            Some(Opcode::CopyFileRange) => self.copy_file_range(in_header, r, w),
            Some(Opcode::ChromeOsTmpfile) => self.chromeos_tmpfile(in_header, r, w),
            Some(Opcode::SetUpMapping) => self.set_up_mapping(in_header, r, w, mapper),
            Some(Opcode::RemoveMapping) => self.remove_mapping(in_header, r, w, mapper),
            // Unknown opcode: report "not implemented" to the driver.
            None => reply_error(
                io::Error::from_raw_os_error(libc::ENOSYS),
                in_header.unique,
                w,
            ),
        }
    }
218
lookup<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>219 fn lookup<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
220 let namelen = (in_header.len as usize)
221 .checked_sub(size_of::<InHeader>())
222 .ok_or(Error::InvalidHeaderLength)?;
223
224 let mut buf = vec![0; namelen];
225
226 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
227
228 let name = bytes_to_cstr(&buf)?;
229
230 match self
231 .fs
232 .lookup(Context::from(in_header), in_header.nodeid.into(), name)
233 {
234 Ok(entry) => {
235 let out = EntryOut::from(entry);
236
237 reply_ok(Some(out), None, in_header.unique, w)
238 }
239 Err(e) => reply_error(e, in_header.unique, w),
240 }
241 }
242
forget<R: Reader>(&self, in_header: InHeader, mut r: R) -> Result<usize>243 fn forget<R: Reader>(&self, in_header: InHeader, mut r: R) -> Result<usize> {
244 let ForgetIn { nlookup } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
245
246 self.fs
247 .forget(Context::from(in_header), in_header.nodeid.into(), nlookup);
248
249 // There is no reply for forget messages.
250 Ok(0)
251 }
252
    /// Handles a getattr request and replies with the file's attributes plus
    /// the duration for which the driver may cache them.
    fn getattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let GetattrIn {
            flags,
            dummy: _,
            fh,
        } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // The file handle is only meaningful when the driver set GETATTR_FH.
        let handle = if (flags & GETATTR_FH) != 0 {
            Some(fh.into())
        } else {
            None
        };

        match self
            .fs
            .getattr(Context::from(in_header), in_header.nodeid.into(), handle)
        {
            Ok((st, timeout)) => {
                // `timeout` controls how long the returned attributes stay valid.
                let out = AttrOut {
                    attr_valid: timeout.as_secs(),
                    attr_valid_nsec: timeout.subsec_nanos(),
                    dummy: 0,
                    attr: st.into(),
                };
                reply_ok(Some(out), None, in_header.unique, w)
            }
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
282
    /// Handles a setattr request, updating the attributes selected by the
    /// request's `valid` bitmask and replying with the resulting attributes.
    fn setattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let setattr_in: SetattrIn = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // The file handle is only meaningful when FATTR_FH is set.
        let handle = if setattr_in.valid & FATTR_FH != 0 {
            Some(setattr_in.fh.into())
        } else {
            None
        };

        // Silently drop any valid-bits we don't recognize rather than failing.
        let valid = SetattrValid::from_bits_truncate(setattr_in.valid);

        let st: libc::stat64 = setattr_in.into();

        match self.fs.setattr(
            Context::from(in_header),
            in_header.nodeid.into(),
            st,
            handle,
            valid,
        ) {
            Ok((st, timeout)) => {
                // Same reply shape as getattr: attributes + cache validity window.
                let out = AttrOut {
                    attr_valid: timeout.as_secs(),
                    attr_valid_nsec: timeout.subsec_nanos(),
                    dummy: 0,
                    attr: st.into(),
                };
                reply_ok(Some(out), None, in_header.unique, w)
            }
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
315
readlink<W: Writer>(&self, in_header: InHeader, w: W) -> Result<usize>316 fn readlink<W: Writer>(&self, in_header: InHeader, w: W) -> Result<usize> {
317 match self
318 .fs
319 .readlink(Context::from(in_header), in_header.nodeid.into())
320 {
321 Ok(linkname) => {
322 // We need to disambiguate the option type here even though it is `None`.
323 reply_ok(None::<u8>, Some(&linkname), in_header.unique, w)
324 }
325 Err(e) => reply_error(e, in_header.unique, w),
326 }
327 }
328
    /// Handles a symlink request. Note the argument order: `fs.symlink` takes
    /// the link target (`linkname`) before the new entry's `name`.
    fn symlink<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        // Unfortunately the name and linkname are encoded one after another and
        // separated by a nul character.
        let len = (in_header.len as usize)
            .checked_sub(size_of::<InHeader>())
            .ok_or(Error::InvalidHeaderLength)?;
        let mut buf = vec![0; len];

        r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;

        // First nul-terminated component is the new entry's name, the second
        // is the target the link points at.
        let mut iter = split_inclusive(&buf, |&c| c == b'\0');
        let name = iter
            .next()
            .ok_or(Error::MissingParameter)
            .and_then(bytes_to_cstr)?;
        let linkname = iter
            .next()
            .ok_or(Error::MissingParameter)
            .and_then(bytes_to_cstr)?;

        match self.fs.symlink(
            Context::from(in_header),
            linkname,
            in_header.nodeid.into(),
            name,
        ) {
            Ok(entry) => {
                let out = EntryOut::from(entry);

                reply_ok(Some(out), None, in_header.unique, w)
            }
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
363
    /// Handles a mknod request. The payload is a `MknodIn` struct followed by
    /// the nul-terminated name of the node to create.
    fn mknod<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let MknodIn {
            mode, rdev, umask, ..
        } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // The name occupies whatever is left after the header and MknodIn struct.
        let buflen = (in_header.len as usize)
            .checked_sub(size_of::<InHeader>())
            .and_then(|l| l.checked_sub(size_of::<MknodIn>()))
            .ok_or(Error::InvalidHeaderLength)?;
        let mut buf = vec![0; buflen];

        r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;

        // Only the first nul-terminated component is the name.
        let mut iter = split_inclusive(&buf, |&c| c == b'\0');
        let name = iter
            .next()
            .ok_or(Error::MissingParameter)
            .and_then(bytes_to_cstr)?;

        match self.fs.mknod(
            Context::from(in_header),
            in_header.nodeid.into(),
            name,
            mode,
            rdev,
            umask,
        ) {
            Ok(entry) => {
                let out = EntryOut::from(entry);

                reply_ok(Some(out), None, in_header.unique, w)
            }
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
399
mkdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>400 fn mkdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
401 let MkdirIn { mode, umask } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
402
403 let buflen = (in_header.len as usize)
404 .checked_sub(size_of::<InHeader>())
405 .and_then(|l| l.checked_sub(size_of::<MkdirIn>()))
406 .ok_or(Error::InvalidHeaderLength)?;
407 let mut buf = vec![0; buflen];
408
409 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
410
411 let mut iter = split_inclusive(&buf, |&c| c == b'\0');
412 let name = iter
413 .next()
414 .ok_or(Error::MissingParameter)
415 .and_then(bytes_to_cstr)?;
416
417 match self.fs.mkdir(
418 Context::from(in_header),
419 in_header.nodeid.into(),
420 name,
421 mode,
422 umask,
423 ) {
424 Ok(entry) => {
425 let out = EntryOut::from(entry);
426
427 reply_ok(Some(out), None, in_header.unique, w)
428 }
429 Err(e) => reply_error(e, in_header.unique, w),
430 }
431 }
432
chromeos_tmpfile<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize>433 fn chromeos_tmpfile<R: Reader, W: Writer>(
434 &self,
435 in_header: InHeader,
436 mut r: R,
437 w: W,
438 ) -> Result<usize> {
439 let ChromeOsTmpfileIn { mode, umask } =
440 zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
441
442 match self.fs.chromeos_tmpfile(
443 Context::from(in_header),
444 in_header.nodeid.into(),
445 mode,
446 umask,
447 ) {
448 Ok(entry) => {
449 let out = EntryOut::from(entry);
450
451 reply_ok(Some(out), None, in_header.unique, w)
452 }
453 Err(e) => reply_error(e, in_header.unique, w),
454 }
455 }
456
unlink<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>457 fn unlink<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
458 let namelen = (in_header.len as usize)
459 .checked_sub(size_of::<InHeader>())
460 .ok_or(Error::InvalidHeaderLength)?;
461 let mut name = vec![0; namelen];
462
463 r.read_exact(&mut name).map_err(Error::DecodeMessage)?;
464
465 match self.fs.unlink(
466 Context::from(in_header),
467 in_header.nodeid.into(),
468 bytes_to_cstr(&name)?,
469 ) {
470 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
471 Err(e) => reply_error(e, in_header.unique, w),
472 }
473 }
474
rmdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>475 fn rmdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
476 let namelen = (in_header.len as usize)
477 .checked_sub(size_of::<InHeader>())
478 .ok_or(Error::InvalidHeaderLength)?;
479 let mut name = vec![0; namelen];
480
481 r.read_exact(&mut name).map_err(Error::DecodeMessage)?;
482
483 match self.fs.rmdir(
484 Context::from(in_header),
485 in_header.nodeid.into(),
486 bytes_to_cstr(&name)?,
487 ) {
488 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
489 Err(e) => reply_error(e, in_header.unique, w),
490 }
491 }
492
    /// Shared implementation for `rename` and `rename2`.
    ///
    /// `msg_size` is the size of the opcode-specific input struct that
    /// precedes the payload; the payload itself is the old name and the new
    /// name, nul-terminated and concatenated.
    fn do_rename<R: Reader, W: Writer>(
        &self,
        in_header: InHeader,
        msg_size: usize,
        newdir: u64,
        flags: u32,
        mut r: R,
        w: W,
    ) -> Result<usize> {
        let buflen = (in_header.len as usize)
            .checked_sub(size_of::<InHeader>())
            .and_then(|l| l.checked_sub(msg_size))
            .ok_or(Error::InvalidHeaderLength)?;
        let mut buf = vec![0; buflen];

        r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;

        // We want to include the '\0' byte in the first slice.
        let split_pos = buf
            .iter()
            .position(|c| *c == b'\0')
            .map(|p| p + 1)
            .ok_or(Error::MissingParameter)?;

        let (oldname, newname) = buf.split_at(split_pos);

        match self.fs.rename(
            Context::from(in_header),
            in_header.nodeid.into(),
            bytes_to_cstr(oldname)?,
            newdir.into(),
            bytes_to_cstr(newname)?,
            flags,
        ) {
            Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
531
    /// Handles a plain rename request (no flags).
    fn rename<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let RenameIn { newdir } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // The legacy rename opcode carries no flags.
        self.do_rename(in_header, size_of::<RenameIn>(), newdir, 0, r, w)
    }
537
    /// Handles a rename2 request, which additionally carries rename flags.
    fn rename2<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let Rename2In { newdir, flags, .. } =
            zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // Mask down to the flags this server forwards; anything else is dropped.
        let flags = flags & (libc::RENAME_EXCHANGE | libc::RENAME_NOREPLACE) as u32;

        self.do_rename(in_header, size_of::<Rename2In>(), newdir, flags, r, w)
    }
546
link<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>547 fn link<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
548 let LinkIn { oldnodeid } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
549
550 let namelen = (in_header.len as usize)
551 .checked_sub(size_of::<InHeader>())
552 .and_then(|l| l.checked_sub(size_of::<LinkIn>()))
553 .ok_or(Error::InvalidHeaderLength)?;
554 let mut name = vec![0; namelen];
555
556 r.read_exact(&mut name).map_err(Error::DecodeMessage)?;
557
558 match self.fs.link(
559 Context::from(in_header),
560 oldnodeid.into(),
561 in_header.nodeid.into(),
562 bytes_to_cstr(&name)?,
563 ) {
564 Ok(entry) => {
565 let out = EntryOut::from(entry);
566
567 reply_ok(Some(out), None, in_header.unique, w)
568 }
569 Err(e) => reply_error(e, in_header.unique, w),
570 }
571 }
572
open<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>573 fn open<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
574 let OpenIn { flags, .. } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
575
576 match self
577 .fs
578 .open(Context::from(in_header), in_header.nodeid.into(), flags)
579 {
580 Ok((handle, opts)) => {
581 let out = OpenOut {
582 fh: handle.map(Into::into).unwrap_or(0),
583 open_flags: opts.bits(),
584 ..Default::default()
585 };
586
587 reply_ok(Some(out), None, in_header.unique, w)
588 }
589 Err(e) => reply_error(e, in_header.unique, w),
590 }
591 }
592
    /// Handles a read request.
    ///
    /// The file data is written by the filesystem directly into the reply
    /// buffer (via `Writer::write_at`) *before* the reply header, because the
    /// header's `len` field can only be filled in once the actual byte count
    /// is known.
    fn read<R: Reader, W: ZeroCopyWriter + Writer>(
        &self,
        in_header: InHeader,
        mut r: R,
        mut w: W,
    ) -> Result<usize> {
        let ReadIn {
            fh,
            offset,
            size,
            read_flags,
            lock_owner,
            flags,
            ..
        } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // Refuse reads larger than the filesystem's buffer limit.
        if size > self.fs.max_buffer_size() {
            return reply_error(
                io::Error::from_raw_os_error(libc::ENOMEM),
                in_header.unique,
                w,
            );
        }

        // The lock owner is only valid when READ_LOCKOWNER is set.
        let owner = if read_flags & READ_LOCKOWNER != 0 {
            Some(lock_owner)
        } else {
            None
        };

        // Skip for the header size to write the data first.
        match w.write_at(size_of::<OutHeader>(), |writer| {
            self.fs.read(
                Context::from(in_header),
                in_header.nodeid.into(),
                fh.into(),
                writer,
                size,
                offset,
                owner,
                flags,
            )
        }) {
            Ok(count) => {
                // Don't use `reply_ok` because we need to set a custom size length for the
                // header.
                let out = OutHeader {
                    len: (size_of::<OutHeader>() + count) as u32,
                    error: 0,
                    unique: in_header.unique,
                };

                w.write_all(out.as_bytes()).map_err(Error::EncodeMessage)?;
                w.flush().map_err(Error::FlushMessage)?;
                Ok(out.len as usize)
            }
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
652
    /// Handles a write request by passing the reader (still positioned at the
    /// payload) to the filesystem, then replying with the byte count written.
    fn write<R: Reader + ZeroCopyReader, W: Writer>(
        &self,
        in_header: InHeader,
        mut r: R,
        w: W,
    ) -> Result<usize> {
        let WriteIn {
            fh,
            offset,
            size,
            write_flags,
            lock_owner,
            flags,
            ..
        } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // Refuse writes larger than the filesystem's buffer limit.
        if size > self.fs.max_buffer_size() {
            return reply_error(
                io::Error::from_raw_os_error(libc::ENOMEM),
                in_header.unique,
                w,
            );
        }

        // The lock owner is only valid when WRITE_LOCKOWNER is set.
        let owner = if write_flags & WRITE_LOCKOWNER != 0 {
            Some(lock_owner)
        } else {
            None
        };

        // WRITE_CACHE marks this as a delayed (writeback-cache) write.
        let delayed_write = write_flags & WRITE_CACHE != 0;

        match self.fs.write(
            Context::from(in_header),
            in_header.nodeid.into(),
            fh.into(),
            r,
            size,
            offset,
            owner,
            delayed_write,
            flags,
        ) {
            Ok(count) => {
                let out = WriteOut {
                    size: count as u32,
                    ..Default::default()
                };

                reply_ok(Some(out), None, in_header.unique, w)
            }
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
707
statfs<W: Writer>(&self, in_header: InHeader, w: W) -> Result<usize>708 fn statfs<W: Writer>(&self, in_header: InHeader, w: W) -> Result<usize> {
709 match self
710 .fs
711 .statfs(Context::from(in_header), in_header.nodeid.into())
712 {
713 Ok(st) => reply_ok(Some(Kstatfs::from(st)), None, in_header.unique, w),
714 Err(e) => reply_error(e, in_header.unique, w),
715 }
716 }
717
release<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>718 fn release<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
719 let ReleaseIn {
720 fh,
721 flags,
722 release_flags,
723 lock_owner,
724 } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
725
726 let flush = release_flags & RELEASE_FLUSH != 0;
727 let flock_release = release_flags & RELEASE_FLOCK_UNLOCK != 0;
728 let lock_owner = if flush || flock_release {
729 Some(lock_owner)
730 } else {
731 None
732 };
733
734 match self.fs.release(
735 Context::from(in_header),
736 in_header.nodeid.into(),
737 flags,
738 fh.into(),
739 flush,
740 flock_release,
741 lock_owner,
742 ) {
743 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
744 Err(e) => reply_error(e, in_header.unique, w),
745 }
746 }
747
fsync<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>748 fn fsync<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
749 let FsyncIn {
750 fh, fsync_flags, ..
751 } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
752 let datasync = fsync_flags & 0x1 != 0;
753
754 match self.fs.fsync(
755 Context::from(in_header),
756 in_header.nodeid.into(),
757 datasync,
758 fh.into(),
759 ) {
760 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
761 Err(e) => reply_error(e, in_header.unique, w),
762 }
763 }
764
    /// Handles a setxattr request. The payload is a `SetxattrIn` struct
    /// followed by the nul-terminated attribute name and then the raw value.
    fn setxattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let SetxattrIn { size, flags } =
            zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // The name and value are encoded one after another and separated by a '\0' character.
        let len = (in_header.len as usize)
            .checked_sub(size_of::<InHeader>())
            .and_then(|l| l.checked_sub(size_of::<SetxattrIn>()))
            .ok_or(Error::InvalidHeaderLength)?;
        let mut buf = vec![0; len];

        r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;

        // We want to include the '\0' byte in the first slice.
        let split_pos = buf
            .iter()
            .position(|c| *c == b'\0')
            .map(|p| p + 1)
            .ok_or(Error::MissingParameter)?;

        let (name, value) = buf.split_at(split_pos);

        // The value size declared in SetxattrIn must match what was actually sent.
        if size != value.len() as u32 {
            return Err(Error::InvalidXattrSize(size, value.len()));
        }

        match self.fs.setxattr(
            Context::from(in_header),
            in_header.nodeid.into(),
            bytes_to_cstr(name)?,
            value,
            flags,
        ) {
            Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
802
    /// Handles a getxattr request. The filesystem replies either with the
    /// attribute value itself or with just the size required to hold it.
    fn getxattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let GetxattrIn { size, .. } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // The attribute name follows the fixed-size GetxattrIn struct.
        let namelen = (in_header.len as usize)
            .checked_sub(size_of::<InHeader>())
            .and_then(|l| l.checked_sub(size_of::<GetxattrIn>()))
            .ok_or(Error::InvalidHeaderLength)?;
        let mut name = vec![0; namelen];

        r.read_exact(&mut name).map_err(Error::DecodeMessage)?;

        // Refuse replies that would exceed the filesystem's buffer limit.
        if size > self.fs.max_buffer_size() {
            return reply_error(
                io::Error::from_raw_os_error(libc::ENOMEM),
                in_header.unique,
                w,
            );
        }

        match self.fs.getxattr(
            Context::from(in_header),
            in_header.nodeid.into(),
            bytes_to_cstr(&name)?,
            size,
        ) {
            // The raw value bytes are sent as the reply data.
            Ok(GetxattrReply::Value(val)) => reply_ok(None::<u8>, Some(&val), in_header.unique, w),
            // Only the required size is sent back.
            Ok(GetxattrReply::Count(count)) => {
                let out = GetxattrOut {
                    size: count,
                    ..Default::default()
                };

                reply_ok(Some(out), None, in_header.unique, w)
            }
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
840
listxattr<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize>841 fn listxattr<R: Reader, W: Writer>(
842 &self,
843 in_header: InHeader,
844 mut r: R,
845 w: W,
846 ) -> Result<usize> {
847 let GetxattrIn { size, .. } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
848
849 if size > self.fs.max_buffer_size() {
850 return reply_error(
851 io::Error::from_raw_os_error(libc::ENOMEM),
852 in_header.unique,
853 w,
854 );
855 }
856
857 match self
858 .fs
859 .listxattr(Context::from(in_header), in_header.nodeid.into(), size)
860 {
861 Ok(ListxattrReply::Names(val)) => reply_ok(None::<u8>, Some(&val), in_header.unique, w),
862 Ok(ListxattrReply::Count(count)) => {
863 let out = GetxattrOut {
864 size: count,
865 ..Default::default()
866 };
867
868 reply_ok(Some(out), None, in_header.unique, w)
869 }
870 Err(e) => reply_error(e, in_header.unique, w),
871 }
872 }
873
removexattr<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize>874 fn removexattr<R: Reader, W: Writer>(
875 &self,
876 in_header: InHeader,
877 mut r: R,
878 w: W,
879 ) -> Result<usize> {
880 let namelen = (in_header.len as usize)
881 .checked_sub(size_of::<InHeader>())
882 .ok_or(Error::InvalidHeaderLength)?;
883
884 let mut buf = vec![0; namelen];
885
886 r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;
887
888 let name = bytes_to_cstr(&buf)?;
889
890 match self
891 .fs
892 .removexattr(Context::from(in_header), in_header.nodeid.into(), name)
893 {
894 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
895 Err(e) => reply_error(e, in_header.unique, w),
896 }
897 }
898
flush<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>899 fn flush<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
900 let FlushIn {
901 fh,
902 unused: _,
903 padding: _,
904 lock_owner,
905 } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
906
907 match self.fs.flush(
908 Context::from(in_header),
909 in_header.nodeid.into(),
910 fh.into(),
911 lock_owner,
912 ) {
913 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
914 Err(e) => reply_error(e, in_header.unique, w),
915 }
916 }
917
    // Handles FUSE_INIT: negotiates the protocol version and the feature flag
    // set with the kernel, then reports the server's buffer limits.
    fn init<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        cros_tracing::trace_simple_print!("fuse server: init: in_header={:?}", in_header);
        let InitIn {
            major,
            minor,
            max_readahead,
            flags,
        } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // Major versions older than ours cannot be supported at all.
        if major < KERNEL_VERSION {
            error!("Unsupported fuse protocol version: {}.{}", major, minor);
            return reply_error(
                io::Error::from_raw_os_error(libc::EPROTO),
                in_header.unique,
                w,
            );
        }

        if major > KERNEL_VERSION {
            // Wait for the kernel to reply back with a 7.X version.
            let out = InitOut {
                major: KERNEL_VERSION,
                minor: KERNEL_MINOR_VERSION,
                ..Default::default()
            };

            return reply_ok(Some(out), None, in_header.unique, w);
        }

        if minor < OLDEST_SUPPORTED_KERNEL_MINOR_VERSION {
            error!(
                "Unsupported fuse protocol minor version: {}.{}",
                major, minor
            );
            return reply_error(
                io::Error::from_raw_os_error(libc::EPROTO),
                in_header.unique,
                w,
            );
        }

        // These fuse features are supported by this server by default.
        let supported = FsOptions::ASYNC_READ
            | FsOptions::PARALLEL_DIROPS
            | FsOptions::BIG_WRITES
            | FsOptions::AUTO_INVAL_DATA
            | FsOptions::HANDLE_KILLPRIV
            | FsOptions::ASYNC_DIO
            | FsOptions::HAS_IOCTL_DIR
            | FsOptions::DO_READDIRPLUS
            | FsOptions::READDIRPLUS_AUTO
            | FsOptions::ATOMIC_O_TRUNC
            | FsOptions::MAX_PAGES
            | FsOptions::MAP_ALIGNMENT;

        // Flags the kernel advertised; unknown bits are silently dropped.
        let capable = FsOptions::from_bits_truncate(flags);

        match self.fs.init(capable) {
            Ok(want) => {
                // Enable only what the kernel is capable of AND either the
                // filesystem requested or the server supports by default.
                let mut enabled = capable & (want | supported);

                // HANDLE_KILLPRIV doesn't work correctly when writeback caching is enabled so turn
                // it off.
                if enabled.contains(FsOptions::WRITEBACK_CACHE) {
                    enabled.remove(FsOptions::HANDLE_KILLPRIV);
                }

                // ATOMIC_O_TRUNC doesn't work with ZERO_MESSAGE_OPEN.
                if enabled.contains(FsOptions::ZERO_MESSAGE_OPEN) {
                    enabled.remove(FsOptions::ATOMIC_O_TRUNC);
                }

                let max_write = self.fs.max_buffer_size();
                // Page count for the largest transfer, clamped to fit in a u16.
                let max_pages = min(
                    max(max_readahead, max_write) / pagesize() as u32,
                    u16::MAX as u32,
                ) as u16;
                let out = InitOut {
                    major: KERNEL_VERSION,
                    minor: KERNEL_MINOR_VERSION,
                    max_readahead,
                    flags: enabled.bits(),
                    max_background: ::std::u16::MAX,
                    congestion_threshold: (::std::u16::MAX / 4) * 3,
                    max_write,
                    time_gran: 1, // nanoseconds
                    max_pages,
                    map_alignment: pagesize().trailing_zeros() as u16,
                    ..Default::default()
                };

                reply_ok(Some(out), None, in_header.unique, w)
            }
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
1014
opendir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>1015 fn opendir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
1016 let OpenIn { flags, .. } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
1017
1018 match self
1019 .fs
1020 .opendir(Context::from(in_header), in_header.nodeid.into(), flags)
1021 {
1022 Ok((handle, opts)) => {
1023 let out = OpenOut {
1024 fh: handle.map(Into::into).unwrap_or(0),
1025 open_flags: opts.bits(),
1026 ..Default::default()
1027 };
1028
1029 reply_ok(Some(out), None, in_header.unique, w)
1030 }
1031 Err(e) => reply_error(e, in_header.unique, w),
1032 }
1033 }
1034
    // Handles FUSE_READDIR: writes directory entries after a reserved header
    // slot, then fills in the header once the final byte count is known.
    fn readdir<R: Reader, W: Writer>(
        &self,
        in_header: InHeader,
        mut r: R,
        mut w: W,
    ) -> Result<usize> {
        let ReadIn {
            fh, offset, size, ..
        } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // Reject requests larger than the filesystem's maximum transfer size.
        if size > self.fs.max_buffer_size() {
            return reply_error(
                io::Error::from_raw_os_error(libc::ENOMEM),
                in_header.unique,
                w,
            );
        }

        // The entire reply must fit in the writer's buffer in one transaction.
        if !w.has_sufficient_buffer(size) {
            return reply_error(
                io::Error::from_raw_os_error(libc::ENOMEM),
                in_header.unique,
                w,
            );
        }

        // Skip over enough bytes for the header.
        let unique = in_header.unique;
        let result = w.write_at(size_of::<OutHeader>(), |cursor| {
            match self.fs.readdir(
                Context::from(in_header),
                in_header.nodeid.into(),
                fh.into(),
                size,
                offset,
            ) {
                Ok(mut entries) => {
                    let mut total_written = 0;
                    while let Some(dirent) = entries.next() {
                        // Never hand add_dirent more space than the request allows.
                        let remaining = (size as usize).saturating_sub(total_written);
                        match add_dirent(cursor, remaining, &dirent, None) {
                            // No more space left in the buffer.
                            Ok(0) => break,
                            Ok(bytes_written) => {
                                total_written += bytes_written;
                            }
                            Err(e) => return Err(e),
                        }
                    }
                    Ok(total_written)
                }
                Err(e) => Err(e),
            }
        });

        match result {
            Ok(total_written) => reply_readdir(total_written, unique, w),
            Err(e) => reply_error(e, unique, w),
        }
    }
1095
    // Produces the `Entry` that accompanies a READDIRPLUS dirent: a real
    // lookup for ordinary names, or a synthesized negative entry for "." and
    // ".." (which must not bump lookup counts).
    fn lookup_dirent_attribute<'d>(
        &self,
        in_header: &InHeader,
        dir_entry: &DirEntry<'d>,
    ) -> io::Result<Entry> {
        let parent = in_header.nodeid.into();
        let name = dir_entry.name.to_bytes();
        let entry = if name == b"." || name == b".." {
            // Don't do lookups on the current directory or the parent directory. Safe because
            // this only contains integer fields and any value is valid.
            let mut attr = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
            attr.st_ino = dir_entry.ino;
            attr.st_mode = dir_entry.type_;

            // We use 0 for the inode value to indicate a negative entry.
            Entry {
                inode: 0,
                generation: 0,
                attr,
                attr_timeout: Duration::from_secs(0),
                entry_timeout: Duration::from_secs(0),
            }
        } else {
            self.fs
                .lookup(Context::from(*in_header), parent, dir_entry.name)?
        };

        Ok(entry)
    }
1125
    // Handles FUSE_READDIRPLUS: like readdir, but each dirent is preceded by a
    // full `EntryOut` obtained via lookup. Successful lookups increment the
    // inode's lookup count, so any entry that doesn't make it into the reply
    // must be balanced with a `forget`.
    fn readdirplus<R: Reader, W: Writer>(
        &self,
        in_header: InHeader,
        mut r: R,
        mut w: W,
    ) -> Result<usize> {
        cros_tracing::trace_simple_print!("fuse server: readdirplus: in_header={:?}", in_header);
        let ReadIn {
            fh, offset, size, ..
        } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // Reject requests larger than the filesystem's maximum transfer size.
        if size > self.fs.max_buffer_size() {
            return reply_error(
                io::Error::from_raw_os_error(libc::ENOMEM),
                in_header.unique,
                w,
            );
        }

        // The entire reply must fit in the writer's buffer in one transaction.
        if !w.has_sufficient_buffer(size) {
            return reply_error(
                io::Error::from_raw_os_error(libc::ENOMEM),
                in_header.unique,
                w,
            );
        }

        // Skip over enough bytes for the header.
        let unique = in_header.unique;
        let result = w.write_at(size_of::<OutHeader>(), |cursor| {
            match self.fs.readdir(
                Context::from(in_header),
                in_header.nodeid.into(),
                fh.into(),
                size,
                offset,
            ) {
                Ok(mut entries) => {
                    let mut total_written = 0;
                    while let Some(dirent) = entries.next() {
                        // Records the inode of a successful lookup so the count
                        // can be undone if the entry is ultimately dropped.
                        let mut entry_inode = None;
                        match self
                            .lookup_dirent_attribute(&in_header, &dirent)
                            .and_then(|e| {
                                entry_inode = Some(e.inode);
                                let remaining = (size as usize).saturating_sub(total_written);
                                add_dirent(cursor, remaining, &dirent, Some(e))
                            }) {
                            Ok(0) => {
                                // No more space left in the buffer but we need to undo the lookup
                                // that created the Entry or we will end up with mismatched lookup
                                // counts.
                                if let Some(inode) = entry_inode {
                                    self.fs.forget(Context::from(in_header), inode.into(), 1);
                                }
                                break;
                            }
                            Ok(bytes_written) => {
                                total_written += bytes_written;
                            }
                            Err(e) => {
                                if let Some(inode) = entry_inode {
                                    self.fs.forget(Context::from(in_header), inode.into(), 1);
                                }

                                if total_written == 0 {
                                    // We haven't filled any entries yet so we can just propagate
                                    // the error.
                                    return Err(e);
                                }

                                // We already filled in some entries. Returning an error now will
                                // cause lookup count mismatches for those entries so just return
                                // whatever we already have.
                                break;
                            }
                        }
                    }
                    Ok(total_written)
                }
                Err(e) => Err(e),
            }
        });

        match result {
            Ok(total_written) => reply_readdir(total_written, unique, w),
            Err(e) => reply_error(e, unique, w),
        }
    }
1215
releasedir<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize>1216 fn releasedir<R: Reader, W: Writer>(
1217 &self,
1218 in_header: InHeader,
1219 mut r: R,
1220 w: W,
1221 ) -> Result<usize> {
1222 let ReleaseIn { fh, flags, .. } =
1223 zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
1224
1225 match self.fs.releasedir(
1226 Context::from(in_header),
1227 in_header.nodeid.into(),
1228 flags,
1229 fh.into(),
1230 ) {
1231 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
1232 Err(e) => reply_error(e, in_header.unique, w),
1233 }
1234 }
1235
fsyncdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>1236 fn fsyncdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
1237 let FsyncIn {
1238 fh, fsync_flags, ..
1239 } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
1240 let datasync = fsync_flags & 0x1 != 0;
1241
1242 match self.fs.fsyncdir(
1243 Context::from(in_header),
1244 in_header.nodeid.into(),
1245 datasync,
1246 fh.into(),
1247 ) {
1248 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
1249 Err(e) => reply_error(e, in_header.unique, w),
1250 }
1251 }
1252
getlk<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize>1253 fn getlk<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1254 if let Err(e) = self.fs.getlk() {
1255 reply_error(e, in_header.unique, w)
1256 } else {
1257 Ok(0)
1258 }
1259 }
1260
setlk<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize>1261 fn setlk<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1262 if let Err(e) = self.fs.setlk() {
1263 reply_error(e, in_header.unique, w)
1264 } else {
1265 Ok(0)
1266 }
1267 }
1268
setlkw<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize>1269 fn setlkw<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1270 if let Err(e) = self.fs.setlkw() {
1271 reply_error(e, in_header.unique, w)
1272 } else {
1273 Ok(0)
1274 }
1275 }
1276
access<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize>1277 fn access<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
1278 let AccessIn { mask, .. } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
1279
1280 match self
1281 .fs
1282 .access(Context::from(in_header), in_header.nodeid.into(), mask)
1283 {
1284 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
1285 Err(e) => reply_error(e, in_header.unique, w),
1286 }
1287 }
1288
    // Handles FUSE_CREATE: atomically creates and opens a file, replying with
    // both an `EntryOut` and an `OpenOut` in a single message.
    fn create<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let CreateIn {
            flags, mode, umask, ..
        } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // The name follows the fixed-size CreateIn struct; its length is
        // whatever remains of the message after both headers.
        let buflen = (in_header.len as usize)
            .checked_sub(size_of::<InHeader>())
            .and_then(|l| l.checked_sub(size_of::<CreateIn>()))
            .ok_or(Error::InvalidHeaderLength)?;

        let mut buf = vec![0; buflen];

        r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;

        // Take the first nul-terminated chunk as the file name.
        let mut iter = split_inclusive(&buf, |&c| c == b'\0');
        let name = iter
            .next()
            .ok_or(Error::MissingParameter)
            .and_then(bytes_to_cstr)?;

        match self.fs.create(
            Context::from(in_header),
            in_header.nodeid.into(),
            name,
            mode,
            flags,
            umask,
        ) {
            Ok((entry, handle, opts)) => {
                let entry_out = EntryOut {
                    nodeid: entry.inode,
                    generation: entry.generation,
                    entry_valid: entry.entry_timeout.as_secs(),
                    attr_valid: entry.attr_timeout.as_secs(),
                    entry_valid_nsec: entry.entry_timeout.subsec_nanos(),
                    attr_valid_nsec: entry.attr_timeout.subsec_nanos(),
                    attr: entry.attr.into(),
                };
                let open_out = OpenOut {
                    fh: handle.map(Into::into).unwrap_or(0),
                    open_flags: opts.bits(),
                    ..Default::default()
                };

                // Kind of a hack to write both structs.
                reply_ok(
                    Some(entry_out),
                    Some(open_out.as_bytes()),
                    in_header.unique,
                    w,
                )
            }
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
1344
    // FUSE_INTERRUPT requests are accepted but ignored; returning Ok(0) writes
    // no reply bytes.
    #[allow(clippy::unnecessary_wraps)]
    fn interrupt(&self, _in_header: InHeader) -> Result<usize> {
        Ok(0)
    }
1349
bmap<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize>1350 fn bmap<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1351 if let Err(e) = self.fs.bmap() {
1352 reply_error(e, in_header.unique, w)
1353 } else {
1354 Ok(0)
1355 }
1356 }
1357
    // Handles FUSE_DESTROY: tells the filesystem to shut down.
    #[allow(clippy::unnecessary_wraps)]
    fn destroy(&self) -> Result<usize> {
        // No reply to this function.
        self.fs.destroy();

        Ok(0)
    }
1365
    // Handles FUSE_IOCTL. The remaining reader `r` is handed to the filesystem
    // so it can consume any in-band ioctl payload itself.
    fn ioctl<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let IoctlIn {
            fh,
            flags,
            cmd,
            arg,
            in_size,
            out_size,
        } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        let res = self.fs.ioctl(
            in_header.into(),
            in_header.nodeid.into(),
            fh.into(),
            IoctlFlags::from_bits_truncate(flags),
            cmd,
            arg,
            in_size,
            out_size,
            r,
        );

        match res {
            Ok(reply) => match reply {
                // Retry asks the kernel to re-send the ioctl with the listed iovecs.
                IoctlReply::Retry { input, output } => {
                    retry_ioctl(in_header.unique, input, output, w)
                }
                IoctlReply::Done(res) => finish_ioctl(in_header.unique, res, w),
            },
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
1398
poll<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize>1399 fn poll<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1400 if let Err(e) = self.fs.poll() {
1401 reply_error(e, in_header.unique, w)
1402 } else {
1403 Ok(0)
1404 }
1405 }
1406
notify_reply<R: Reader, W: Writer>( &self, in_header: InHeader, mut _r: R, w: W, ) -> Result<usize>1407 fn notify_reply<R: Reader, W: Writer>(
1408 &self,
1409 in_header: InHeader,
1410 mut _r: R,
1411 w: W,
1412 ) -> Result<usize> {
1413 if let Err(e) = self.fs.notify_reply() {
1414 reply_error(e, in_header.unique, w)
1415 } else {
1416 Ok(0)
1417 }
1418 }
1419
batch_forget<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize>1420 fn batch_forget<R: Reader, W: Writer>(
1421 &self,
1422 in_header: InHeader,
1423 mut r: R,
1424 w: W,
1425 ) -> Result<usize> {
1426 let BatchForgetIn { count, .. } =
1427 zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
1428
1429 if let Some(size) = (count as usize).checked_mul(size_of::<ForgetOne>()) {
1430 if size > self.fs.max_buffer_size() as usize {
1431 return reply_error(
1432 io::Error::from_raw_os_error(libc::ENOMEM),
1433 in_header.unique,
1434 w,
1435 );
1436 }
1437 } else {
1438 return reply_error(
1439 io::Error::from_raw_os_error(libc::EOVERFLOW),
1440 in_header.unique,
1441 w,
1442 );
1443 }
1444
1445 let mut requests = Vec::with_capacity(count as usize);
1446 for _ in 0..count {
1447 requests.push(
1448 zerocopy_from_reader(&mut r)
1449 .map(|f: ForgetOne| (f.nodeid.into(), f.nlookup))
1450 .map_err(Error::DecodeMessage)?,
1451 );
1452 }
1453
1454 self.fs.batch_forget(Context::from(in_header), requests);
1455
1456 // No reply for forget messages.
1457 Ok(0)
1458 }
1459
fallocate<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize>1460 fn fallocate<R: Reader, W: Writer>(
1461 &self,
1462 in_header: InHeader,
1463 mut r: R,
1464 w: W,
1465 ) -> Result<usize> {
1466 let FallocateIn {
1467 fh,
1468 offset,
1469 length,
1470 mode,
1471 ..
1472 } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
1473
1474 match self.fs.fallocate(
1475 Context::from(in_header),
1476 in_header.nodeid.into(),
1477 fh.into(),
1478 mode,
1479 offset,
1480 length,
1481 ) {
1482 Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
1483 Err(e) => reply_error(e, in_header.unique, w),
1484 }
1485 }
1486
lseek<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize>1487 fn lseek<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> {
1488 if let Err(e) = self.fs.lseek() {
1489 reply_error(e, in_header.unique, w)
1490 } else {
1491 Ok(0)
1492 }
1493 }
1494
copy_file_range<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize>1495 fn copy_file_range<R: Reader, W: Writer>(
1496 &self,
1497 in_header: InHeader,
1498 mut r: R,
1499 w: W,
1500 ) -> Result<usize> {
1501 let CopyFileRangeIn {
1502 fh_src,
1503 off_src,
1504 nodeid_dst,
1505 fh_dst,
1506 off_dst,
1507 len,
1508 flags,
1509 } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
1510
1511 match self.fs.copy_file_range(
1512 Context::from(in_header),
1513 in_header.nodeid.into(),
1514 fh_src.into(),
1515 off_src,
1516 nodeid_dst.into(),
1517 fh_dst.into(),
1518 off_dst,
1519 len,
1520 flags,
1521 ) {
1522 Ok(count) => {
1523 let out = WriteOut {
1524 size: count as u32,
1525 ..Default::default()
1526 };
1527
1528 reply_ok(Some(out), None, in_header.unique, w)
1529 }
1530 Err(e) => reply_error(e, in_header.unique, w),
1531 }
1532 }
1533
    // Handles FUSE_SETUPMAPPING (virtiofs DAX): asks the filesystem to map a
    // file region into the DAX window via `mapper`.
    fn set_up_mapping<R, W, M>(
        &self,
        in_header: InHeader,
        mut r: R,
        w: W,
        mapper: M,
    ) -> Result<usize>
    where
        R: Reader,
        W: Writer,
        M: Mapper,
    {
        let SetUpMappingIn {
            fh,
            foffset,
            len,
            flags,
            moffset,
        } = zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;
        let flags = SetUpMappingFlags::from_bits_truncate(flags);

        // Translate the request's READ/WRITE flags into mmap-style protection bits.
        let mut prot = 0;
        if flags.contains(SetUpMappingFlags::READ) {
            prot |= libc::PROT_READ as u32;
        }
        if flags.contains(SetUpMappingFlags::WRITE) {
            prot |= libc::PROT_WRITE as u32;
        }

        // Reject lengths that don't fit the filesystem's expected size type.
        let size = if let Ok(s) = len.try_into() {
            s
        } else {
            return reply_error(
                io::Error::from_raw_os_error(libc::EOVERFLOW),
                in_header.unique,
                w,
            );
        };

        match self.fs.set_up_mapping(
            Context::from(in_header),
            in_header.nodeid.into(),
            fh.into(),
            foffset,
            moffset,
            size,
            prot,
            mapper,
        ) {
            Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
            Err(e) => {
                error!("set_up_mapping failed: {}", e);
                reply_error(e, in_header.unique, w)
            }
        }
    }
1590
    // Handles FUSE_REMOVEMAPPING (virtiofs DAX): unmaps previously established
    // DAX window regions described by the decoded list of messages.
    fn remove_mapping<R, W, M>(
        &self,
        in_header: InHeader,
        mut r: R,
        w: W,
        mapper: M,
    ) -> Result<usize>
    where
        R: Reader,
        W: Writer,
        M: Mapper,
    {
        let RemoveMappingIn { count } =
            zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // `FUSE_REMOVEMAPPING_MAX_ENTRY` is defined as
        // `PAGE_SIZE / sizeof(struct fuse_removemapping_one)` in /kernel/include/uapi/linux/fuse.h.
        let max_entry = pagesize() / std::mem::size_of::<RemoveMappingOne>();

        // Reject requests that exceed the kernel's own per-message entry limit.
        if max_entry < count as usize {
            return reply_error(
                io::Error::from_raw_os_error(libc::EINVAL),
                in_header.unique,
                w,
            );
        }

        // Decode each RemoveMappingOne entry sequentially from the request body.
        let mut msgs = Vec::with_capacity(count as usize);
        for _ in 0..(count as usize) {
            msgs.push(zerocopy_from_reader(&mut r).map_err(Error::DecodeMessage)?);
        }

        match self.fs.remove_mapping(&msgs, mapper) {
            Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }
1628 }
1629
retry_ioctl<W: Writer>( unique: u64, input: Vec<IoctlIovec>, output: Vec<IoctlIovec>, mut w: W, ) -> Result<usize>1630 fn retry_ioctl<W: Writer>(
1631 unique: u64,
1632 input: Vec<IoctlIovec>,
1633 output: Vec<IoctlIovec>,
1634 mut w: W,
1635 ) -> Result<usize> {
1636 // We don't need to check for overflow here because if adding these 2 values caused an overflow
1637 // we would have run out of memory before reaching this point.
1638 if input.len() + output.len() > IOCTL_MAX_IOV {
1639 return Err(Error::TooManyIovecs(
1640 input.len() + output.len(),
1641 IOCTL_MAX_IOV,
1642 ));
1643 }
1644
1645 let len = size_of::<OutHeader>()
1646 + size_of::<IoctlOut>()
1647 + (input.len() * size_of::<IoctlIovec>())
1648 + (output.len() * size_of::<IoctlIovec>());
1649 let header = OutHeader {
1650 len: len as u32,
1651 error: 0,
1652 unique,
1653 };
1654 let out = IoctlOut {
1655 result: 0,
1656 flags: IoctlFlags::RETRY.bits(),
1657 in_iovs: input.len() as u32,
1658 out_iovs: output.len() as u32,
1659 };
1660
1661 let mut total_bytes = size_of::<OutHeader>() + size_of::<IoctlOut>();
1662 w.write_all(header.as_bytes())
1663 .map_err(Error::EncodeMessage)?;
1664 w.write_all(out.as_bytes()).map_err(Error::EncodeMessage)?;
1665 for i in input.into_iter().chain(output.into_iter()) {
1666 total_bytes += i.as_bytes().len();
1667 w.write_all(i.as_bytes()).map_err(Error::EncodeMessage)?;
1668 }
1669
1670 w.flush().map_err(Error::FlushMessage)?;
1671 debug_assert_eq!(len, total_bytes);
1672 Ok(len)
1673 }
1674
finish_ioctl<W: Writer>(unique: u64, res: io::Result<Vec<u8>>, w: W) -> Result<usize>1675 fn finish_ioctl<W: Writer>(unique: u64, res: io::Result<Vec<u8>>, w: W) -> Result<usize> {
1676 let (out, data) = match res {
1677 Ok(data) => {
1678 let out = IoctlOut {
1679 result: 0,
1680 ..Default::default()
1681 };
1682 (out, Some(data))
1683 }
1684 Err(e) => {
1685 let out = IoctlOut {
1686 result: -e.raw_os_error().unwrap_or(libc::EIO),
1687 ..Default::default()
1688 };
1689 (out, None)
1690 }
1691 };
1692 reply_ok(Some(out), data.as_ref().map(|d| &d[..]), unique, w)
1693 }
1694
reply_readdir<W: Writer>(len: usize, unique: u64, mut w: W) -> Result<usize>1695 fn reply_readdir<W: Writer>(len: usize, unique: u64, mut w: W) -> Result<usize> {
1696 let out = OutHeader {
1697 len: (size_of::<OutHeader>() + len) as u32,
1698 error: 0,
1699 unique,
1700 };
1701
1702 w.write_all(out.as_bytes()).map_err(Error::EncodeMessage)?;
1703 w.flush().map_err(Error::FlushMessage)?;
1704 Ok(out.len as usize)
1705 }
1706
reply_ok<T: AsBytes, W: Writer>( out: Option<T>, data: Option<&[u8]>, unique: u64, mut w: W, ) -> Result<usize>1707 fn reply_ok<T: AsBytes, W: Writer>(
1708 out: Option<T>,
1709 data: Option<&[u8]>,
1710 unique: u64,
1711 mut w: W,
1712 ) -> Result<usize> {
1713 let mut len = size_of::<OutHeader>();
1714
1715 if out.is_some() {
1716 len += size_of::<T>();
1717 }
1718
1719 if let Some(data) = data {
1720 len += data.len();
1721 }
1722
1723 let header = OutHeader {
1724 len: len as u32,
1725 error: 0,
1726 unique,
1727 };
1728
1729 let mut total_bytes = size_of::<OutHeader>();
1730 w.write_all(header.as_bytes())
1731 .map_err(Error::EncodeMessage)?;
1732
1733 if let Some(out) = out {
1734 total_bytes += out.as_bytes().len();
1735 w.write_all(out.as_bytes()).map_err(Error::EncodeMessage)?;
1736 }
1737
1738 if let Some(data) = data {
1739 total_bytes += data.len();
1740 w.write_all(data).map_err(Error::EncodeMessage)?;
1741 }
1742
1743 w.flush().map_err(Error::FlushMessage)?;
1744 debug_assert_eq!(len, total_bytes);
1745 Ok(len)
1746 }
1747
reply_error<W: Writer>(e: io::Error, unique: u64, mut w: W) -> Result<usize>1748 fn reply_error<W: Writer>(e: io::Error, unique: u64, mut w: W) -> Result<usize> {
1749 let header = OutHeader {
1750 len: size_of::<OutHeader>() as u32,
1751 error: -e.raw_os_error().unwrap_or(libc::EIO),
1752 unique,
1753 };
1754
1755 w.write_all(header.as_bytes())
1756 .map_err(Error::EncodeMessage)?;
1757 w.flush().map_err(Error::FlushMessage)?;
1758
1759 Ok(header.len as usize)
1760 }
1761
bytes_to_cstr(buf: &[u8]) -> Result<&CStr>1762 fn bytes_to_cstr(buf: &[u8]) -> Result<&CStr> {
1763 // Convert to a `CStr` first so that we can drop the '\0' byte at the end
1764 // and make sure there are no interior '\0' bytes.
1765 CStr::from_bytes_with_nul(buf).map_err(Error::InvalidCString)
1766 }
1767
add_dirent<W: Writer>( cursor: &mut W, max: usize, d: &DirEntry, entry: Option<Entry>, ) -> io::Result<usize>1768 fn add_dirent<W: Writer>(
1769 cursor: &mut W,
1770 max: usize,
1771 d: &DirEntry,
1772 entry: Option<Entry>,
1773 ) -> io::Result<usize> {
1774 // Strip the trailing '\0'.
1775 let name = d.name.to_bytes();
1776 if name.len() > ::std::u32::MAX as usize {
1777 return Err(io::Error::from_raw_os_error(libc::EOVERFLOW));
1778 }
1779
1780 let dirent_len = size_of::<Dirent>()
1781 .checked_add(name.len())
1782 .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
1783
1784 // Directory entries must be padded to 8-byte alignment. If adding 7 causes
1785 // an overflow then this dirent cannot be properly padded.
1786 let padded_dirent_len = dirent_len
1787 .checked_add(7)
1788 .map(|l| l & !7)
1789 .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
1790
1791 let total_len = if entry.is_some() {
1792 padded_dirent_len
1793 .checked_add(size_of::<EntryOut>())
1794 .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?
1795 } else {
1796 padded_dirent_len
1797 };
1798
1799 if max < total_len {
1800 Ok(0)
1801 } else {
1802 if let Some(entry) = entry {
1803 cursor.write_all(EntryOut::from(entry).as_bytes())?;
1804 }
1805
1806 let dirent = Dirent {
1807 ino: d.ino,
1808 off: d.offset,
1809 namelen: name.len() as u32,
1810 type_: d.type_,
1811 };
1812
1813 cursor.write_all(dirent.as_bytes())?;
1814 cursor.write_all(name)?;
1815
1816 // We know that `dirent_len` <= `padded_dirent_len` due to the check above
1817 // so there's no need for checked arithmetic.
1818 let padding = padded_dirent_len - dirent_len;
1819 if padding > 0 {
1820 cursor.write_all(&DIRENT_PADDING[..padding])?;
1821 }
1822
1823 Ok(total_len)
1824 }
1825 }
1826
1827 // TODO: Remove this once std::slice::SplitInclusive is stabilized.
// Iterator over chunks of `buf`, where each chunk ends with (and includes) the
// first element matching `pred`, or runs to the end of the buffer.
struct SplitInclusive<'a, T, F> {
    // Remaining, not-yet-yielded portion of the slice.
    buf: &'a [T],
    // Predicate marking the last element of each chunk.
    pred: F,
}
1832
1833 impl<'a, T, F> Iterator for SplitInclusive<'a, T, F>
1834 where
1835 F: FnMut(&T) -> bool,
1836 {
1837 type Item = &'a [T];
1838
next(&mut self) -> Option<Self::Item>1839 fn next(&mut self) -> Option<Self::Item> {
1840 if self.buf.is_empty() {
1841 return None;
1842 }
1843
1844 let split_pos = self
1845 .buf
1846 .iter()
1847 .position(&mut self.pred)
1848 .map(|p| p + 1)
1849 .unwrap_or(self.buf.len());
1850
1851 let (next, rem) = self.buf.split_at(split_pos);
1852 self.buf = rem;
1853
1854 Some(next)
1855 }
1856
size_hint(&self) -> (usize, Option<usize>)1857 fn size_hint(&self) -> (usize, Option<usize>) {
1858 if self.buf.is_empty() {
1859 (0, Some(0))
1860 } else {
1861 (1, Some(self.buf.len()))
1862 }
1863 }
1864 }
1865
// Constructs a `SplitInclusive` iterator over `buf`; each yielded chunk ends
// with the element that matched `pred` (unlike `slice::split`, the separator
// is kept).
fn split_inclusive<T, F>(buf: &[T], pred: F) -> SplitInclusive<T, F>
where
    F: FnMut(&T) -> bool,
{
    SplitInclusive { buf, pred }
}
1872
#[cfg(test)]
mod tests {
    use super::*;

    // Separator in the middle: it ends the first chunk and is included in it.
    #[test]
    fn split_inclusive_basic() {
        let slice = [10, 40, 33, 20];
        let mut iter = split_inclusive(&slice, |num| num % 3 == 0);

        assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
        assert_eq!(iter.next().unwrap(), &[20]);
        assert!(iter.next().is_none());
    }

    // Separator as the final element: no empty trailing chunk is produced.
    #[test]
    fn split_inclusive_last() {
        let slice = [3, 10, 40, 33];
        let mut iter = split_inclusive(&slice, |num| num % 3 == 0);

        assert_eq!(iter.next().unwrap(), &[3]);
        assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
        assert!(iter.next().is_none());
    }

    // No element matches: the whole slice comes back as one chunk.
    #[test]
    fn split_inclusive_no_match() {
        let slice = [3, 10, 40, 33];
        let mut iter = split_inclusive(&slice, |num| num % 7 == 0);

        assert_eq!(iter.next().unwrap(), &slice);
        assert!(iter.next().is_none());
    }
}
1906