/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2007  Miklos Szeredi <miklos@szeredi.hu>

  Implementation of (most of) the low-level FUSE API. The session loop
  functions are implemented in separate files.

  This program can be distributed under the terms of the GNU LGPLv2.
  See the file COPYING.LIB
*/

#define _GNU_SOURCE

#include "fuse_config.h"
#include "fuse_i.h"
#include "fuse_kernel.h"
#include "fuse_opt.h"
#include "fuse_misc.h"
#include "mount_util.h"

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>
#include <errno.h>
#include <assert.h>
#include <sys/file.h>
#include <sys/ioctl.h>

#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif
#ifndef F_SETPIPE_SZ
#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
#endif


#define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
#define OFFSET_MAX 0x7fffffffffffffffLL

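/* Recover a pointer to the enclosing struct from a pointer to one of
   its members (the classic Linux-kernel idiom). */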
#define container_of(ptr, type, member) ({				\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})

struct fuse_pollhandle {
	uint64_t kh;
	struct fuse_session *se;
};

static size_t pagesize;

static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
{
	pagesize = getpagesize();
}

static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
{
	attr->ino = stbuf->st_ino;
	attr->mode = stbuf->st_mode;
	attr->nlink = stbuf->st_nlink;
	attr->uid = stbuf->st_uid;
	attr->gid = stbuf->st_gid;
	attr->rdev = stbuf->st_rdev;
	attr->size = stbuf->st_size;
	attr->blksize = stbuf->st_blksize;
	attr->blocks = stbuf->st_blocks;
	attr->atime = stbuf->st_atime;
	attr->mtime = stbuf->st_mtime;
	attr->ctime = stbuf->st_ctime;
	attr->atimensec = ST_ATIM_NSEC(stbuf);
	attr->mtimensec = ST_MTIM_NSEC(stbuf);
	attr->ctimensec = ST_CTIM_NSEC(stbuf);
}

static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
{
	stbuf->st_mode = attr->mode;
	stbuf->st_uid = attr->uid;
	stbuf->st_gid = attr->gid;
	stbuf->st_size = attr->size;
	stbuf->st_atime = attr->atime;
	stbuf->st_mtime = attr->mtime;
	stbuf->st_ctime = attr->ctime;
	ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
	ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
	ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
}

static size_t iov_length(const struct iovec *iov, size_t count)
{
	size_t seg;
	size_t ret = 0;

	for (seg = 0; seg < count; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

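/* Requests are kept on intrusive, circular doubly-linked lists
   (se->list for in-flight requests, se->interrupts for pending
   interrupts); an empty list is a node pointing at itself. */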
static void list_init_req(struct fuse_req *req)
{
	req->next = req;
	req->prev = req;
}

static void list_del_req(struct fuse_req *req)
{
	struct fuse_req *prev = req->prev;
	struct fuse_req *next = req->next;
	prev->next = next;
	next->prev = prev;
}

static void list_add_req(struct fuse_req *req, struct fuse_req *next)
{
	struct fuse_req *prev = next->prev;
	req->next = next;
	req->prev = prev;
	prev->next = req;
	next->prev = req;
}

static void destroy_req(fuse_req_t req)
{
	assert(req->ch == NULL);
	pthread_mutex_destroy(&req->lock);
	free(req);
}

void fuse_free_req(fuse_req_t req)
{
	int ctr;
	struct fuse_session *se = req->se;

	pthread_mutex_lock(&se->lock);
	req->u.ni.func = NULL;
	req->u.ni.data = NULL;
	list_del_req(req);
	ctr = --req->ctr;
	fuse_chan_put(req->ch);
	req->ch = NULL;
	pthread_mutex_unlock(&se->lock);
	if (!ctr)
		destroy_req(req);
}

static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
{
	struct fuse_req *req;

	req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
	if (req == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
	} else {
		req->se = se;
		req->ctr = 1;
		list_init_req(req);
		pthread_mutex_init(&req->lock, NULL);
	}

	return req;
}

/* Send data. If *ch* is NULL, send via session master fd */
static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
			 struct iovec *iov, int count)
{
	struct fuse_out_header *out = iov[0].iov_base;

	assert(se != NULL);
	out->len = iov_length(iov, count);
	if (se->debug) {
		if (out->unique == 0) {
			fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
				 out->error, out->len);
		} else if (out->error) {
			fuse_log(FUSE_LOG_DEBUG,
				 "   unique: %llu, error: %i (%s), outsize: %i\n",
				 (unsigned long long) out->unique, out->error,
				 strerror(-out->error), out->len);
		} else {
			fuse_log(FUSE_LOG_DEBUG,
				 "   unique: %llu, success, outsize: %i\n",
				 (unsigned long long) out->unique, out->len);
		}
	}

	ssize_t res;
	if (se->io != NULL)
		/* se->io->writev is never NULL if se->io is not NULL as
		   specified by fuse_session_custom_io() */
		res = se->io->writev(ch ? ch->fd : se->fd, iov, count,
				     se->userdata);
	else
		res = writev(ch ? ch->fd : se->fd, iov, count);

	int err = errno;

	if (res == -1) {
		/* ENOENT means the operation was interrupted */
		if (!fuse_session_exited(se) && err != ENOENT)
			perror("fuse: writing device");
		return -err;
	}

	return 0;
}
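
/*
 * Wire format note: every reply sent above is a single fuse_out_header
 * immediately followed by the payload iovecs, with out->len covering
 * the whole message. For a successful read of N bytes, for example,
 * the kernel receives a header with
 * len == sizeof(struct fuse_out_header) + N, error == 0 and the
 * request's unique id, followed by the N data bytes.
 */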

int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
			       int count)
{
	struct fuse_out_header out;

#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
	const char *str = strerrordesc_np(-error);
	if ((str == NULL && error != 0) || error > 0) {
#else
	if (error <= -1000 || error > 0) {
#endif
		fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
		error = -ERANGE;
	}

	out.unique = req->unique;
	out.error = error;

	iov[0].iov_base = &out;
	iov[0].iov_len = sizeof(struct fuse_out_header);

	return fuse_send_msg(req->se, req->ch, iov, count);
}

static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
			  int count)
{
	int res;

	res = fuse_send_reply_iov_nofree(req, error, iov, count);
	fuse_free_req(req);
	return res;
}

static int send_reply(fuse_req_t req, int error, const void *arg,
		      size_t argsize)
{
	struct iovec iov[2];
	int count = 1;
	if (argsize) {
		iov[1].iov_base = (void *) arg;
		iov[1].iov_len = argsize;
		count++;
	}
	return send_reply_iov(req, error, iov, count);
}

int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
{
	int res;
	struct iovec *padded_iov;

	padded_iov = malloc((count + 1) * sizeof(struct iovec));
	if (padded_iov == NULL)
		return fuse_reply_err(req, ENOMEM);

	memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
	count++;

	res = send_reply_iov(req, 0, padded_iov, count);
	free(padded_iov);

	return res;
}


/* `buf` is allowed to be empty so that the proper size may be
   allocated by the caller */
size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
			 const char *name, const struct stat *stbuf, off_t off)
{
	(void)req;
	size_t namelen;
	size_t entlen;
	size_t entlen_padded;
	struct fuse_dirent *dirent;

	namelen = strlen(name);
	entlen = FUSE_NAME_OFFSET + namelen;
	entlen_padded = FUSE_DIRENT_ALIGN(entlen);

	if ((buf == NULL) || (entlen_padded > bufsize))
		return entlen_padded;

	dirent = (struct fuse_dirent*) buf;
	dirent->ino = stbuf->st_ino;
	dirent->off = off;
	dirent->namelen = namelen;
	dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
	memcpy(dirent->name, name, namelen);
	memset(dirent->name + namelen, 0, entlen_padded - entlen);

	return entlen_padded;
}
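
/*
 * Typical two-pass use from a readdir handler (a sketch, not part of
 * this file; `p`, `buf` and `next_off` are hypothetical caller state):
 * first call with buf == NULL to learn the padded entry size, then
 * append for real once it is known to fit:
 *
 *	size_t entsize = fuse_add_direntry(req, NULL, 0, name, &st, next_off);
 *	if (p + entsize <= buf + bufsize)
 *		fuse_add_direntry(req, p, buf + bufsize - p, name, &st,
 *				  next_off);
 */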

static void convert_statfs(const struct statvfs *stbuf,
			   struct fuse_kstatfs *kstatfs)
{
	kstatfs->bsize = stbuf->f_bsize;
	kstatfs->frsize = stbuf->f_frsize;
	kstatfs->blocks = stbuf->f_blocks;
	kstatfs->bfree = stbuf->f_bfree;
	kstatfs->bavail = stbuf->f_bavail;
	kstatfs->files = stbuf->f_files;
	kstatfs->ffree = stbuf->f_ffree;
	kstatfs->namelen = stbuf->f_namemax;
}

static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
{
	return send_reply(req, 0, arg, argsize);
}

int fuse_reply_err(fuse_req_t req, int err)
{
	return send_reply(req, -err, NULL, 0);
}

void fuse_reply_none(fuse_req_t req)
{
	fuse_free_req(req);
}

static unsigned long calc_timeout_sec(double t)
{
	if (t > (double) ULONG_MAX)
		return ULONG_MAX;
	else if (t < 0.0)
		return 0;
	else
		return (unsigned long) t;
}

static unsigned int calc_timeout_nsec(double t)
{
	double f = t - (double) calc_timeout_sec(t);
	if (f < 0.0)
		return 0;
	else if (f >= 0.999999999)
		return 999999999;
	else
		return (unsigned int) (f * 1.0e9);
}
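
/* Example: a timeout of 1.5 is split into 1 second plus 500000000
   nanoseconds; negative values clamp to 0 and out-of-range values
   clamp to ULONG_MAX / 999999999. */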

static void fill_entry(struct fuse_entry_out *arg,
		       const struct fuse_entry_param *e)
{
	arg->nodeid = e->ino;
	arg->generation = e->generation;
	arg->entry_valid = calc_timeout_sec(e->entry_timeout);
	arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
	arg->attr_valid = calc_timeout_sec(e->attr_timeout);
	arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
	convert_stat(&e->attr, &arg->attr);
}

/* `buf` is allowed to be empty so that the proper size may be
   allocated by the caller */
size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
			      const char *name,
			      const struct fuse_entry_param *e, off_t off)
{
	(void)req;
	size_t namelen;
	size_t entlen;
	size_t entlen_padded;

	namelen = strlen(name);
	entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
	entlen_padded = FUSE_DIRENT_ALIGN(entlen);
	if ((buf == NULL) || (entlen_padded > bufsize))
		return entlen_padded;

	struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
	memset(&dp->entry_out, 0, sizeof(dp->entry_out));
	fill_entry(&dp->entry_out, e);

	struct fuse_dirent *dirent = &dp->dirent;
	dirent->ino = e->attr.st_ino;
	dirent->off = off;
	dirent->namelen = namelen;
	dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
	memcpy(dirent->name, name, namelen);
	memset(dirent->name + namelen, 0, entlen_padded - entlen);

	return entlen_padded;
}

static void fill_open(struct fuse_open_out *arg,
		      const struct fuse_file_info *f,
		      int use_upstream_passthrough)
{
	arg->fh = f->fh;
	if (use_upstream_passthrough) {
		if (f->backing_id > 0) {
			arg->backing_id = f->backing_id;
			arg->open_flags |= FOPEN_PASSTHROUGH;
		}
	} else {
		arg->passthrough_fh = f->passthrough_fh;
	}

	if (f->direct_io)
		arg->open_flags |= FOPEN_DIRECT_IO;
	if (f->keep_cache)
		arg->open_flags |= FOPEN_KEEP_CACHE;
	if (f->cache_readdir)
		arg->open_flags |= FOPEN_CACHE_DIR;
	if (f->nonseekable)
		arg->open_flags |= FOPEN_NONSEEKABLE;
	if (f->noflush)
		arg->open_flags |= FOPEN_NOFLUSH;
	if (f->parallel_direct_writes)
		arg->open_flags |= FOPEN_PARALLEL_DIRECT_WRITES;
}

int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
{
	struct {
		struct fuse_entry_out arg;
		struct fuse_entry_bpf_out bpf_arg;
	} __attribute__((packed)) arg_ext = {0};

	struct fuse_entry_out arg;
	struct fuse_entry_bpf_out bpf_arg;
	size_t size;
	/* Test e->bpf_fd here, not bpf_arg.bpf_fd: bpf_arg is still
	   uninitialized at this point */
	int extended_args = e->bpf_action || e->bpf_fd || e->backing_action ||
			    e->backing_fd;

	if (extended_args) {
		size = req->se->conn.proto_minor < 9 ?
			FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg_ext);
	} else {
		size = req->se->conn.proto_minor < 9 ?
			FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
	}

	/* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
	   negative entry */
	if (!e->ino && req->se->conn.proto_minor < 4)
		return fuse_reply_err(req, ENOENT);

	memset(&arg, 0, sizeof(arg));
	fill_entry(&arg, e);

	if (extended_args) {
		memset(&bpf_arg, 0, sizeof(bpf_arg));

		bpf_arg.bpf_action = e->bpf_action;
		bpf_arg.bpf_fd = e->bpf_fd;
		bpf_arg.backing_action = e->backing_action;
		bpf_arg.backing_fd = e->backing_fd;

		arg_ext.arg = arg;
		arg_ext.bpf_arg = bpf_arg;

		return send_reply_ok(req, &arg_ext, size);
	} else {
		return send_reply_ok(req, &arg, size);
	}
}

int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
		      const struct fuse_file_info *f)
{
	char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
	size_t entrysize = req->se->conn.proto_minor < 9 ?
		FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
	struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
	struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);

	memset(buf, 0, sizeof(buf));
	fill_entry(earg, e);
	fill_open(oarg, f, req->se->conn.capable & FUSE_CAP_PASSTHROUGH_UPSTREAM);
	return send_reply_ok(req, buf,
			     entrysize + sizeof(struct fuse_open_out));
}

int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
		    double attr_timeout)
{
	struct fuse_attr_out arg;
	size_t size = req->se->conn.proto_minor < 9 ?
		FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);

	memset(&arg, 0, sizeof(arg));
	arg.attr_valid = calc_timeout_sec(attr_timeout);
	arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
	convert_stat(attr, &arg.attr);

	return send_reply_ok(req, &arg, size);
}

int fuse_reply_readlink(fuse_req_t req, const char *linkname)
{
	return send_reply_ok(req, linkname, strlen(linkname));
}

int fuse_reply_canonical_path(fuse_req_t req, const char *path)
{
	// The kernel expects a buffer containing the null terminator for this
	// op, so we add the null terminator size to strlen
	return send_reply_ok(req, path, strlen(path) + 1);
}

enum {
	FUSE_PASSTHROUGH_API_UNAVAILABLE,
	FUSE_PASSTHROUGH_API_V0,
	FUSE_PASSTHROUGH_API_V1,
	FUSE_PASSTHROUGH_API_V2,
	FUSE_PASSTHROUGH_API_STABLE,
};

/*
 * Requests the FUSE passthrough feature to be enabled on a specific file
 * through the passed fd.
 * This function returns an identifier that must be used as passthrough_fh
 * when the open/create_open request reply is sent back to /dev/fuse.
 * As for the current FUSE passthrough implementation, passthrough_fh values
 * are only valid if > 0, so in case the FUSE passthrough open ioctl returns
 * a value <= 0, this must be considered an error and is returned as-is by
 * this function.
 */
int fuse_passthrough_enable(fuse_req_t req, unsigned int fd)
{
	static sig_atomic_t passthrough_version = FUSE_PASSTHROUGH_API_STABLE;
	int ret = 0; /* values <= 0 represent errors in FUSE passthrough */

	if (!(req->se->conn.capable & FUSE_CAP_PASSTHROUGH))
		return -ENOTTY;
	/*
	 * The interface of FUSE passthrough is still unstable in the kernel,
	 * so the following solution is to search for the most updated API
	 * version and, if not found, fall back to an older one.
	 * This happens when ioctl() returns -1 and errno is set to ENOTTY,
	 * an error code that corresponds to the lack of a specific ioctl.
	 */
	switch (passthrough_version) {
	case FUSE_PASSTHROUGH_API_STABLE:
		/* There is not a stable API yet */
		passthrough_version = FUSE_PASSTHROUGH_API_V2;
	case FUSE_PASSTHROUGH_API_V2: {
		ret = ioctl(req->se->fd, FUSE_DEV_IOC_PASSTHROUGH_OPEN_V2, &fd);
		if (ret == -1 && errno == ENOTTY)
			passthrough_version = FUSE_PASSTHROUGH_API_V1;
		else
			break;
	}
	case FUSE_PASSTHROUGH_API_V1: {
		struct fuse_passthrough_out_v0 out = {};
		out.fd = fd;

		ret = ioctl(req->se->fd, FUSE_DEV_IOC_PASSTHROUGH_OPEN_V1, &out);
		if (ret == -1 && errno == ENOTTY)
			passthrough_version = FUSE_PASSTHROUGH_API_V0;
		else
			break;
	}
	case FUSE_PASSTHROUGH_API_V0: {
		struct fuse_passthrough_out_v0 out = {};
		out.fd = fd;

		ret = ioctl(req->se->fd, FUSE_DEV_IOC_PASSTHROUGH_OPEN_V0, &out);
		if (ret == -1 && errno == ENOTTY)
			passthrough_version = FUSE_PASSTHROUGH_API_UNAVAILABLE;
		else
			break;
	}
	default:
		fuse_log(FUSE_LOG_ERR, "fuse: passthrough_enable no valid API\n");
		return -ENOTTY;
	}

	if (ret <= 0)
		fuse_log(FUSE_LOG_ERR, "fuse: passthrough_enable: %s\n", strerror(errno));
	return ret;
}

int fuse_passthrough_open(fuse_req_t req, int fd)
{
	struct fuse_backing_map map = { .fd = fd };
	int ret;

	ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
	if (ret <= 0) {
		fuse_log(FUSE_LOG_ERR, "fuse: passthrough_open: %s\n", strerror(errno));
		return 0;
	}

	return ret;
}

int fuse_passthrough_close(fuse_req_t req, int backing_id)
{
	int ret;

	ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_CLOSE, &backing_id);
	if (ret < 0)
		fuse_log(FUSE_LOG_ERR, "fuse: passthrough_close: %s\n", strerror(errno));

	return ret;
}
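
/*
 * Sketch of how an open() handler might use the upstream passthrough
 * API above (illustrative only; backing_path() is a hypothetical
 * helper and error handling is elided):
 *
 *	static void xmp_open(fuse_req_t req, fuse_ino_t ino,
 *			     struct fuse_file_info *fi)
 *	{
 *		int fd = open(backing_path(ino), fi->flags);
 *		fi->backing_id = fuse_passthrough_open(req, fd);
 *		// fill_open() converts backing_id > 0 into
 *		// FOPEN_PASSTHROUGH on the reply
 *		fuse_reply_open(req, fi);
 *	}
 */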

int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
{
	struct fuse_open_out arg;

	memset(&arg, 0, sizeof(arg));
	fill_open(&arg, f, req->se->conn.capable & FUSE_CAP_PASSTHROUGH_UPSTREAM);
	return send_reply_ok(req, &arg, sizeof(arg));
}

int fuse_reply_write(fuse_req_t req, size_t count)
{
	struct fuse_write_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.size = count;

	return send_reply_ok(req, &arg, sizeof(arg));
}

int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
{
	return send_reply_ok(req, buf, size);
}

static int fuse_send_data_iov_fallback(struct fuse_session *se,
				       struct fuse_chan *ch,
				       struct iovec *iov, int iov_count,
				       struct fuse_bufvec *buf,
				       size_t len)
{
	struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
	void *mbuf;
	int res;

	/* Optimize common case */
	if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
	    !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
		/* FIXME: also avoid memory copy if there are multiple buffers
		   but none of them contain an fd */

		iov[iov_count].iov_base = buf->buf[0].mem;
		iov[iov_count].iov_len = len;
		iov_count++;
		return fuse_send_msg(se, ch, iov, iov_count);
	}

	res = posix_memalign(&mbuf, pagesize, len);
	if (res != 0)
		return res;

	mem_buf.buf[0].mem = mbuf;
	res = fuse_buf_copy(&mem_buf, buf, 0);
	if (res < 0) {
		free(mbuf);
		return -res;
	}
	len = res;

	iov[iov_count].iov_base = mbuf;
	iov[iov_count].iov_len = len;
	iov_count++;
	res = fuse_send_msg(se, ch, iov, iov_count);
	free(mbuf);

	return res;
}

struct fuse_ll_pipe {
	size_t size;
	int can_grow;
	int pipe[2];
};

static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
{
	close(llp->pipe[0]);
	close(llp->pipe[1]);
	free(llp);
}

#ifdef HAVE_SPLICE
#if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
static int fuse_pipe(int fds[2])
{
	int rv = pipe(fds);

	if (rv == -1)
		return rv;

	if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
	    fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
	    fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
	    fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
		close(fds[0]);
		close(fds[1]);
		rv = -1;
	}
	return rv;
}
#else
static int fuse_pipe(int fds[2])
{
	return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
}
#endif

static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
{
	struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
	if (llp == NULL) {
		int res;

		llp = malloc(sizeof(struct fuse_ll_pipe));
		if (llp == NULL)
			return NULL;

		res = fuse_pipe(llp->pipe);
		if (res == -1) {
			free(llp);
			return NULL;
		}

		/*
		 * The default size is 16 pages on linux
		 */
		llp->size = pagesize * 16;
		llp->can_grow = 1;

		pthread_setspecific(se->pipe_key, llp);
	}

	return llp;
}
#endif

static void fuse_ll_clear_pipe(struct fuse_session *se)
{
	struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
	if (llp) {
		pthread_setspecific(se->pipe_key, NULL);
		fuse_ll_pipe_free(llp);
	}
}

#if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
static int read_back(int fd, char *buf, size_t len)
{
	int res;

	res = read(fd, buf, len);
	if (res == -1) {
		fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
		return -EIO;
	}
	if (res != len) {
		fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
		return -EIO;
	}
	return 0;
}

static int grow_pipe_to_max(int pipefd)
{
	int max;
	int res;
	int maxfd;
	char buf[32];

	maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
	if (maxfd < 0)
		return -errno;

	res = read(maxfd, buf, sizeof(buf) - 1);
	if (res < 0) {
		int saved_errno;

		saved_errno = errno;
		close(maxfd);
		return -saved_errno;
	}
	close(maxfd);
	buf[res] = '\0';

	max = atoi(buf);
	res = fcntl(pipefd, F_SETPIPE_SZ, max);
	if (res < 0)
		return -errno;
	return max;
}

static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
			      struct iovec *iov, int iov_count,
			      struct fuse_bufvec *buf, unsigned int flags)
{
	int res;
	size_t len = fuse_buf_size(buf);
	struct fuse_out_header *out = iov[0].iov_base;
	struct fuse_ll_pipe *llp;
	int splice_flags;
	size_t pipesize;
	size_t total_buf_size;
	size_t idx;
	size_t headerlen;
	struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);

	if (se->broken_splice_nonblock)
		goto fallback;

	if (flags & FUSE_BUF_NO_SPLICE)
		goto fallback;

	total_buf_size = 0;
	for (idx = buf->idx; idx < buf->count; idx++) {
		total_buf_size += buf->buf[idx].size;
		if (idx == buf->idx)
			total_buf_size -= buf->off;
	}
	if (total_buf_size < 2 * pagesize)
		goto fallback;

	if (se->conn.proto_minor < 14 ||
	    !(se->conn.want & FUSE_CAP_SPLICE_WRITE))
		goto fallback;

	llp = fuse_ll_get_pipe(se);
	if (llp == NULL)
		goto fallback;


	headerlen = iov_length(iov, iov_count);

	out->len = headerlen + len;

	/*
	 * Heuristic for the required pipe size, does not work if the
	 * source contains less than page size fragments
	 */
	pipesize = pagesize * (iov_count + buf->count + 1) + out->len;

	if (llp->size < pipesize) {
		if (llp->can_grow) {
			res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
			if (res == -1) {
				res = grow_pipe_to_max(llp->pipe[0]);
				if (res > 0)
					llp->size = res;
				llp->can_grow = 0;
				goto fallback;
			}
			llp->size = res;
		}
		if (llp->size < pipesize)
			goto fallback;
	}


	res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
	if (res == -1)
		goto fallback;

	if (res != headerlen) {
		res = -EIO;
		fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
			 headerlen);
		goto clear_pipe;
	}

	pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
	pipe_buf.buf[0].fd = llp->pipe[1];

	res = fuse_buf_copy(&pipe_buf, buf,
			    FUSE_BUF_FORCE_SPLICE | FUSE_BUF_SPLICE_NONBLOCK);
	if (res < 0) {
		if (res == -EAGAIN || res == -EINVAL) {
			/*
			 * Should only get EAGAIN on kernels with
			 * broken SPLICE_F_NONBLOCK support (<=
			 * 2.6.35) where this error or a short read is
			 * returned even if the pipe itself is not
			 * full
			 *
			 * EINVAL might mean that splice can't handle
			 * this combination of input and output.
			 */
			if (res == -EAGAIN)
				se->broken_splice_nonblock = 1;

			pthread_setspecific(se->pipe_key, NULL);
			fuse_ll_pipe_free(llp);
			goto fallback;
		}
		res = -res;
		goto clear_pipe;
	}

	if (res != 0 && res < len) {
		struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
		void *mbuf;
		size_t now_len = res;
		/*
		 * For regular files a short count is either
		 *  1) due to EOF, or
		 *  2) because of broken SPLICE_F_NONBLOCK (see above)
		 *
		 * For other inputs it's possible that we overflowed
		 * the pipe because of small buffer fragments.
		 */

		res = posix_memalign(&mbuf, pagesize, len);
		if (res != 0)
			goto clear_pipe;

		mem_buf.buf[0].mem = mbuf;
		mem_buf.off = now_len;
		res = fuse_buf_copy(&mem_buf, buf, 0);
		if (res > 0) {
			char *tmpbuf;
			size_t extra_len = res;
			/*
			 * Trickiest case: got more data. Need to get
			 * back the data from the pipe and then fall
			 * back to regular write.
			 */
			tmpbuf = malloc(headerlen);
			if (tmpbuf == NULL) {
				free(mbuf);
				res = ENOMEM;
				goto clear_pipe;
			}
			res = read_back(llp->pipe[0], tmpbuf, headerlen);
			free(tmpbuf);
			if (res != 0) {
				free(mbuf);
				goto clear_pipe;
			}
			res = read_back(llp->pipe[0], mbuf, now_len);
			if (res != 0) {
				free(mbuf);
				goto clear_pipe;
			}
			len = now_len + extra_len;
			iov[iov_count].iov_base = mbuf;
			iov[iov_count].iov_len = len;
			iov_count++;
			res = fuse_send_msg(se, ch, iov, iov_count);
			free(mbuf);
			return res;
		}
		free(mbuf);
		res = now_len;
	}
	len = res;
	out->len = headerlen + len;

	if (se->debug) {
		fuse_log(FUSE_LOG_DEBUG,
			 "   unique: %llu, success, outsize: %i (splice)\n",
			 (unsigned long long) out->unique, out->len);
	}

	splice_flags = 0;
	if ((flags & FUSE_BUF_SPLICE_MOVE) &&
	    (se->conn.want & FUSE_CAP_SPLICE_MOVE))
		splice_flags |= SPLICE_F_MOVE;

	if (se->io != NULL && se->io->splice_send != NULL) {
		res = se->io->splice_send(llp->pipe[0], NULL,
					  ch ? ch->fd : se->fd, NULL, out->len,
					  splice_flags, se->userdata);
	} else {
		res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd, NULL,
			     out->len, splice_flags);
	}
	if (res == -1) {
		res = -errno;
		perror("fuse: splice from pipe");
		goto clear_pipe;
	}
	if (res != out->len) {
		res = -EIO;
		fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
			 res, out->len);
		goto clear_pipe;
	}
	return 0;

clear_pipe:
	fuse_ll_clear_pipe(se);
	return res;

fallback:
	return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
}
#else
static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
			      struct iovec *iov, int iov_count,
			      struct fuse_bufvec *buf, unsigned int flags)
{
	size_t len = fuse_buf_size(buf);
	(void) flags;

	return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
}
#endif

int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
		    enum fuse_buf_copy_flags flags)
{
	struct iovec iov[2];
	struct fuse_out_header out;
	int res;

	iov[0].iov_base = &out;
	iov[0].iov_len = sizeof(struct fuse_out_header);

	out.unique = req->unique;
	out.error = 0;

	res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
	if (res <= 0) {
		fuse_free_req(req);
		return res;
	} else {
		return fuse_reply_err(req, res);
	}
}

int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
{
	struct fuse_statfs_out arg;
	size_t size = req->se->conn.proto_minor < 4 ?
		FUSE_COMPAT_STATFS_SIZE : sizeof(arg);

	memset(&arg, 0, sizeof(arg));
	convert_statfs(stbuf, &arg.st);

	return send_reply_ok(req, &arg, size);
}

int fuse_reply_xattr(fuse_req_t req, size_t count)
{
	struct fuse_getxattr_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.size = count;

	return send_reply_ok(req, &arg, sizeof(arg));
}

int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
{
	struct fuse_lk_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.lk.type = lock->l_type;
	if (lock->l_type != F_UNLCK) {
		arg.lk.start = lock->l_start;
		if (lock->l_len == 0)
			arg.lk.end = OFFSET_MAX;
		else
			arg.lk.end = lock->l_start + lock->l_len - 1;
	}
	arg.lk.pid = lock->l_pid;
	return send_reply_ok(req, &arg, sizeof(arg));
}
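
/* POSIX locks use (start, len) with len == 0 meaning "to EOF", while
   the kernel interface uses an inclusive (start, end) pair; e.g. a
   lock with l_start == 100, l_len == 10 becomes start == 100,
   end == 109, and l_len == 0 becomes end == OFFSET_MAX. */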

int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
{
	struct fuse_bmap_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.block = idx;

	return send_reply_ok(req, &arg, sizeof(arg));
}

static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
						      size_t count)
{
	struct fuse_ioctl_iovec *fiov;
	size_t i;

	fiov = malloc(sizeof(fiov[0]) * count);
	if (!fiov)
		return NULL;

	for (i = 0; i < count; i++) {
		fiov[i].base = (uintptr_t) iov[i].iov_base;
		fiov[i].len = iov[i].iov_len;
	}

	return fiov;
}

int fuse_reply_ioctl_retry(fuse_req_t req,
			   const struct iovec *in_iov, size_t in_count,
			   const struct iovec *out_iov, size_t out_count)
{
	struct fuse_ioctl_out arg;
	struct fuse_ioctl_iovec *in_fiov = NULL;
	struct fuse_ioctl_iovec *out_fiov = NULL;
	struct iovec iov[4];
	size_t count = 1;
	int res;

	memset(&arg, 0, sizeof(arg));
	arg.flags |= FUSE_IOCTL_RETRY;
	arg.in_iovs = in_count;
	arg.out_iovs = out_count;
	iov[count].iov_base = &arg;
	iov[count].iov_len = sizeof(arg);
	count++;

	if (req->se->conn.proto_minor < 16) {
		if (in_count) {
			iov[count].iov_base = (void *)in_iov;
			iov[count].iov_len = sizeof(in_iov[0]) * in_count;
			count++;
		}

		if (out_count) {
			iov[count].iov_base = (void *)out_iov;
			iov[count].iov_len = sizeof(out_iov[0]) * out_count;
			count++;
		}
	} else {
		/* Can't handle non-compat 64bit ioctls on 32bit */
		if (sizeof(void *) == 4 && req->ioctl_64bit) {
			res = fuse_reply_err(req, EINVAL);
			goto out;
		}

		if (in_count) {
			in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
			if (!in_fiov)
				goto enomem;

			iov[count].iov_base = (void *)in_fiov;
			iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
			count++;
		}
		if (out_count) {
			out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
			if (!out_fiov)
				goto enomem;

			iov[count].iov_base = (void *)out_fiov;
			iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
			count++;
		}
	}

	res = send_reply_iov(req, 0, iov, count);
out:
	free(in_fiov);
	free(out_fiov);

	return res;

enomem:
	res = fuse_reply_err(req, ENOMEM);
	goto out;
}

int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
{
	struct fuse_ioctl_out arg;
	struct iovec iov[3];
	size_t count = 1;

	memset(&arg, 0, sizeof(arg));
	arg.result = result;
	iov[count].iov_base = &arg;
	iov[count].iov_len = sizeof(arg);
	count++;

	if (size) {
		iov[count].iov_base = (char *) buf;
		iov[count].iov_len = size;
		count++;
	}

	return send_reply_iov(req, 0, iov, count);
}

int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
			 int count)
{
	struct iovec *padded_iov;
	struct fuse_ioctl_out arg;
	int res;

	padded_iov = malloc((count + 2) * sizeof(struct iovec));
	if (padded_iov == NULL)
		return fuse_reply_err(req, ENOMEM);

	memset(&arg, 0, sizeof(arg));
	arg.result = result;
	padded_iov[1].iov_base = &arg;
	padded_iov[1].iov_len = sizeof(arg);

	memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));

	res = send_reply_iov(req, 0, padded_iov, count + 2);
	free(padded_iov);

	return res;
}

int fuse_reply_poll(fuse_req_t req, unsigned revents)
{
	struct fuse_poll_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.revents = revents;

	return send_reply_ok(req, &arg, sizeof(arg));
}

int fuse_reply_lseek(fuse_req_t req, off_t off)
{
	struct fuse_lseek_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.offset = off;

	return send_reply_ok(req, &arg, sizeof(arg));
}
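
/*
 * The do_*() handlers below form the request dispatch layer: each one
 * unpacks the raw kernel message (`inarg`), fills in a fuse_file_info
 * where applicable, and forwards to the corresponding se->op callback,
 * replying ENOSYS when the filesystem does not implement it.
 */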

static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	char *name = (char *) inarg;

	if (req->se->op.lookup)
		req->se->op.lookup(req, nodeid, name);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_lookup_postfilter(fuse_req_t req, fuse_ino_t nodeid,
				 uint32_t error_in, const void *inarg,
				 size_t size)
{
	if (req->se->op.lookup_postfilter) {
		char *name = (char *) inarg;
		size_t namelen = strlen(name);

		if (size != namelen + 1 + sizeof(struct fuse_entry_out)
				+ sizeof(struct fuse_entry_bpf_out)) {
			fuse_log(FUSE_LOG_ERR, "%s: Bad size\n", __func__);
			fuse_reply_err(req, EIO);
		} else {
			struct fuse_entry_out *feo = (void *) (name + namelen + 1);
			struct fuse_entry_bpf_out *febo =
				(void *) ((char *) feo + sizeof(*feo));

			req->se->op.lookup_postfilter(req, nodeid, error_in,
						      name, feo, febo);
		}
	} else
		fuse_reply_err(req, ENOSYS);
}

static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;

	if (req->se->op.forget)
		req->se->op.forget(req, nodeid, arg->nlookup);
	else
		fuse_reply_none(req);
}

static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
			    const void *inarg)
{
	struct fuse_batch_forget_in *arg = (void *) inarg;
	struct fuse_forget_one *param = (void *) PARAM(arg);
	unsigned int i;

	(void) nodeid;

	if (req->se->op.forget_multi) {
		req->se->op.forget_multi(req, arg->count,
					 (struct fuse_forget_data *) param);
	} else if (req->se->op.forget) {
		for (i = 0; i < arg->count; i++) {
			struct fuse_forget_one *forget = &param[i];
			struct fuse_req *dummy_req;

			dummy_req = fuse_ll_alloc_req(req->se);
			if (dummy_req == NULL)
				break;

			dummy_req->unique = req->unique;
			dummy_req->ctx = req->ctx;
			dummy_req->ch = NULL;

			req->se->op.forget(dummy_req, forget->nodeid,
					   forget->nlookup);
		}
		fuse_reply_none(req);
	} else {
		fuse_reply_none(req);
	}
}

static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_file_info *fip = NULL;
	struct fuse_file_info fi;

	if (req->se->conn.proto_minor >= 9) {
		struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;

		if (arg->getattr_flags & FUSE_GETATTR_FH) {
			memset(&fi, 0, sizeof(fi));
			fi.fh = arg->fh;
			fip = &fi;
		}
	}

	if (req->se->op.getattr)
		req->se->op.getattr(req, nodeid, fip);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;

	if (req->se->op.setattr) {
		struct fuse_file_info *fi = NULL;
		struct fuse_file_info fi_store;
		struct stat stbuf;
		memset(&stbuf, 0, sizeof(stbuf));
		convert_attr(arg, &stbuf);
		if (arg->valid & FATTR_FH) {
			arg->valid &= ~FATTR_FH;
			memset(&fi_store, 0, sizeof(fi_store));
			fi = &fi_store;
			fi->fh = arg->fh;
		}
		arg->valid &=
			FUSE_SET_ATTR_MODE |
			FUSE_SET_ATTR_UID |
			FUSE_SET_ATTR_GID |
			FUSE_SET_ATTR_SIZE |
			FUSE_SET_ATTR_ATIME |
			FUSE_SET_ATTR_MTIME |
			FUSE_SET_ATTR_KILL_SUID |
			FUSE_SET_ATTR_KILL_SGID |
			FUSE_SET_ATTR_ATIME_NOW |
			FUSE_SET_ATTR_MTIME_NOW |
			FUSE_SET_ATTR_CTIME;

		req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
	} else
		fuse_reply_err(req, ENOSYS);
}

static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_access_in *arg = (struct fuse_access_in *) inarg;

	if (req->se->op.access)
		req->se->op.access(req, nodeid, arg->mask);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	(void) inarg;

	if (req->se->op.readlink)
		req->se->op.readlink(req, nodeid);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_canonical_path(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	(void) inarg;

	if (req->se->op.canonical_path)
		req->se->op.canonical_path(req, nodeid);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
	char *name = PARAM(arg);

	if (req->se->conn.proto_minor >= 12)
		req->ctx.umask = arg->umask;
	else
		name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;

	if (req->se->op.mknod)
		req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;

	if (req->se->conn.proto_minor >= 12)
		req->ctx.umask = arg->umask;

	if (req->se->op.mkdir)
		req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	char *name = (char *) inarg;

	if (req->se->op.unlink)
		req->se->op.unlink(req, nodeid, name);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	char *name = (char *) inarg;

	if (req->se->op.rmdir)
		req->se->op.rmdir(req, nodeid, name);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	char *name = (char *) inarg;
	char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;

	if (req->se->op.symlink)
		req->se->op.symlink(req, linkname, nodeid, name);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
	char *oldname = PARAM(arg);
	char *newname = oldname + strlen(oldname) + 1;

	if (req->se->op.rename)
		req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
				   0);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
	char *oldname = PARAM(arg);
	char *newname = oldname + strlen(oldname) + 1;

	if (req->se->op.rename)
		req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
				   arg->flags);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_link_in *arg = (struct fuse_link_in *) inarg;

	if (req->se->op.link)
		req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_create_in *arg = (struct fuse_create_in *) inarg;

	if (req->se->op.create) {
		struct fuse_file_info fi;
		char *name = PARAM(arg);

		memset(&fi, 0, sizeof(fi));
		fi.flags = arg->flags;

		if (req->se->conn.proto_minor >= 12)
			req->ctx.umask = arg->umask;
		else
			name = (char *) inarg + sizeof(struct fuse_open_in);

		req->se->op.create(req, nodeid, name, arg->mode, &fi);
	} else
		fuse_reply_err(req, ENOSYS);
}

static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.flags = arg->flags;

	if (req->se->op.open)
		req->se->op.open(req, nodeid, &fi);
	else if (req->se->conn.want & FUSE_CAP_NO_OPEN_SUPPORT)
		fuse_reply_err(req, ENOSYS);
	else
		fuse_reply_open(req, &fi);
}

static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_read_in *arg = (struct fuse_read_in *) inarg;

	if (req->se->op.read) {
		struct fuse_file_info fi;

		memset(&fi, 0, sizeof(fi));
		fi.fh = arg->fh;
		if (req->se->conn.proto_minor >= 9) {
			fi.lock_owner = arg->lock_owner;
			fi.flags = arg->flags;
		}
		req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
	} else
		fuse_reply_err(req, ENOSYS);
}

static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
	struct fuse_file_info fi;
	char *param;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;
	fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;

	if (req->se->conn.proto_minor < 9) {
		param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
	} else {
		fi.lock_owner = arg->lock_owner;
		fi.flags = arg->flags;
		param = PARAM(arg);
	}

	if (req->se->op.write)
		req->se->op.write(req, nodeid, param, arg->size,
				  arg->offset, &fi);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
			 const struct fuse_buf *ibuf)
{
	struct fuse_session *se = req->se;
	struct fuse_bufvec bufv = {
		.buf[0] = *ibuf,
		.count = 1,
	};
	struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;
	fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;

	if (se->conn.proto_minor < 9) {
		bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
		bufv.buf[0].size -= sizeof(struct fuse_in_header) +
			FUSE_COMPAT_WRITE_IN_SIZE;
		assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
	} else {
		fi.lock_owner = arg->lock_owner;
		fi.flags = arg->flags;
		if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
			bufv.buf[0].mem = PARAM(arg);

		bufv.buf[0].size -= sizeof(struct fuse_in_header) +
			sizeof(struct fuse_write_in);
	}
	if (bufv.buf[0].size < arg->size) {
		fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
		fuse_reply_err(req, EIO);
		goto out;
	}
	bufv.buf[0].size = arg->size;

	se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);

out:
	/* Need to reset the pipe if ->write_buf() didn't consume all data */
	if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
		fuse_ll_clear_pipe(se);
}
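
/* Note: when FUSE_BUF_IS_FD is set on ibuf, the write payload still
   sits in the splice pipe rather than in memory, which is why an
   unconsumed buffer forces fuse_ll_clear_pipe() above. */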

static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;
	fi.flush = 1;
	if (req->se->conn.proto_minor >= 7)
		fi.lock_owner = arg->lock_owner;

	if (req->se->op.flush)
		req->se->op.flush(req, nodeid, &fi);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.flags = arg->flags;
	fi.fh = arg->fh;
	if (req->se->conn.proto_minor >= 8) {
		fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
		fi.lock_owner = arg->lock_owner;
	}
	if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
		fi.flock_release = 1;
		fi.lock_owner = arg->lock_owner;
	}

	if (req->se->op.release)
		req->se->op.release(req, nodeid, &fi);
	else
		fuse_reply_err(req, 0);
}

static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
	struct fuse_file_info fi;
	int datasync = arg->fsync_flags & 1;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;

	if (req->se->op.fsync)
		req->se->op.fsync(req, nodeid, datasync, &fi);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.flags = arg->flags;

	if (req->se->op.opendir)
		req->se->op.opendir(req, nodeid, &fi);
	else if (req->se->conn.want & FUSE_CAP_NO_OPENDIR_SUPPORT)
		fuse_reply_err(req, ENOSYS);
	else
		fuse_reply_open(req, &fi);
}

static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;

	if (req->se->op.readdir)
		req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;

	if (req->se->op.readdirplus)
		req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_readdir_postfilter(fuse_req_t req, fuse_ino_t nodeid,
				  uint32_t error_in, const void *inarg,
				  size_t size)
{
	struct fuse_read_in *fri = (struct fuse_read_in *) inarg;
	struct fuse_read_out *fro = (struct fuse_read_out *) (fri + 1);
	struct fuse_dirent *dirents = (struct fuse_dirent *) (fro + 1);
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.fh = fri->fh;

	if (req->se->op.readdirpostfilter)
		req->se->op.readdirpostfilter(req, nodeid, error_in, fri->offset,
					      fro->offset,
					      size - sizeof(*fri) - sizeof(*fro),
					      dirents, &fi);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.flags = arg->flags;
	fi.fh = arg->fh;

	if (req->se->op.releasedir)
		req->se->op.releasedir(req, nodeid, &fi);
	else
		fuse_reply_err(req, 0);
}

static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
	struct fuse_file_info fi;
	int datasync = arg->fsync_flags & 1;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;

	if (req->se->op.fsyncdir)
		req->se->op.fsyncdir(req, nodeid, datasync, &fi);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	(void) nodeid;
	(void) inarg;

	if (req->se->op.statfs)
		req->se->op.statfs(req, nodeid);
	else {
		struct statvfs buf = {
			.f_namemax = 255,
			.f_bsize = 512,
		};
		fuse_reply_statfs(req, &buf);
	}
}

static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_session *se = req->se;
	unsigned int xattr_ext = !!(se->conn.want & FUSE_CAP_SETXATTR_EXT);
	struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
	char *name = xattr_ext ? PARAM(arg) :
		(char *)arg + FUSE_COMPAT_SETXATTR_IN_SIZE;
	char *value = name + strlen(name) + 1;

	/* XXX: The API should be extended to support extra_flags/setxattr_flags */
	if (req->se->op.setxattr)
		req->se->op.setxattr(req, nodeid, name, value, arg->size,
				     arg->flags);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;

	if (req->se->op.getxattr)
		req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;

	if (req->se->op.listxattr)
		req->se->op.listxattr(req, nodeid, arg->size);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	char *name = (char *) inarg;

	if (req->se->op.removexattr)
		req->se->op.removexattr(req, nodeid, name);
	else
		fuse_reply_err(req, ENOSYS);
}

static void convert_fuse_file_lock(struct fuse_file_lock *fl,
				   struct flock *flock)
{
	memset(flock, 0, sizeof(struct flock));
	flock->l_type = fl->type;
	flock->l_whence = SEEK_SET;
	flock->l_start = fl->start;
	if (fl->end == OFFSET_MAX)
		flock->l_len = 0;
	else
		flock->l_len = fl->end - fl->start + 1;
	flock->l_pid = fl->pid;
}

static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
	struct fuse_file_info fi;
	struct flock flock;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;
	fi.lock_owner = arg->owner;

	convert_fuse_file_lock(&arg->lk, &flock);
	if (req->se->op.getlk)
		req->se->op.getlk(req, nodeid, &fi, &flock);
	else
		fuse_reply_err(req, ENOSYS);
}

static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
			    const void *inarg, int sleep)
{
	struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
	struct fuse_file_info fi;
	struct flock flock;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;
	fi.lock_owner = arg->owner;

	if (arg->lk_flags & FUSE_LK_FLOCK) {
		int op = 0;

		switch (arg->lk.type) {
		case F_RDLCK:
			op = LOCK_SH;
			break;
		case F_WRLCK:
			op = LOCK_EX;
			break;
		case F_UNLCK:
			op = LOCK_UN;
			break;
		}
		if (!sleep)
			op |= LOCK_NB;

		if (req->se->op.flock)
			req->se->op.flock(req, nodeid, &fi, op);
		else
			fuse_reply_err(req, ENOSYS);
	} else {
		convert_fuse_file_lock(&arg->lk, &flock);
		if (req->se->op.setlk)
			req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
		else
			fuse_reply_err(req, ENOSYS);
	}
}

static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	do_setlk_common(req, nodeid, inarg, 0);
}

static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	do_setlk_common(req, nodeid, inarg, 1);
}

static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
{
	struct fuse_req *curr;

	for (curr = se->list.next; curr != &se->list; curr = curr->next) {
		if (curr->unique == req->u.i.unique) {
			fuse_interrupt_func_t func;
			void *data;

			curr->ctr++;
			pthread_mutex_unlock(&se->lock);

			/* Ugh, ugly locking */
			pthread_mutex_lock(&curr->lock);
			pthread_mutex_lock(&se->lock);
			curr->interrupted = 1;
			func = curr->u.ni.func;
			data = curr->u.ni.data;
			pthread_mutex_unlock(&se->lock);
			if (func)
				func(curr, data);
			pthread_mutex_unlock(&curr->lock);

			pthread_mutex_lock(&se->lock);
			curr->ctr--;
			if (!curr->ctr) {
				destroy_req(curr);
			}

			return 1;
		}
	}
	for (curr = se->interrupts.next; curr != &se->interrupts;
	     curr = curr->next) {
		if (curr->u.i.unique == req->u.i.unique)
			return 1;
	}
	return 0;
}
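
/* INTERRUPT can race with the request it targets: if the request is
   still in flight, find_interrupted() fires its interrupt callback;
   otherwise do_interrupt() parks the INTERRUPT on se->interrupts so
   check_interrupt() can match it when the request does arrive. */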

static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
	struct fuse_session *se = req->se;

	(void) nodeid;
	if (se->debug)
		fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
			 (unsigned long long) arg->unique);

	req->u.i.unique = arg->unique;

	pthread_mutex_lock(&se->lock);
	if (find_interrupted(se, req)) {
		fuse_chan_put(req->ch);
		req->ch = NULL;
		destroy_req(req);
	} else
		list_add_req(req, &se->interrupts);
	pthread_mutex_unlock(&se->lock);
}

static struct fuse_req *check_interrupt(struct fuse_session *se,
					struct fuse_req *req)
{
	struct fuse_req *curr;

	for (curr = se->interrupts.next; curr != &se->interrupts;
	     curr = curr->next) {
		if (curr->u.i.unique == req->unique) {
			req->interrupted = 1;
			list_del_req(curr);
			fuse_chan_put(curr->ch);
			curr->ch = NULL;
			destroy_req(curr);
			return NULL;
		}
	}
	curr = se->interrupts.next;
	if (curr != &se->interrupts) {
		list_del_req(curr);
		list_init_req(curr);
		return curr;
	} else
		return NULL;
}
1999
2000 static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2001 {
2002 struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
2003
2004 if (req->se->op.bmap)
2005 req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
2006 else
2007 fuse_reply_err(req, ENOSYS);
2008 }
2009
2010 static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2011 {
2012 struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
2013 unsigned int flags = arg->flags;
2014 void *in_buf = arg->in_size ? PARAM(arg) : NULL;
2015 struct fuse_file_info fi;
2016
2017 if (flags & FUSE_IOCTL_DIR &&
2018 !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) {
2019 fuse_reply_err(req, ENOTTY);
2020 return;
2021 }
2022
2023 memset(&fi, 0, sizeof(fi));
2024 fi.fh = arg->fh;
2025
2026 if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
2027 !(flags & FUSE_IOCTL_32BIT)) {
2028 req->ioctl_64bit = 1;
2029 }
2030
2031 if (req->se->op.ioctl)
2032 req->se->op.ioctl(req, nodeid, arg->cmd,
2033 (void *)(uintptr_t)arg->arg, &fi, flags,
2034 in_buf, arg->in_size, arg->out_size);
2035 else
2036 fuse_reply_err(req, ENOSYS);
2037 }
2038
2039 void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
2040 {
2041 free(ph);
2042 }
2043
2044 static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2045 {
2046 struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
2047 struct fuse_file_info fi;
2048
2049 memset(&fi, 0, sizeof(fi));
2050 fi.fh = arg->fh;
2051 fi.poll_events = arg->events;
2052
2053 if (req->se->op.poll) {
2054 struct fuse_pollhandle *ph = NULL;
2055
2056 if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
2057 ph = malloc(sizeof(struct fuse_pollhandle));
2058 if (ph == NULL) {
2059 fuse_reply_err(req, ENOMEM);
2060 return;
2061 }
2062 ph->kh = arg->kh;
2063 ph->se = req->se;
2064 }
2065
2066 req->se->op.poll(req, nodeid, &fi, ph);
2067 } else {
2068 fuse_reply_err(req, ENOSYS);
2069 }
2070 }
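
/* A sketch (not part of this file) of the poll contract: when ph is
 * non-NULL the kernel asked to be notified of future readiness, so the
 * filesystem either keeps the handle for a later
 * fuse_lowlevel_notify_poll() or destroys it.  my_readable(),
 * my_store_ph() and my_take_ph() are hypothetical; POLLIN assumes
 * <poll.h>.  Kept compile-guarded. */
#if 0
static void my_poll(fuse_req_t req, fuse_ino_t ino,
		    struct fuse_file_info *fi, struct fuse_pollhandle *ph)
{
	unsigned revents = my_readable(ino) ? POLLIN : 0;

	(void) fi;
	if (ph) {
		if (revents)
			fuse_pollhandle_destroy(ph); /* already ready */
		else
			my_store_ph(ino, ph);	/* wake the kernel later */
	}
	fuse_reply_poll(req, revents);
}

/* Called by the filesystem when ino becomes readable: */
static void my_wakeup(fuse_ino_t ino)
{
	struct fuse_pollhandle *ph = my_take_ph(ino);

	if (ph) {
		fuse_lowlevel_notify_poll(ph);
		fuse_pollhandle_destroy(ph);
	}
}
#endif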
2071
2072 static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2073 {
2074 struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
2075 struct fuse_file_info fi;
2076
2077 memset(&fi, 0, sizeof(fi));
2078 fi.fh = arg->fh;
2079
2080 if (req->se->op.fallocate)
2081 req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
2082 else
2083 fuse_reply_err(req, ENOSYS);
2084 }
2085
2086 static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
2087 {
2088 struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
2089 struct fuse_file_info fi_in, fi_out;
2090
2091 memset(&fi_in, 0, sizeof(fi_in));
2092 fi_in.fh = arg->fh_in;
2093
2094 memset(&fi_out, 0, sizeof(fi_out));
2095 fi_out.fh = arg->fh_out;
2096
2097
2098 if (req->se->op.copy_file_range)
2099 req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
2100 &fi_in, arg->nodeid_out,
2101 arg->off_out, &fi_out, arg->len,
2102 arg->flags);
2103 else
2104 fuse_reply_err(req, ENOSYS);
2105 }
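
/* A sketch (not part of this file) of a pass-through copy_file_range
 * handler, assuming fi->fh holds a real file descriptor (as in the
 * libfuse passthrough examples).  On success the byte count is sent
 * back with fuse_reply_write().  Kept compile-guarded. */
#if 0
static void my_copy_file_range(fuse_req_t req, fuse_ino_t ino_in,
			       off_t off_in, struct fuse_file_info *fi_in,
			       fuse_ino_t ino_out, off_t off_out,
			       struct fuse_file_info *fi_out, size_t len,
			       int flags)
{
	ssize_t res;

	(void) ino_in; (void) ino_out;
	res = copy_file_range(fi_in->fh, &off_in, fi_out->fh, &off_out,
			      len, flags);
	if (res < 0)
		fuse_reply_err(req, errno);
	else
		fuse_reply_write(req, res);
}
#endif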
2106
2107 static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2108 {
2109 struct fuse_lseek_in *arg = (struct fuse_lseek_in *) inarg;
2110 struct fuse_file_info fi;
2111
2112 memset(&fi, 0, sizeof(fi));
2113 fi.fh = arg->fh;
2114
2115 if (req->se->op.lseek)
2116 req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
2117 else
2118 fuse_reply_err(req, ENOSYS);
2119 }
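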
2120
2121 /* Prevent bogus data races (bogus since "init" is called before
2122 * multi-threading becomes relevant) */
2123 static __attribute__((no_sanitize("thread")))
2124 void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2125 {
2126 struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
2127 struct fuse_init_out outarg;
2128 struct fuse_session *se = req->se;
2129 size_t bufsize = se->bufsize;
2130 size_t outargsize = sizeof(outarg);
2131 uint64_t inargflags = 0;
2132 uint64_t outargflags = 0;
2133 (void) nodeid;
2134 if (se->debug) {
2135 fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
2136 if (arg->major == 7 && arg->minor >= 6) {
2137 fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
2138 fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
2139 arg->max_readahead);
2140 }
2141 }
2142 se->conn.proto_major = arg->major;
2143 se->conn.proto_minor = arg->minor;
2144 se->conn.capable = 0;
2145 se->conn.want = 0;
2146
2147 memset(&outarg, 0, sizeof(outarg));
2148 outarg.major = FUSE_KERNEL_VERSION;
2149 outarg.minor = FUSE_KERNEL_MINOR_VERSION;
2150
2151 if (arg->major < 7) {
2152 fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
2153 arg->major, arg->minor);
2154 fuse_reply_err(req, EPROTO);
2155 return;
2156 }
2157
2158 if (arg->major > 7) {
2159 /* Wait for a second INIT request with a 7.X version */
2160 send_reply_ok(req, &outarg, sizeof(outarg));
2161 return;
2162 }
2163
2164 if (arg->minor >= 6) {
2165 if (arg->max_readahead < se->conn.max_readahead)
2166 se->conn.max_readahead = arg->max_readahead;
2167 inargflags = arg->flags;
2168 /* Unpatched Android kernels using the old passthrough value may
2169 * accidentally set all extended init flags without meaning to set
2170 * any. If the old passthrough value is set, ignore the extended
2171 * flags.
2172 */
2173 if ((inargflags & FUSE_INIT_EXT) && (inargflags & (1ULL << 31)))
2174 inargflags &= ~FUSE_INIT_EXT;
2175 if (inargflags & FUSE_INIT_EXT)
2176 inargflags = inargflags | (uint64_t) arg->flags2 << 32;
2177 if (inargflags & FUSE_ASYNC_READ)
2178 se->conn.capable |= FUSE_CAP_ASYNC_READ;
2179 if (inargflags & FUSE_POSIX_LOCKS)
2180 se->conn.capable |= FUSE_CAP_POSIX_LOCKS;
2181 if (inargflags & FUSE_ATOMIC_O_TRUNC)
2182 se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
2183 if (inargflags & FUSE_EXPORT_SUPPORT)
2184 se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
2185 if (inargflags & FUSE_DONT_MASK)
2186 se->conn.capable |= FUSE_CAP_DONT_MASK;
2187 if (inargflags & FUSE_FLOCK_LOCKS)
2188 se->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
2189 if (inargflags & FUSE_AUTO_INVAL_DATA)
2190 se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA;
2191 if (inargflags & FUSE_DO_READDIRPLUS)
2192 se->conn.capable |= FUSE_CAP_READDIRPLUS;
2193 if (inargflags & FUSE_READDIRPLUS_AUTO)
2194 se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO;
2195 if (inargflags & FUSE_ASYNC_DIO)
2196 se->conn.capable |= FUSE_CAP_ASYNC_DIO;
2197 if (inargflags & FUSE_WRITEBACK_CACHE)
2198 se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
2199 if (inargflags & FUSE_NO_OPEN_SUPPORT)
2200 se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT;
2201 if (inargflags & FUSE_PARALLEL_DIROPS)
2202 se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
2203 if (inargflags & FUSE_POSIX_ACL)
2204 se->conn.capable |= FUSE_CAP_POSIX_ACL;
2205 if (inargflags & FUSE_HANDLE_KILLPRIV)
2206 se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV;
2207 if (inargflags & FUSE_HANDLE_KILLPRIV_V2)
2208 se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV_V2;
2209 if (inargflags & FUSE_CACHE_SYMLINKS)
2210 se->conn.capable |= FUSE_CAP_CACHE_SYMLINKS;
2211 if (inargflags & FUSE_NO_OPENDIR_SUPPORT)
2212 se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT;
2213 if (inargflags & FUSE_EXPLICIT_INVAL_DATA)
2214 se->conn.capable |= FUSE_CAP_EXPLICIT_INVAL_DATA;
2215 if (inargflags & FUSE_SETXATTR_EXT)
2216 se->conn.capable |= FUSE_CAP_SETXATTR_EXT;
2217 if (!(inargflags & FUSE_MAX_PAGES)) {
2218 size_t max_bufsize =
2219 FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
2220 + FUSE_BUFFER_HEADER_SIZE;
2221 if (bufsize > max_bufsize) {
2222 bufsize = max_bufsize;
2223 }
2224 }
2225 if (inargflags & FUSE_DIRECT_IO_ALLOW_MMAP)
2226 se->conn.capable |= FUSE_CAP_DIRECT_IO_ALLOW_MMAP;
2227 if (arg->minor >= 38 || (inargflags & FUSE_HAS_EXPIRE_ONLY))
2228 se->conn.capable |= FUSE_CAP_EXPIRE_ONLY;
2229 if (inargflags & FUSE_PASSTHROUGH_UPSTREAM)
2230 se->conn.capable |= FUSE_CAP_PASSTHROUGH_UPSTREAM;
2231 if (inargflags & FUSE_INIT_EXT) {
2232 if (inargflags & (1ULL << 63))
2233 se->conn.capable |= FUSE_CAP_PASSTHROUGH;
2234 } else {
2235 if (inargflags & (1ULL << 31))
2236 se->conn.capable |= FUSE_CAP_PASSTHROUGH;
2237 }
2238 } else {
2239 se->conn.max_readahead = 0;
2240 }
2241
2242 if (se->conn.proto_minor >= 14) {
2243 #ifdef HAVE_SPLICE
2244 #ifdef HAVE_VMSPLICE
2245 if ((se->io == NULL) || (se->io->splice_send != NULL)) {
2246 se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
2247 }
2248 #endif
2249 if ((se->io == NULL) || (se->io->splice_receive != NULL)) {
2250 se->conn.capable |= FUSE_CAP_SPLICE_READ;
2251 }
2252 #endif
2253 }
2254 if (se->conn.proto_minor >= 18)
2255 se->conn.capable |= FUSE_CAP_IOCTL_DIR;
2256
2257 /* Default settings for modern filesystems.
2258 *
2259 * Most of these capabilities were disabled by default in
2260 * libfuse2 for backwards compatibility reasons. In libfuse3,
2261 * we can finally enable them by default (as long as they're
2262 * supported by the kernel).
2263 */
2264 #define LL_SET_DEFAULT(cond, cap) \
2265 if ((cond) && (se->conn.capable & (cap))) \
2266 se->conn.want |= (cap)
2267 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
2268 LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
2269 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
2270 LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
2271 LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
2272 LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
2273 LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
2274 FUSE_CAP_POSIX_LOCKS);
2275 LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
2276 LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
2277 LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
2278 FUSE_CAP_READDIRPLUS_AUTO);
2279
2280 /* This could safely become default, but libfuse needs an API extension
2281 * to support it
2282 * LL_SET_DEFAULT(1, FUSE_CAP_SETXATTR_EXT);
2283 */
2284
2285 se->conn.time_gran = 1;
2286
2287 if (bufsize < FUSE_MIN_READ_BUFFER) {
2288 fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n",
2289 bufsize);
2290 bufsize = FUSE_MIN_READ_BUFFER;
2291 }
2292 se->bufsize = bufsize;
2293
2294 se->got_init = 1;
2295 if (se->op.init)
2296 se->op.init(se->userdata, &se->conn);
2297
2298 if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE)
2299 se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE;
2300
2301 if (se->conn.want & (~se->conn.capable)) {
2302 fuse_log(FUSE_LOG_ERR, "fuse: error: filesystem requested capabilities "
2303 "0x%x that are not supported by kernel, aborting.\n",
2304 se->conn.want & (~se->conn.capable));
2305 fuse_reply_err(req, EPROTO);
2306 se->error = -EPROTO;
2307 fuse_session_exit(se);
2308 return;
2309 }
2310
2311 unsigned max_read_mo = get_max_read(se->mo);
2312 if (se->conn.max_read != max_read_mo) {
2313 fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
2314 "requested different maximum read size (%u vs %u)\n",
2315 se->conn.max_read, max_read_mo);
2316 fuse_reply_err(req, EPROTO);
2317 se->error = -EPROTO;
2318 fuse_session_exit(se);
2319 return;
2320 }
2321
2322 if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) {
2323 se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2324 }
2325 if (arg->flags & FUSE_MAX_PAGES) {
2326 outarg.flags |= FUSE_MAX_PAGES;
2327 outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2328 }
2329 outargflags = outarg.flags;
2330 /* Always enable big writes; this is superseded
2331 by the max_write option */
2332 outargflags |= FUSE_BIG_WRITES;
2333
2334 if (se->conn.want & FUSE_CAP_ASYNC_READ)
2335 outargflags |= FUSE_ASYNC_READ;
2336 if (se->conn.want & FUSE_CAP_POSIX_LOCKS)
2337 outargflags |= FUSE_POSIX_LOCKS;
2338 if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC)
2339 outargflags |= FUSE_ATOMIC_O_TRUNC;
2340 if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT)
2341 outargflags |= FUSE_EXPORT_SUPPORT;
2342 if (se->conn.want & FUSE_CAP_DONT_MASK)
2343 outargflags |= FUSE_DONT_MASK;
2344 if (se->conn.want & FUSE_CAP_FLOCK_LOCKS)
2345 outargflags |= FUSE_FLOCK_LOCKS;
2346 if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA)
2347 outargflags |= FUSE_AUTO_INVAL_DATA;
2348 if (se->conn.want & FUSE_CAP_READDIRPLUS)
2349 outargflags |= FUSE_DO_READDIRPLUS;
2350 if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO)
2351 outargflags |= FUSE_READDIRPLUS_AUTO;
2352 if (se->conn.want & FUSE_CAP_ASYNC_DIO)
2353 outargflags |= FUSE_ASYNC_DIO;
2354 if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE)
2355 outargflags |= FUSE_WRITEBACK_CACHE;
2356 if (se->conn.want & FUSE_CAP_PARALLEL_DIROPS)
2357 outargflags |= FUSE_PARALLEL_DIROPS;
2358 if (se->conn.want & FUSE_CAP_POSIX_ACL)
2359 outargflags |= FUSE_POSIX_ACL;
2360 if (se->conn.want & FUSE_CAP_HANDLE_KILLPRIV)
2361 outargflags |= FUSE_HANDLE_KILLPRIV;
2362 if (se->conn.want & FUSE_CAP_HANDLE_KILLPRIV_V2)
2363 outargflags |= FUSE_HANDLE_KILLPRIV_V2;
2364 if (se->conn.want & FUSE_CAP_CACHE_SYMLINKS)
2365 outargflags |= FUSE_CACHE_SYMLINKS;
2366 if (se->conn.want & FUSE_CAP_EXPLICIT_INVAL_DATA)
2367 outargflags |= FUSE_EXPLICIT_INVAL_DATA;
2368 if (se->conn.want & FUSE_CAP_SETXATTR_EXT)
2369 outargflags |= FUSE_SETXATTR_EXT;
2370 if (se->conn.want & FUSE_CAP_DIRECT_IO_ALLOW_MMAP)
2371 outargflags |= FUSE_DIRECT_IO_ALLOW_MMAP;
2372 if (se->conn.want & FUSE_CAP_PASSTHROUGH_UPSTREAM) {
2373 outargflags |= FUSE_PASSTHROUGH_UPSTREAM;
2374 /*
2375 * outarg.max_stack_depth includes the fuse stack layer,
2376 * so it is one more than max_backing_stack_depth.
2377 */
2378 outarg.max_stack_depth = se->conn.max_backing_stack_depth + 1;
2379 }
2380 if (se->conn.want & FUSE_CAP_PASSTHROUGH) {
2381 if (inargflags & FUSE_INIT_EXT)
2382 outargflags |= (1ULL << 63);
2383 else
2384 outargflags |= (1ULL << 31);
2385 }
2386 if (inargflags & FUSE_INIT_EXT) {
2387 outargflags |= FUSE_INIT_EXT;
2388 outarg.flags2 = outargflags >> 32;
2389 }
2390
2391 outarg.flags = outargflags;
2392
2393 outarg.max_readahead = se->conn.max_readahead;
2394 outarg.max_write = se->conn.max_write;
2395 if (se->conn.proto_minor >= 13) {
2396 if (se->conn.max_background >= (1 << 16))
2397 se->conn.max_background = (1 << 16) - 1;
2398 if (se->conn.congestion_threshold > se->conn.max_background)
2399 se->conn.congestion_threshold = se->conn.max_background;
2400 if (!se->conn.congestion_threshold) {
2401 se->conn.congestion_threshold =
2402 se->conn.max_background * 3 / 4;
2403 }
2404
2405 outarg.max_background = se->conn.max_background;
2406 outarg.congestion_threshold = se->conn.congestion_threshold;
2407 }
2408 if (se->conn.proto_minor >= 23)
2409 outarg.time_gran = se->conn.time_gran;
2410
2411 if (se->debug) {
2412 fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor);
2413 fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags);
2414 fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n",
2415 outarg.max_readahead);
2416 fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write);
2417 fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n",
2418 outarg.max_background);
2419 fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n",
2420 outarg.congestion_threshold);
2421 fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n",
2422 outarg.time_gran);
2423 if (se->conn.want & FUSE_CAP_PASSTHROUGH)
2424 fuse_log(FUSE_LOG_DEBUG, " max_stack_depth=%u\n",
2425 outarg.max_stack_depth);
2426 }
2427 if (arg->minor < 5)
2428 outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
2429 else if (arg->minor < 23)
2430 outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
2431
2432 send_reply_ok(req, &outarg, outargsize);
2433 }
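
/* A sketch (not part of this file) of the other side of the
 * negotiation done by do_init(): the filesystem's init() callback may
 * only add `want` flags that the kernel advertised in `capable`,
 * otherwise the want/capable mismatch check above aborts the session
 * with EPROTO.  Kept compile-guarded. */
#if 0
static void my_init(void *userdata, struct fuse_conn_info *conn)
{
	(void) userdata;

	/* Opt into writeback caching only if the kernel offers it. */
	if (conn->capable & FUSE_CAP_WRITEBACK_CACHE)
		conn->want |= FUSE_CAP_WRITEBACK_CACHE;
}
#endif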
2434
2435 static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2436 {
2437 struct fuse_session *se = req->se;
2438
2439 (void) nodeid;
2440 (void) inarg;
2441
2442 se->got_destroy = 1;
2443 se->got_init = 0;
2444 if (se->op.destroy)
2445 se->op.destroy(se->userdata);
2446
2447 send_reply_ok(req, NULL, 0);
2448 }
2449
2450 static void list_del_nreq(struct fuse_notify_req *nreq)
2451 {
2452 struct fuse_notify_req *prev = nreq->prev;
2453 struct fuse_notify_req *next = nreq->next;
2454 prev->next = next;
2455 next->prev = prev;
2456 }
2457
2458 static void list_add_nreq(struct fuse_notify_req *nreq,
2459 struct fuse_notify_req *next)
2460 {
2461 struct fuse_notify_req *prev = next->prev;
2462 nreq->next = next;
2463 nreq->prev = prev;
2464 prev->next = nreq;
2465 next->prev = nreq;
2466 }
2467
2468 static void list_init_nreq(struct fuse_notify_req *nreq)
2469 {
2470 nreq->next = nreq;
2471 nreq->prev = nreq;
2472 }
2473
2474 static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2475 const void *inarg, const struct fuse_buf *buf)
2476 {
2477 struct fuse_session *se = req->se;
2478 struct fuse_notify_req *nreq;
2479 struct fuse_notify_req *head;
2480
2481 pthread_mutex_lock(&se->lock);
2482 head = &se->notify_list;
2483 for (nreq = head->next; nreq != head; nreq = nreq->next) {
2484 if (nreq->unique == req->unique) {
2485 list_del_nreq(nreq);
2486 break;
2487 }
2488 }
2489 pthread_mutex_unlock(&se->lock);
2490
2491 if (nreq != head)
2492 nreq->reply(nreq, req, nodeid, inarg, buf);
2493 }
2494
2495 static int send_notify_iov(struct fuse_session *se, int notify_code,
2496 struct iovec *iov, int count)
2497 {
2498 struct fuse_out_header out;
2499
2500 if (!se->got_init)
2501 return -ENOTCONN;
2502
2503 out.unique = 0;
2504 out.error = notify_code;
2505 iov[0].iov_base = &out;
2506 iov[0].iov_len = sizeof(struct fuse_out_header);
2507
2508 return fuse_send_msg(se, NULL, iov, count);
2509 }
2510
2511 int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2512 {
2513 if (ph != NULL) {
2514 struct fuse_notify_poll_wakeup_out outarg;
2515 struct iovec iov[2];
2516
2517 outarg.kh = ph->kh;
2518
2519 iov[1].iov_base = &outarg;
2520 iov[1].iov_len = sizeof(outarg);
2521
2522 return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2523 } else {
2524 return 0;
2525 }
2526 }
2527
2528 int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2529 off_t off, off_t len)
2530 {
2531 struct fuse_notify_inval_inode_out outarg;
2532 struct iovec iov[2];
2533
2534 if (!se)
2535 return -EINVAL;
2536
2537 if (se->conn.proto_minor < 12)
2538 return -ENOSYS;
2539
2540 outarg.ino = ino;
2541 outarg.off = off;
2542 outarg.len = len;
2543
2544 iov[1].iov_base = &outarg;
2545 iov[1].iov_len = sizeof(outarg);
2546
2547 return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2548 }
2549
2550 /**
2551 * Notify parent attributes and the dentry matching parent/name
2552 *
2553 * Underlying base function for fuse_lowlevel_notify_inval_entry() and
2554 * fuse_lowlevel_notify_expire_entry().
2555 *
2556 * @warning
2557 * Only checks whether fuse_lowlevel_notify_inval_entry() is supported
2558 * by the kernel. Any other flag will fall back to
2559 * fuse_lowlevel_notify_inval_entry() behavior if not supported!
2560 * DO THE PROPER CHECKS IN THE DERIVED FUNCTION!
2561 *
2562 * @param se the session object
2563 * @param parent inode number
2564 * @param name file name
2565 * @param namelen strlen() of file name
2566 * @param flags flags to control if the entry should be expired or invalidated
2567 * @return zero for success, -errno for failure
2568 */
2569 static int fuse_lowlevel_notify_entry(struct fuse_session *se, fuse_ino_t parent,
2570 const char *name, size_t namelen,
2571 enum fuse_notify_entry_flags flags)
2572 {
2573 struct fuse_notify_inval_entry_out outarg;
2574 struct iovec iov[3];
2575
2576 if (!se)
2577 return -EINVAL;
2578
2579 if (se->conn.proto_minor < 12)
2580 return -ENOSYS;
2581
2582 outarg.parent = parent;
2583 outarg.namelen = namelen;
2584 outarg.flags = 0;
2585 if (flags & FUSE_LL_EXPIRE_ONLY)
2586 outarg.flags |= FUSE_EXPIRE_ONLY;
2587
2588 iov[1].iov_base = &outarg;
2589 iov[1].iov_len = sizeof(outarg);
2590 iov[2].iov_base = (void *)name;
2591 iov[2].iov_len = namelen + 1;
2592
2593 return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2594 }
2595
2596 int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
2597 const char *name, size_t namelen)
2598 {
2599 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_INVALIDATE);
2600 }
2601
2602 int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent,
2603 const char *name, size_t namelen)
2604 {
2605 if (!se)
2606 return -EINVAL;
2607
2608 if (!(se->conn.capable & FUSE_CAP_EXPIRE_ONLY))
2609 return -ENOSYS;
2610
2611 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_EXPIRE_ONLY);
2612 }
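
/* A sketch (not part of this file) of how a filesystem might drop a
 * kernel dentry after a remote change: prefer the soft expiry and fall
 * back to hard invalidation on older kernels.  my_remote_unlink_seen()
 * is a hypothetical name; note the libfuse documentation's caveats
 * about calling these notify functions from a request handler on the
 * same inode.  Kept compile-guarded. */
#if 0
static void my_remote_unlink_seen(struct fuse_session *se,
				  fuse_ino_t parent, const char *name)
{
	int err = fuse_lowlevel_notify_expire_entry(se, parent, name,
						    strlen(name));
	if (err == -ENOSYS)	/* kernel lacks FUSE_CAP_EXPIRE_ONLY */
		err = fuse_lowlevel_notify_inval_entry(se, parent, name,
						       strlen(name));
	if (err && err != -ENOENT)
		fuse_log(FUSE_LOG_ERR, "dentry notify: %s\n",
			 strerror(-err));
}
#endif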
2613
2614
2615 int fuse_lowlevel_notify_delete(struct fuse_session *se,
2616 fuse_ino_t parent, fuse_ino_t child,
2617 const char *name, size_t namelen)
2618 {
2619 struct fuse_notify_delete_out outarg;
2620 struct iovec iov[3];
2621
2622 if (!se)
2623 return -EINVAL;
2624
2625 if (se->conn.proto_minor < 18)
2626 return -ENOSYS;
2627
2628 outarg.parent = parent;
2629 outarg.child = child;
2630 outarg.namelen = namelen;
2631 outarg.padding = 0;
2632
2633 iov[1].iov_base = &outarg;
2634 iov[1].iov_len = sizeof(outarg);
2635 iov[2].iov_base = (void *)name;
2636 iov[2].iov_len = namelen + 1;
2637
2638 return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2639 }
2640
2641 int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2642 off_t offset, struct fuse_bufvec *bufv,
2643 enum fuse_buf_copy_flags flags)
2644 {
2645 struct fuse_out_header out;
2646 struct fuse_notify_store_out outarg;
2647 struct iovec iov[3];
2648 size_t size = fuse_buf_size(bufv);
2649 int res;
2650
2651 if (!se)
2652 return -EINVAL;
2653
2654 if (se->conn.proto_minor < 15)
2655 return -ENOSYS;
2656
2657 out.unique = 0;
2658 out.error = FUSE_NOTIFY_STORE;
2659
2660 outarg.nodeid = ino;
2661 outarg.offset = offset;
2662 outarg.size = size;
2663 outarg.padding = 0;
2664
2665 iov[0].iov_base = &out;
2666 iov[0].iov_len = sizeof(out);
2667 iov[1].iov_base = &outarg;
2668 iov[1].iov_len = sizeof(outarg);
2669
2670 res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2671 if (res > 0)
2672 res = -res;
2673
2674 return res;
2675 }
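
/* A sketch (not part of this file) of pushing data into the kernel's
 * page cache with fuse_lowlevel_notify_store(), using a plain memory
 * buffer.  my_push_cache() is a hypothetical name.  Kept
 * compile-guarded. */
#if 0
static int my_push_cache(struct fuse_session *se, fuse_ino_t ino,
			 off_t off, const void *data, size_t len)
{
	struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(len);

	bufv.buf[0].mem = (void *) data;
	return fuse_lowlevel_notify_store(se, ino, off, &bufv,
					  FUSE_BUF_NO_SPLICE);
}
#endif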
2676
2677 struct fuse_retrieve_req {
2678 struct fuse_notify_req nreq;
2679 void *cookie;
2680 };
2681
2682 static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
2683 fuse_req_t req, fuse_ino_t ino,
2684 const void *inarg,
2685 const struct fuse_buf *ibuf)
2686 {
2687 struct fuse_session *se = req->se;
2688 struct fuse_retrieve_req *rreq =
2689 container_of(nreq, struct fuse_retrieve_req, nreq);
2690 const struct fuse_notify_retrieve_in *arg = inarg;
2691 struct fuse_bufvec bufv = {
2692 .buf[0] = *ibuf,
2693 .count = 1,
2694 };
2695
2696 if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
2697 bufv.buf[0].mem = PARAM(arg);
2698
2699 bufv.buf[0].size -= sizeof(struct fuse_in_header) +
2700 sizeof(struct fuse_notify_retrieve_in);
2701
2702 if (bufv.buf[0].size < arg->size) {
2703 fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
2704 fuse_reply_none(req);
2705 goto out;
2706 }
2707 bufv.buf[0].size = arg->size;
2708
2709 if (se->op.retrieve_reply) {
2710 se->op.retrieve_reply(req, rreq->cookie, ino,
2711 arg->offset, &bufv);
2712 } else {
2713 fuse_reply_none(req);
2714 }
2715 out:
2716 free(rreq);
2717 if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
2718 fuse_ll_clear_pipe(se);
2719 }
2720
2721 int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
2722 size_t size, off_t offset, void *cookie)
2723 {
2724 struct fuse_notify_retrieve_out outarg;
2725 struct iovec iov[2];
2726 struct fuse_retrieve_req *rreq;
2727 int err;
2728
2729 if (!se)
2730 return -EINVAL;
2731
2732 if (se->conn.proto_minor < 15)
2733 return -ENOSYS;
2734
2735 rreq = malloc(sizeof(*rreq));
2736 if (rreq == NULL)
2737 return -ENOMEM;
2738
2739 pthread_mutex_lock(&se->lock);
2740 rreq->cookie = cookie;
2741 rreq->nreq.unique = se->notify_ctr++;
2742 rreq->nreq.reply = fuse_ll_retrieve_reply;
2743 list_add_nreq(&rreq->nreq, &se->notify_list);
2744 pthread_mutex_unlock(&se->lock);
2745
2746 outarg.notify_unique = rreq->nreq.unique;
2747 outarg.nodeid = ino;
2748 outarg.offset = offset;
2749 outarg.size = size;
2750 outarg.padding = 0;
2751
2752 iov[1].iov_base = &outarg;
2753 iov[1].iov_len = sizeof(outarg);
2754
2755 err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
2756 if (err) {
2757 pthread_mutex_lock(&se->lock);
2758 list_del_nreq(&rreq->nreq);
2759 pthread_mutex_unlock(&se->lock);
2760 free(rreq);
2761 }
2762
2763 return err;
2764 }
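
/* A sketch (not part of this file) of the retrieve round trip: the
 * filesystem asks for cached pages with
 * fuse_lowlevel_notify_retrieve(), the kernel answers with a
 * FUSE_NOTIFY_REPLY that lands in the retrieve_reply operation, which
 * must finish with fuse_reply_none().  my_consume_pages() and the
 * cookie contents are hypothetical.  Kept compile-guarded. */
#if 0
static void my_retrieve_reply(fuse_req_t req, void *cookie, fuse_ino_t ino,
			      off_t offset, struct fuse_bufvec *bufv)
{
	my_consume_pages(cookie, ino, offset, bufv);	/* hypothetical */
	fuse_reply_none(req);
}

/* With .retrieve_reply = my_retrieve_reply in the ops table: */
static int my_request_pages(struct fuse_session *se, fuse_ino_t ino,
			    void *cookie)
{
	return fuse_lowlevel_notify_retrieve(se, ino, 16384, 0, cookie);
}
#endif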
2765
2766 void *fuse_req_userdata(fuse_req_t req)
2767 {
2768 return req->se->userdata;
2769 }
2770
2771 const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
2772 {
2773 return &req->ctx;
2774 }
2775
2776 void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
2777 void *data)
2778 {
2779 pthread_mutex_lock(&req->lock);
2780 pthread_mutex_lock(&req->se->lock);
2781 req->u.ni.func = func;
2782 req->u.ni.data = data;
2783 pthread_mutex_unlock(&req->se->lock);
2784 if (req->interrupted && func)
2785 func(req, data);
2786 pthread_mutex_unlock(&req->lock);
2787 }
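
/* A sketch (not part of this file) of an interruptible handler built
 * on fuse_req_interrupt_func().  The callback may fire immediately
 * from the registration call if the request was already interrupted,
 * and it runs under the request lock, so it must not block.
 * my_wait_for_data() and my_data() are hypothetical.  Kept
 * compile-guarded. */
#if 0
static void my_interrupt_cb(fuse_req_t req, void *data)
{
	(void) req;
	*(volatile int *) data = 1;	/* just raise a flag */
}

static void my_slow_read(fuse_req_t req, fuse_ino_t ino, size_t size,
			 off_t off, struct fuse_file_info *fi)
{
	volatile int interrupted = 0;

	fuse_req_interrupt_func(req, my_interrupt_cb, (void *) &interrupted);
	while (!interrupted && !my_wait_for_data(ino, off, size, fi))
		;	/* poll or sleep briefly */
	if (interrupted)
		fuse_reply_err(req, EINTR);
	else
		fuse_reply_buf(req, my_data(ino), size);
}
#endif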
2788
2789 int fuse_req_interrupted(fuse_req_t req)
2790 {
2791 int interrupted;
2792
2793 pthread_mutex_lock(&req->se->lock);
2794 interrupted = req->interrupted;
2795 pthread_mutex_unlock(&req->se->lock);
2796
2797 return interrupted;
2798 }
2799
2800 static struct {
2801 void (*func)(fuse_req_t, fuse_ino_t, const void *);
2802 const char *name;
2803 } fuse_ll_ops[] = {
2804 [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
2805 [FUSE_FORGET] = { do_forget, "FORGET" },
2806 [FUSE_GETATTR] = { do_getattr, "GETATTR" },
2807 [FUSE_SETATTR] = { do_setattr, "SETATTR" },
2808 [FUSE_READLINK] = { do_readlink, "READLINK" },
2809 [FUSE_CANONICAL_PATH] = { do_canonical_path, "CANONICAL_PATH" },
2810 [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
2811 [FUSE_MKNOD] = { do_mknod, "MKNOD" },
2812 [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
2813 [FUSE_UNLINK] = { do_unlink, "UNLINK" },
2814 [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
2815 [FUSE_RENAME] = { do_rename, "RENAME" },
2816 [FUSE_LINK] = { do_link, "LINK" },
2817 [FUSE_OPEN] = { do_open, "OPEN" },
2818 [FUSE_READ] = { do_read, "READ" },
2819 [FUSE_WRITE] = { do_write, "WRITE" },
2820 [FUSE_STATFS] = { do_statfs, "STATFS" },
2821 [FUSE_RELEASE] = { do_release, "RELEASE" },
2822 [FUSE_FSYNC] = { do_fsync, "FSYNC" },
2823 [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
2824 [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
2825 [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
2826 [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
2827 [FUSE_FLUSH] = { do_flush, "FLUSH" },
2828 [FUSE_INIT] = { do_init, "INIT" },
2829 [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
2830 [FUSE_READDIR] = { do_readdir, "READDIR" },
2831 [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
2832 [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
2833 [FUSE_GETLK] = { do_getlk, "GETLK" },
2834 [FUSE_SETLK] = { do_setlk, "SETLK" },
2835 [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
2836 [FUSE_ACCESS] = { do_access, "ACCESS" },
2837 [FUSE_CREATE] = { do_create, "CREATE" },
2838 [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
2839 [FUSE_BMAP] = { do_bmap, "BMAP" },
2840 [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
2841 [FUSE_POLL] = { do_poll, "POLL" },
2842 [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
2843 [FUSE_DESTROY] = { do_destroy, "DESTROY" },
2844 [FUSE_NOTIFY_REPLY] = { (void *) 1, "NOTIFY_REPLY" }, /* sentinel: dispatched via do_notify_reply() in fuse_session_process_buf_int() */
2845 [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
2846 [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS"},
2847 [FUSE_RENAME2] = { do_rename2, "RENAME2" },
2848 [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
2849 [FUSE_LSEEK] = { do_lseek, "LSEEK" },
2850 [CUSE_INIT] = { cuse_lowlevel_init, "CUSE_INIT" },
2851 };
2852
2853 static struct {
2854 void (*func)( fuse_req_t, fuse_ino_t, const void *);
2855 const char *name;
2856 } fuse_ll_prefilter_ops[] = {}; /* empty: no prefilter handlers registered; the dispatch below bounds-checks this */
2857
2858 static struct {
2859 void (*func)( fuse_req_t, fuse_ino_t, uint32_t, const void *, size_t size);
2860 } fuse_ll_postfilter_ops[] = {
2861 [FUSE_LOOKUP] = {do_lookup_postfilter},
2862 [FUSE_READDIR] = {do_readdir_postfilter},
2863 };
2864
2865 #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2866
2867 static const char *opname(enum fuse_opcode opcode)
2868 {
2869 if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2870 return "???";
2871 else
2872 return fuse_ll_ops[opcode].name;
2873 }
2874
2875 static const char *opfiltername(int filter)
2876 {
2877 switch (filter) {
2878 case 0:
2879 return "NONE";
2880 case FUSE_PREFILTER:
2881 return "FUSE_PREFILTER";
2882 case FUSE_POSTFILTER:
2883 return "FUSE_POSTFILTER";
2884 case FUSE_PREFILTER | FUSE_POSTFILTER:
2885 return "FUSE_PREFILTER | FUSE_POSTFILTER";
2886 default:
2887 return "???";
2888 }
2889 }
2890
2891 static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2892 struct fuse_bufvec *src)
2893 {
2894 ssize_t res = fuse_buf_copy(dst, src, 0);
2895 if (res < 0) {
2896 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2897 return res;
2898 }
2899 if ((size_t)res < fuse_buf_size(dst)) {
2900 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2901 return -1;
2902 }
2903 return 0;
2904 }
2905
2906 void fuse_session_process_buf(struct fuse_session *se,
2907 const struct fuse_buf *buf)
2908 {
2909 fuse_session_process_buf_int(se, buf, NULL);
2910 }
2911
2912 void fuse_session_process_buf_int(struct fuse_session *se,
2913 const struct fuse_buf *buf, struct fuse_chan *ch)
2914 {
2915 const size_t write_header_size = sizeof(struct fuse_in_header) +
2916 sizeof(struct fuse_write_in);
2917 struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
2918 struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
2919 struct fuse_in_header *in;
2920 const void *inarg;
2921 struct fuse_req *req;
2922 void *mbuf = NULL;
2923 int err;
2924 int res;
2925 int opcode_filter;
2926
2927 if (buf->flags & FUSE_BUF_IS_FD) {
2928 if (buf->size < tmpbuf.buf[0].size)
2929 tmpbuf.buf[0].size = buf->size;
2930
2931 mbuf = malloc(tmpbuf.buf[0].size);
2932 if (mbuf == NULL) {
2933 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
2934 goto clear_pipe;
2935 }
2936 tmpbuf.buf[0].mem = mbuf;
2937
2938 res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2939 if (res < 0)
2940 goto clear_pipe;
2941
2942 in = mbuf;
2943 } else {
2944 in = buf->mem;
2945 }
2946
2947 /* Cleanup opcode most significant bits used by FUSE BPF */
2948 opcode_filter = in->opcode & ~FUSE_OPCODE_FILTER;
2949 in->opcode &= FUSE_OPCODE_FILTER;
2950
2951 if (se->debug) {
2952 fuse_log(FUSE_LOG_DEBUG,
2953 "unique: %llu, opcode: %s (%i), opcode filter: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
2954 (unsigned long long) in->unique,
2955 opname((enum fuse_opcode) in->opcode), in->opcode,
2956 opfiltername(opcode_filter), opcode_filter,
2957 (unsigned long long) in->nodeid, buf->size, in->pid);
2958 }
2959
2960 req = fuse_ll_alloc_req(se);
2961 if (req == NULL) {
2962 struct fuse_out_header out = {
2963 .unique = in->unique,
2964 .error = -ENOMEM,
2965 };
2966 struct iovec iov = {
2967 .iov_base = &out,
2968 .iov_len = sizeof(struct fuse_out_header),
2969 };
2970
2971 fuse_send_msg(se, ch, &iov, 1);
2972 goto clear_pipe;
2973 }
2974
2975 req->unique = in->unique;
2976 req->ctx.uid = in->uid;
2977 req->ctx.gid = in->gid;
2978 req->ctx.pid = in->pid;
2979 req->ch = ch ? fuse_chan_get(ch) : NULL;
2980
2981 err = EIO;
2982 if (!se->got_init) {
2983 enum fuse_opcode expected;
2984
2985 expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
2986 if (in->opcode != expected)
2987 goto reply_err;
2988 } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
2989 goto reply_err;
2990
2991 err = EACCES;
2992 /* Implement -o allow_root */
2993 if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
2994 in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
2995 in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
2996 in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
2997 in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
2998 in->opcode != FUSE_NOTIFY_REPLY &&
2999 in->opcode != FUSE_READDIRPLUS)
3000 goto reply_err;
3001
3002 err = ENOSYS;
3003 if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
3004 goto reply_err;
3005 if (in->opcode != FUSE_INTERRUPT) {
3006 struct fuse_req *intr;
3007 pthread_mutex_lock(&se->lock);
3008 intr = check_interrupt(se, req);
3009 list_add_req(req, &se->list);
3010 pthread_mutex_unlock(&se->lock);
3011 if (intr)
3012 fuse_reply_err(intr, EAGAIN);
3013 }
3014
3015 if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
3016 (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
3017 in->opcode != FUSE_NOTIFY_REPLY) {
3018 void *newmbuf;
3019
3020 err = ENOMEM;
3021 newmbuf = realloc(mbuf, buf->size);
3022 if (newmbuf == NULL)
3023 goto reply_err;
3024 mbuf = newmbuf;
3025
3026 tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
3027 tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;
3028
3029 res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
3030 err = -res;
3031 if (res < 0)
3032 goto reply_err;
3033
3034 in = mbuf;
3035 }
3036
3037 inarg = (void *) &in[1];
3038 if (in->opcode == FUSE_WRITE && se->op.write_buf)
3039 do_write_buf(req, in->nodeid, inarg, buf);
3040 else if (in->opcode == FUSE_NOTIFY_REPLY)
3041 do_notify_reply(req, in->nodeid, inarg, buf);
3042 else if (!opcode_filter)
3043 fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);
3044 else if (opcode_filter == FUSE_PREFILTER && in->opcode < sizeof(fuse_ll_prefilter_ops) / sizeof(fuse_ll_prefilter_ops[0]) && fuse_ll_prefilter_ops[in->opcode].func)
3045 fuse_ll_prefilter_ops[in->opcode].func(req, in->nodeid, inarg);
3046 else if (opcode_filter == FUSE_POSTFILTER && in->opcode < sizeof(fuse_ll_postfilter_ops) / sizeof(fuse_ll_postfilter_ops[0])
3047 && fuse_ll_postfilter_ops[in->opcode].func)
3048 fuse_ll_postfilter_ops[in->opcode].func(
3049 req, in->nodeid, in->error_in, inarg,
3050 buf->size - sizeof(struct fuse_in_header));
3051 else {
3052 fuse_log(FUSE_LOG_ERR, "fuse: unhandled opcode filter %i for opcode %u\n", opcode_filter, in->opcode);
3053 err = ENOSYS;
3054 goto reply_err;
3055 }
3056
3057 out_free:
3058 free(mbuf);
3059 return;
3060
3061 reply_err:
3062 fuse_reply_err(req, err);
3063 clear_pipe:
3064 if (buf->flags & FUSE_BUF_IS_FD)
3065 fuse_ll_clear_pipe(se);
3066 goto out_free;
3067 }
3068
3069 #define LL_OPTION(n,o,v) \
3070 { n, offsetof(struct fuse_session, o), v }
3071
3072 static const struct fuse_opt fuse_ll_opts[] = {
3073 LL_OPTION("debug", debug, 1),
3074 LL_OPTION("-d", debug, 1),
3075 LL_OPTION("--debug", debug, 1),
3076 LL_OPTION("allow_root", deny_others, 1),
3077 FUSE_OPT_END
3078 };
3079
3080 void fuse_lowlevel_version(void)
3081 {
3082 printf("using FUSE kernel interface version %i.%i\n",
3083 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
3084 fuse_mount_version();
3085 }
3086
3087 void fuse_lowlevel_help(void)
3088 {
3089 /* These are not all options, but the ones that are
3090 potentially of interest to an end-user */
3091 printf(
3092 " -o allow_other allow access by all users\n"
3093 " -o allow_root allow access by root\n"
3094 " -o auto_unmount auto unmount on process termination\n");
3095 }
3096
3097 void fuse_session_destroy(struct fuse_session *se)
3098 {
3099 struct fuse_ll_pipe *llp;
3100
3101 if (se->got_init && !se->got_destroy) {
3102 if (se->op.destroy)
3103 se->op.destroy(se->userdata);
3104 }
3105 llp = pthread_getspecific(se->pipe_key);
3106 if (llp != NULL)
3107 fuse_ll_pipe_free(llp);
3108 pthread_key_delete(se->pipe_key);
3109 pthread_mutex_destroy(&se->lock);
3110 free(se->cuse_data);
3111 if (se->fd != -1)
3112 close(se->fd);
3113 if (se->io != NULL)
3114 free(se->io);
3115 destroy_mount_opts(se->mo);
3116 free(se);
3117 }
3118
3119
3120 static void fuse_ll_pipe_destructor(void *data)
3121 {
3122 struct fuse_ll_pipe *llp = data;
3123 fuse_ll_pipe_free(llp);
3124 }
3125
3126 int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
3127 {
3128 return fuse_session_receive_buf_int(se, buf, NULL);
3129 }
3130
3131 int fuse_session_receive_buf_int(struct fuse_session *se, struct fuse_buf *buf,
3132 struct fuse_chan *ch)
3133 {
3134 int err;
3135 ssize_t res;
3136 #ifdef HAVE_SPLICE
3137 size_t bufsize = se->bufsize;
3138 struct fuse_ll_pipe *llp;
3139 struct fuse_buf tmpbuf;
3140
3141 if (se->conn.proto_minor < 14 || !(se->conn.want & FUSE_CAP_SPLICE_READ))
3142 goto fallback;
3143
3144 llp = fuse_ll_get_pipe(se);
3145 if (llp == NULL)
3146 goto fallback;
3147
3148 if (llp->size < bufsize) {
3149 if (llp->can_grow) {
3150 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
3151 if (res == -1) {
3152 llp->can_grow = 0;
3153 res = grow_pipe_to_max(llp->pipe[0]);
3154 if (res > 0)
3155 llp->size = res;
3156 goto fallback;
3157 }
3158 llp->size = res;
3159 }
3160 if (llp->size < bufsize)
3161 goto fallback;
3162 }
3163
3164 if (se->io != NULL && se->io->splice_receive != NULL) {
3165 res = se->io->splice_receive(ch ? ch->fd : se->fd, NULL,
3166 llp->pipe[1], NULL, bufsize, 0,
3167 se->userdata);
3168 } else {
3169 res = splice(ch ? ch->fd : se->fd, NULL, llp->pipe[1], NULL,
3170 bufsize, 0);
3171 }
3172 err = errno;
3173
3174 if (fuse_session_exited(se))
3175 return 0;
3176
3177 if (res == -1) {
3178 if (err == ENODEV) {
3179 /* Filesystem was unmounted, or connection was aborted
3180 via /sys/fs/fuse/connections */
3181 fuse_session_exit(se);
3182 return 0;
3183 }
3184 if (err != EINTR && err != EAGAIN)
3185 perror("fuse: splice from device");
3186 return -err;
3187 }
3188
3189 if ((size_t) res < sizeof(struct fuse_in_header)) {
3190 fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
3191 return -EIO;
3192 }
3193
3194 tmpbuf = (struct fuse_buf) {
3195 .size = res,
3196 .flags = FUSE_BUF_IS_FD,
3197 .fd = llp->pipe[0],
3198 };
3199
3200 /*
3201 * Don't bother with zero copy for small requests.
3202 * fuse_loop_mt() needs to check for FORGET, so this is more than
3203 * just an optimization.
3204 */
3205 if (res < sizeof(struct fuse_in_header) +
3206 sizeof(struct fuse_write_in) + pagesize) {
3207 struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
3208 struct fuse_bufvec dst = { .count = 1 };
3209
3210 if (!buf->mem) {
3211 buf->mem = malloc(se->bufsize);
3212 if (!buf->mem) {
3213 fuse_log(FUSE_LOG_ERR,
3214 "fuse: failed to allocate read buffer\n");
3215 return -ENOMEM;
3216 }
3217 }
3218 buf->size = se->bufsize;
3219 buf->flags = 0;
3220 dst.buf[0] = *buf;
3221
3222 res = fuse_buf_copy(&dst, &src, 0);
3223 if (res < 0) {
3224 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
3225 strerror(-res));
3226 fuse_ll_clear_pipe(se);
3227 return res;
3228 }
3229 if ((size_t) res < tmpbuf.size) {
3230 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
3231 fuse_ll_clear_pipe(se);
3232 return -EIO;
3233 }
3234 assert(res == tmpbuf.size);
3235
3236 } else {
3237 /* Don't overwrite buf->mem, as that would cause a leak */
3238 buf->fd = tmpbuf.fd;
3239 buf->flags = tmpbuf.flags;
3240 }
3241 buf->size = tmpbuf.size;
3242
3243 return res;
3244
3245 fallback:
3246 #endif
3247 if (!buf->mem) {
3248 buf->mem = malloc(se->bufsize);
3249 if (!buf->mem) {
3250 fuse_log(FUSE_LOG_ERR,
3251 "fuse: failed to allocate read buffer\n");
3252 return -ENOMEM;
3253 }
3254 }
3255
3256 restart:
3257 if (se->io != NULL) {
3258 /* se->io->read is never NULL if se->io is not NULL, as
3259 specified by fuse_session_custom_io() */
3260 res = se->io->read(ch ? ch->fd : se->fd, buf->mem, se->bufsize,
3261 se->userdata);
3262 } else {
3263 res = read(ch ? ch->fd : se->fd, buf->mem, se->bufsize);
3264 }
3265 err = errno;
3266
3267 if (fuse_session_exited(se))
3268 return 0;
3269 if (res == -1) {
3270 /* ENOENT means the operation was interrupted; it's safe
3271 to restart */
3272 if (err == ENOENT)
3273 goto restart;
3274
3275 if (err == ENODEV) {
3276 /* Filesystem was unmounted, or connection was aborted
3277 via /sys/fs/fuse/connections */
3278 fuse_session_exit(se);
3279 return 0;
3280 }
3281 /* Errors occurring during normal operation: EINTR (read
3282 interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
3283 umounted) */
3284 if (err != EINTR && err != EAGAIN)
3285 perror("fuse: reading device");
3286 return -err;
3287 }
3288 if ((size_t) res < sizeof(struct fuse_in_header)) {
3289 fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
3290 return -EIO;
3291 }
3292
3293 buf->size = res;
3294
3295 return res;
3296 }
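
/* A sketch (not part of this file) of the receive/process pattern
 * these two functions are designed for, mirroring what
 * fuse_session_loop() does: the same fuse_buf is reused across
 * iterations, so its memory is allocated once and freed at the end.
 * Kept compile-guarded. */
#if 0
static int my_session_loop(struct fuse_session *se)
{
	int res = 0;
	struct fuse_buf fbuf = { .mem = NULL };

	while (!fuse_session_exited(se)) {
		res = fuse_session_receive_buf(se, &fbuf);
		if (res == -EINTR)
			continue;
		if (res <= 0)
			break;
		fuse_session_process_buf(se, &fbuf);
	}
	free(fbuf.mem);
	return res < 0 ? res : 0;
}
#endif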
3297
3298 FUSE_SYMVER("_fuse_session_new_317", "_fuse_session_new@@FUSE_3.17")
3299 struct fuse_session *_fuse_session_new_317(struct fuse_args *args,
3300 const struct fuse_lowlevel_ops *op,
3301 size_t op_size,
3302 struct libfuse_version *version,
3303 void *userdata)
3304 {
3305 int err;
3306 struct fuse_session *se;
3307 struct mount_opts *mo;
3308
3309 if (sizeof(struct fuse_lowlevel_ops) < op_size) {
3310 fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
3311 op_size = sizeof(struct fuse_lowlevel_ops);
3312 }
3313
3314 if (args->argc == 0) {
3315 fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
3316 return NULL;
3317 }
3318
3319 se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
3320 if (se == NULL) {
3321 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
3322 goto out1;
3323 }
3324 se->fd = -1;
3325 se->conn.max_write = UINT_MAX;
3326 se->conn.max_readahead = UINT_MAX;
3327
3328 /* Parse options */
3329 if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
3330 goto out2;
3331 if(se->deny_others) {
3332 /* Allowing access only by root is done by instructing
3333 * kernel to allow access by everyone, and then restricting
3334 * access to root and mountpoint owner in libfuse.
3335 */
3336 // We may be adding the option a second time, but
3337 // that doesn't hurt.
3338 if(fuse_opt_add_arg(args, "-oallow_other") == -1)
3339 goto out2;
3340 }
3341 mo = parse_mount_opts(args);
3342 if (mo == NULL)
3343 goto out3;
3344
3345 if(args->argc == 1 &&
3346 args->argv[0][0] == '-') {
3347 fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
3348 "will be ignored\n");
3349 } else if (args->argc != 1) {
3350 int i;
3351 fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
3352 for(i = 1; i < args->argc-1; i++)
3353 fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
3354 fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
3355 goto out4;
3356 }
3357
3358 if (se->debug)
3359 fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);
3360
3361 se->bufsize = FUSE_MAX_MAX_PAGES * getpagesize() +
3362 FUSE_BUFFER_HEADER_SIZE;
3363
3364 list_init_req(&se->list);
3365 list_init_req(&se->interrupts);
3366 list_init_nreq(&se->notify_list);
3367 se->notify_ctr = 1;
3368 pthread_mutex_init(&se->lock, NULL);
3369
3370 err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
3371 if (err) {
3372 fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
3373 strerror(err));
3374 goto out5;
3375 }
3376
3377 memcpy(&se->op, op, op_size);
3378 se->owner = getuid();
3379 se->userdata = userdata;
3380
3381 se->mo = mo;
3382
3383 /* The FUSE server application should pass the libfuse version it was
3384 * compiled against. If a libfuse release accidentally introduces an
3385 * ABI incompatibility, it might be possible to 'fix' that at run time
3386 * by checking the version numbers.
3387 */
3388 se->version = *version;
3389
3390 return se;
3391
3392 out5:
3393 pthread_mutex_destroy(&se->lock);
3394 out4:
3395 fuse_opt_free_args(args);
3396 out3:
3397 if (mo != NULL)
3398 destroy_mount_opts(mo);
3399 out2:
3400 free(se);
3401 out1:
3402 return NULL;
3403 }
3404
3405 struct fuse_session *fuse_session_new_30(struct fuse_args *args,
3406 const struct fuse_lowlevel_ops *op,
3407 size_t op_size,
3408 void *userdata);
3409 FUSE_SYMVER("fuse_session_new_30", "fuse_session_new@FUSE_3.0")
3410 struct fuse_session *fuse_session_new_30(struct fuse_args *args,
3411 const struct fuse_lowlevel_ops *op,
3412 size_t op_size,
3413 void *userdata)
3414 {
3415 /* unknown version */
3416 struct libfuse_version version = { 0 };
3417
3418 return _fuse_session_new_317(args, op, op_size, &version, userdata);
3419 }
3420
3421 int fuse_session_custom_io(struct fuse_session *se, const struct fuse_custom_io *io,
3422 int fd)
3423 {
3424 if (fd < 0) {
3425 fuse_log(FUSE_LOG_ERR, "Invalid file descriptor value %d passed to "
3426 "fuse_session_custom_io()\n", fd);
3427 return -EBADF;
3428 }
3429 if (io == NULL) {
3430 fuse_log(FUSE_LOG_ERR, "No custom IO passed to "
3431 "fuse_session_custom_io()\n");
3432 return -EINVAL;
3433 } else if (io->read == NULL || io->writev == NULL) {
3434 /* If the user provides their own file descriptor, we can't
3435 guarantee that the default io operations performed by libfuse
3436 will function properly. Therefore, we require the user to
3437 implement these io operations when using custom io. */
3438 fuse_log(FUSE_LOG_ERR, "io passed to fuse_session_custom_io() must "
3439 "implement both io->read() and io->writev()\n");
3440 return -EINVAL;
3441 }
3442
3443 se->io = malloc(sizeof(struct fuse_custom_io));
3444 if (se->io == NULL) {
3445 fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for custom io. "
3446 "Error: %s\n", strerror(errno));
3447 return -errno;
3448 }
3449
3450 se->fd = fd;
3451 *se->io = *io;
3452 return 0;
3453 }
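
/* A sketch (not part of this file) of a custom io implementation.
 * The read() signature matches the call in
 * fuse_session_receive_buf_int() above; the writev() signature is
 * assumed from the struct fuse_custom_io declaration.  Leaving the
 * splice callbacks NULL disables splice support, per the capability
 * checks in do_init().  my_socket_fd is hypothetical; writev(2) needs
 * <sys/uio.h>.  Kept compile-guarded. */
#if 0
static ssize_t my_read(int fd, void *buf, size_t buf_len, void *userdata)
{
	(void) userdata;
	return read(fd, buf, buf_len);
}

static ssize_t my_writev(int fd, struct iovec *iov, int count,
			 void *userdata)
{
	(void) userdata;
	return writev(fd, iov, count);
}

static const struct fuse_custom_io my_io = {
	.read = my_read,
	.writev = my_writev,
	/* splice callbacks left NULL: splice support stays disabled */
};

/* After fuse_session_new():
 *	fuse_session_custom_io(se, &my_io, my_socket_fd);
 */
#endif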
3454
3455 int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
3456 {
3457 int fd;
3458
3459 /*
3460 * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
3461 * would ensue.
3462 */
3463 do {
3464 fd = open("/dev/null", O_RDWR);
3465 if (fd > 2)
3466 close(fd);
3467 } while (fd >= 0 && fd <= 2);
3468
3469 /*
3470 * To allow FUSE daemons to run without privileges, the caller may open
3471 * /dev/fuse before launching the file system and pass on the file
3472 * descriptor by specifying /dev/fd/N as the mount point. Note that the
3473 * parent process takes care of performing the mount in this case.
3474 */
3475 fd = fuse_mnt_parse_fuse_fd(mountpoint);
3476 if (fd != -1) {
3477 if (fcntl(fd, F_GETFD) == -1) {
3478 fuse_log(FUSE_LOG_ERR,
3479 "fuse: Invalid file descriptor /dev/fd/%u\n",
3480 fd);
3481 return -1;
3482 }
3483 se->fd = fd;
3484 return 0;
3485 }
3486
3487 /* Open channel */
3488 fd = fuse_kern_mount(mountpoint, se->mo);
3489 if (fd == -1)
3490 return -1;
3491 se->fd = fd;
3492
3493 /* Save mountpoint */
3494 se->mountpoint = strdup(mountpoint);
3495 if (se->mountpoint == NULL)
3496 goto error_out;
3497
3498 return 0;
3499
3500 error_out:
3501 fuse_kern_unmount(mountpoint, fd);
3502 return -1;
3503 }
3504
3505 int fuse_session_fd(struct fuse_session *se)
3506 {
3507 return se->fd;
3508 }
3509
3510 void fuse_session_unmount(struct fuse_session *se)
3511 {
3512 if (se->mountpoint != NULL) {
3513 fuse_kern_unmount(se->mountpoint, se->fd);
3514 se->fd = -1;
3515 free(se->mountpoint);
3516 se->mountpoint = NULL;
3517 }
3518 }
3519
3520 #ifdef linux
3521 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3522 {
3523 char *buf;
3524 size_t bufsize = 1024;
3525 char path[128];
3526 int ret;
3527 int fd;
3528 unsigned long pid = req->ctx.pid;
3529 char *s;
3530
3531 sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3532
3533 retry:
3534 buf = malloc(bufsize);
3535 if (buf == NULL)
3536 return -ENOMEM;
3537
3538 ret = -EIO;
3539 fd = open(path, O_RDONLY);
3540 if (fd == -1)
3541 goto out_free;
3542
3543 ret = read(fd, buf, bufsize);
3544 close(fd);
3545 if (ret < 0) {
3546 ret = -EIO;
3547 goto out_free;
3548 }
3549
3550 if ((size_t)ret == bufsize) {
3551 free(buf);
3552 bufsize *= 4;
3553 goto retry;
3554 }
3555
3556 ret = -EIO;
3557 s = strstr(buf, "\nGroups:");
3558 if (s == NULL)
3559 goto out_free;
3560
3561 s += 8;
3562 ret = 0;
3563 while (1) {
3564 char *end;
3565 unsigned long val = strtoul(s, &end, 0);
3566 if (end == s)
3567 break;
3568
3569 s = end;
3570 if (ret < size)
3571 list[ret] = val;
3572 ret++;
3573 }
3574
3575 out_free:
3576 free(buf);
3577 return ret;
3578 }
3579 #else /* linux */
3580 /*
3581 * This is currently not implemented on platforms other than Linux...
3582 */
3583 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3584 {
3585 (void) req; (void) size; (void) list;
3586 return -ENOSYS;
3587 }
3588 #endif
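
/* A sketch (not part of this file) of a typical caller: check whether
 * the requesting process belongs to a group.  Note the return
 * convention above: the total number of groups is returned even when
 * it exceeds the array size, so a real caller might retry with a
 * larger array.  my_req_in_group() is a hypothetical name.  Kept
 * compile-guarded. */
#if 0
static int my_req_in_group(fuse_req_t req, gid_t gid)
{
	gid_t list[64];
	int i, n = fuse_req_getgroups(req, 64, list);

	if (n < 0)	/* -ENOSYS on non-Linux, -EIO/-ENOMEM on error */
		return 0;
	if (n > 64)	/* more groups exist than fit in the array */
		n = 64;
	for (i = 0; i < n; i++)
		if (list[i] == gid)
			return 1;
	return 0;
}
#endif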
3589
3590 /* Prevent spurious data race warning - we don't care
3591 * about races for this flag */
3592 __attribute__((no_sanitize_thread))
3593 void fuse_session_exit(struct fuse_session *se)
3594 {
3595 se->exited = 1;
3596 }
3597
3598 __attribute__((no_sanitize_thread))
3599 void fuse_session_reset(struct fuse_session *se)
3600 {
3601 se->exited = 0;
3602 se->error = 0;
3603 }
3604
3605 __attribute__((no_sanitize_thread))
3606 int fuse_session_exited(struct fuse_session *se)
3607 {
3608 return se->exited;
3609 }
3610