/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
  Copyright (C) 2017 Nikolaus Rath <Nikolaus@rath.org>
  Copyright (C) 2018 Valve, Inc

  This program can be distributed under the terms of the GNU GPLv2.
  See the file COPYING.
*/

/** @file
 *
 * This is a "high-performance" version of passthrough_ll.c. While
 * passthrough_ll.c is designed to be as simple as possible, this
 * example is intended to be as efficient and correct as possible.
 *
 * passthrough_hp.cc mirrors a specified "source" directory under a
 * specified mountpoint with as much fidelity and performance as
 * possible.
 *
 * If --nocache is specified, the source directory may be changed
 * directly even while mounted and the filesystem will continue
 * to work correctly.
 *
 * Without --nocache, the source directory is assumed to be modified
 * only through the passthrough filesystem. This enables much better
 * performance, but if changes are made directly to the source, they
 * may not be immediately visible under the mountpoint, and further
 * access to the mountpoint may result in incorrect behavior,
 * including data loss.
 *
 * On its own, this filesystem fulfills no practical purpose. It is
 * intended as a template upon which additional functionality can be
 * built.
 *
 * Unless --nocache is specified, it is only possible to write to files
 * for which the mounting user has read permissions. This is because
 * the writeback cache requires the kernel to be able to issue read
 * requests for all files (which the passthrough filesystem cannot
 * satisfy if it can't read the file in the underlying filesystem).
 *
 * ## Source code ##
 * \include passthrough_hp.cc
 */
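//
// A minimal way to build and try this example might look as follows
// (assuming a libfuse 3.x installation with pkg-config metadata and a
// copy of cxxopts.hpp on the include path; adjust names and flags as
// needed for your system):
//
//     g++ -Wall -std=c++11 passthrough_hp.cc `pkg-config fuse3 --cflags --libs` -o passthrough_hp
//     ./passthrough_hp <source directory> <mountpoint>
//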

#define FUSE_USE_VERSION 35

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

// C includes
#include <dirent.h>
#include <err.h>
#include <errno.h>
#include <ftw.h>
#include <fuse_lowlevel.h>
#include <inttypes.h>
#include <string.h>
#include <sys/file.h>
#include <sys/resource.h>
#include <sys/xattr.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <limits.h>

// C++ includes
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <list>
#include <unordered_map>
#include "cxxopts.hpp"
#include <mutex>
#include <fstream>
#include <thread>
#include <iomanip>

using namespace std;

/* We are re-using pointers to our Inode and DirHandle objects as
   inodes and file handles. This means that we must be able to store
   a pointer in both a fuse_ino_t variable and a uint64_t variable
   (used for file handles). */
static_assert(sizeof(fuse_ino_t) >= sizeof(void*),
              "void* must fit into fuse_ino_t");
static_assert(sizeof(fuse_ino_t) >= sizeof(uint64_t),
              "fuse_ino_t must be at least 64 bits");


/* Forward declarations */
struct Inode;
static Inode& get_inode(fuse_ino_t ino);
static void forget_one(fuse_ino_t ino, uint64_t n);

// Uniquely identifies a file in the source directory tree. This could
// be simplified to just ino_t since we require the source directory
// not to contain any mountpoints. This hasn't been done yet in case
// we need to reconsider this constraint (but relaxing this would have
// the drawback that we can no longer re-use inode numbers, and thus
// readdir() would need to do a full lookup() in order to report the
// right inode number).
typedef std::pair<ino_t, dev_t> SrcId;

// Define a hash function for SrcId
namespace std {
    template<>
    struct hash<SrcId> {
        size_t operator()(const SrcId& id) const {
            return hash<ino_t>{}(id.first) ^ hash<dev_t>{}(id.second);
        }
    };
}
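// Note: XOR-combining the two member hashes is good enough here. Since
// the source tree must not contain mountpoints, every file has the same
// dev_t anyway, so the hash is effectively determined by the inode
// number alone.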

// Maps files in the source directory tree to inodes
typedef std::unordered_map<SrcId, Inode> InodeMap;

struct Inode {
    int fd {-1};
    dev_t src_dev {0};
    ino_t src_ino {0};
    int generation {0};
    uint64_t nopen {0};
    uint64_t nlookup {0};
    std::mutex m;

    // Delete copy constructor and assignments. We could implement
    // move if we need it.
    Inode() = default;
    Inode(const Inode&) = delete;
    Inode(Inode&& inode) = delete;
    Inode& operator=(Inode&& inode) = delete;
    Inode& operator=(const Inode&) = delete;

    ~Inode() {
        if(fd > 0)
            close(fd);
    }
};

struct Fs {
    // Must be acquired *after* any Inode.m locks.
    std::mutex mutex;
    InodeMap inodes; // protected by mutex
    Inode root;
    double timeout;
    bool debug;
    std::string source;
    size_t blocksize;
    dev_t src_dev;
    bool nosplice;
    bool nocache;
};
static Fs fs{};


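// Flags passed to fuse_buf_copy() / fuse_reply_data(): force a plain
// userspace copy when splicing has been disabled with --nosplice.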
#define FUSE_BUF_COPY_FLAGS                      \
        (fs.nosplice ?                           \
            FUSE_BUF_NO_SPLICE :                 \
            static_cast<fuse_buf_copy_flags>(0))


static Inode& get_inode(fuse_ino_t ino) {
    if (ino == FUSE_ROOT_ID)
        return fs.root;

    Inode* inode = reinterpret_cast<Inode*>(ino);
    if(inode->fd == -1) {
        cerr << "INTERNAL ERROR: Unknown inode " << ino << endl;
        abort();
    }
    return *inode;
}


static int get_fs_fd(fuse_ino_t ino) {
    int fd = get_inode(ino).fd;
    return fd;
}


static void sfs_init(void *userdata, fuse_conn_info *conn) {
    (void)userdata;
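    // Lookups of "." and ".." are handled by this filesystem, so
    // advertise the export capability (needed e.g. for exporting the
    // mountpoint over NFS) when the kernel supports it.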
    if (conn->capable & FUSE_CAP_EXPORT_SUPPORT)
        conn->want |= FUSE_CAP_EXPORT_SUPPORT;

    if (fs.timeout && conn->capable & FUSE_CAP_WRITEBACK_CACHE)
        conn->want |= FUSE_CAP_WRITEBACK_CACHE;

    if (conn->capable & FUSE_CAP_FLOCK_LOCKS)
        conn->want |= FUSE_CAP_FLOCK_LOCKS;

    // Use splicing if supported. Since we are using writeback caching
    // and readahead, individual requests should have a decent size so
    // that splicing between fd's is well worth it.
    if (conn->capable & FUSE_CAP_SPLICE_WRITE && !fs.nosplice)
        conn->want |= FUSE_CAP_SPLICE_WRITE;
    if (conn->capable & FUSE_CAP_SPLICE_READ && !fs.nosplice)
        conn->want |= FUSE_CAP_SPLICE_READ;
}


static void sfs_getattr(fuse_req_t req, fuse_ino_t ino, fuse_file_info *fi) {
    (void)fi;
    Inode& inode = get_inode(ino);
    struct stat attr;
    auto res = fstatat(inode.fd, "", &attr,
                       AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW);
    if (res == -1) {
        fuse_reply_err(req, errno);
        return;
    }
    fuse_reply_attr(req, &attr, fs.timeout);
}


static void do_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
                       int valid, struct fuse_file_info* fi) {
    Inode& inode = get_inode(ino);
    int ifd = inode.fd;
    int res;

    if (valid & FUSE_SET_ATTR_MODE) {
        if (fi) {
            res = fchmod(fi->fh, attr->st_mode);
        } else {
            char procname[64];
            sprintf(procname, "/proc/self/fd/%i", ifd);
            res = chmod(procname, attr->st_mode);
        }
        if (res == -1)
            goto out_err;
    }
    if (valid & (FUSE_SET_ATTR_UID | FUSE_SET_ATTR_GID)) {
        uid_t uid = (valid & FUSE_SET_ATTR_UID) ? attr->st_uid : static_cast<uid_t>(-1);
        gid_t gid = (valid & FUSE_SET_ATTR_GID) ? attr->st_gid : static_cast<gid_t>(-1);

        res = fchownat(ifd, "", uid, gid, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW);
        if (res == -1)
            goto out_err;
    }
    if (valid & FUSE_SET_ATTR_SIZE) {
        if (fi) {
            res = ftruncate(fi->fh, attr->st_size);
        } else {
            char procname[64];
            sprintf(procname, "/proc/self/fd/%i", ifd);
            res = truncate(procname, attr->st_size);
        }
        if (res == -1)
            goto out_err;
    }
    if (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) {
        struct timespec tv[2];

        tv[0].tv_sec = 0;
        tv[1].tv_sec = 0;
        tv[0].tv_nsec = UTIME_OMIT;
        tv[1].tv_nsec = UTIME_OMIT;

        if (valid & FUSE_SET_ATTR_ATIME_NOW)
            tv[0].tv_nsec = UTIME_NOW;
        else if (valid & FUSE_SET_ATTR_ATIME)
            tv[0] = attr->st_atim;

        if (valid & FUSE_SET_ATTR_MTIME_NOW)
            tv[1].tv_nsec = UTIME_NOW;
        else if (valid & FUSE_SET_ATTR_MTIME)
            tv[1] = attr->st_mtim;

        if (fi)
            res = futimens(fi->fh, tv);
        else {
#ifdef HAVE_UTIMENSAT
            char procname[64];
            sprintf(procname, "/proc/self/fd/%i", ifd);
            res = utimensat(AT_FDCWD, procname, tv, 0);
#else
            res = -1;
            errno = EOPNOTSUPP;
#endif
        }
        if (res == -1)
            goto out_err;
    }
    return sfs_getattr(req, ino, fi);

out_err:
    fuse_reply_err(req, errno);
}


static void sfs_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
                        int valid, fuse_file_info *fi) {
    (void) ino;
    do_setattr(req, ino, attr, valid, fi);
}


static int do_lookup(fuse_ino_t parent, const char *name,
                     fuse_entry_param *e) {
    if (fs.debug)
        cerr << "DEBUG: lookup(): name=" << name
             << ", parent=" << parent << endl;
    memset(e, 0, sizeof(*e));
    e->attr_timeout = fs.timeout;
    e->entry_timeout = fs.timeout;

    auto newfd = openat(get_fs_fd(parent), name, O_PATH | O_NOFOLLOW);
    if (newfd == -1)
        return errno;

    auto res = fstatat(newfd, "", &e->attr, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW);
    if (res == -1) {
        auto saveerr = errno;
        close(newfd);
        if (fs.debug)
            cerr << "DEBUG: lookup(): fstatat failed" << endl;
        return saveerr;
    }

    if (e->attr.st_dev != fs.src_dev) {
        cerr << "WARNING: Mountpoints in the source directory tree will be hidden." << endl;
        return ENOTSUP;
    } else if (e->attr.st_ino == FUSE_ROOT_ID) {
        cerr << "ERROR: Source directory tree must not include inode "
             << FUSE_ROOT_ID << endl;
        return EIO;
    }

    SrcId id {e->attr.st_ino, e->attr.st_dev};
    unique_lock<mutex> fs_lock {fs.mutex};
    Inode* inode_p;
    try {
        inode_p = &fs.inodes[id];
    } catch (std::bad_alloc&) {
        return ENOMEM;
    }
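    // Note: fs.inodes[id] default-constructs a new Inode (with fd == -1)
    // if the key was not present yet; the try/catch above only guards
    // against allocation failure.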
    e->ino = reinterpret_cast<fuse_ino_t>(inode_p);
    Inode& inode {*inode_p};
    e->generation = inode.generation;

    if (inode.fd == -ENOENT) { // found unlinked inode
        if (fs.debug)
            cerr << "DEBUG: lookup(): inode " << e->attr.st_ino
                 << " recycled; generation=" << inode.generation << endl;
        /* fallthrough to new inode but keep existing inode.nlookup */
    }

    if (inode.fd > 0) { // found existing inode
        fs_lock.unlock();
        if (fs.debug)
            cerr << "DEBUG: lookup(): inode " << e->attr.st_ino
                 << " (userspace) already known; fd = " << inode.fd << endl;
        lock_guard<mutex> g {inode.m};
        inode.nlookup++;
        close(newfd);
    } else { // no existing inode
        /* This is just here to make Helgrind happy. It violates the
           lock ordering requirement (inode.m must be acquired before
           fs.mutex), but this is of no consequence because at this
           point no other thread has access to the inode mutex */
        lock_guard<mutex> g {inode.m};
        inode.src_ino = e->attr.st_ino;
        inode.src_dev = e->attr.st_dev;
        inode.nlookup++;
        inode.fd = newfd;
        fs_lock.unlock();

        if (fs.debug)
            cerr << "DEBUG: lookup(): created userspace inode " << e->attr.st_ino
                 << "; fd = " << inode.fd << endl;
    }

    return 0;
}


static void sfs_lookup(fuse_req_t req, fuse_ino_t parent, const char *name) {
    fuse_entry_param e {};
    auto err = do_lookup(parent, name, &e);
    if (err == ENOENT) {
        e.attr_timeout = fs.timeout;
        e.entry_timeout = fs.timeout;
        e.ino = e.attr.st_ino = 0;
        fuse_reply_entry(req, &e);
    } else if (err) {
        if (err == ENFILE || err == EMFILE)
            cerr << "ERROR: Reached maximum number of file descriptors." << endl;
        fuse_reply_err(req, err);
    } else {
        fuse_reply_entry(req, &e);
    }
}


static void mknod_symlink(fuse_req_t req, fuse_ino_t parent,
                          const char *name, mode_t mode, dev_t rdev,
                          const char *link) {
    int res;
    Inode& inode_p = get_inode(parent);
    auto saverr = ENOMEM;

    if (S_ISDIR(mode))
        res = mkdirat(inode_p.fd, name, mode);
    else if (S_ISLNK(mode))
        res = symlinkat(link, inode_p.fd, name);
    else
        res = mknodat(inode_p.fd, name, mode, rdev);
    saverr = errno;
    if (res == -1)
        goto out;

    fuse_entry_param e;
    saverr = do_lookup(parent, name, &e);
    if (saverr)
        goto out;

    fuse_reply_entry(req, &e);
    return;

out:
    if (saverr == ENFILE || saverr == EMFILE)
        cerr << "ERROR: Reached maximum number of file descriptors." << endl;
    fuse_reply_err(req, saverr);
}


static void sfs_mknod(fuse_req_t req, fuse_ino_t parent, const char *name,
                      mode_t mode, dev_t rdev) {
    mknod_symlink(req, parent, name, mode, rdev, nullptr);
}


static void sfs_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name,
                      mode_t mode) {
    mknod_symlink(req, parent, name, S_IFDIR | mode, 0, nullptr);
}


static void sfs_symlink(fuse_req_t req, const char *link, fuse_ino_t parent,
                        const char *name) {
    mknod_symlink(req, parent, name, S_IFLNK, 0, link);
}


static void sfs_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t parent,
                     const char *name) {
    Inode& inode = get_inode(ino);
    Inode& inode_p = get_inode(parent);
    fuse_entry_param e {};

    e.attr_timeout = fs.timeout;
    e.entry_timeout = fs.timeout;

    char procname[64];
    sprintf(procname, "/proc/self/fd/%i", inode.fd);
    auto res = linkat(AT_FDCWD, procname, inode_p.fd, name, AT_SYMLINK_FOLLOW);
    if (res == -1) {
        fuse_reply_err(req, errno);
        return;
    }

    res = fstatat(inode.fd, "", &e.attr, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW);
    if (res == -1) {
        fuse_reply_err(req, errno);
        return;
    }
    e.ino = reinterpret_cast<fuse_ino_t>(&inode);
    {
        lock_guard<mutex> g {inode.m};
        inode.nlookup++;
    }

    fuse_reply_entry(req, &e);
    return;
}


static void sfs_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name) {
    Inode& inode_p = get_inode(parent);
    lock_guard<mutex> g {inode_p.m};
    auto res = unlinkat(inode_p.fd, name, AT_REMOVEDIR);
    fuse_reply_err(req, res == -1 ? errno : 0);
}


static void sfs_rename(fuse_req_t req, fuse_ino_t parent, const char *name,
                       fuse_ino_t newparent, const char *newname,
                       unsigned int flags) {
    Inode& inode_p = get_inode(parent);
    Inode& inode_np = get_inode(newparent);
    if (flags) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    auto res = renameat(inode_p.fd, name, inode_np.fd, newname);
    fuse_reply_err(req, res == -1 ? errno : 0);
}


static void sfs_unlink(fuse_req_t req, fuse_ino_t parent, const char *name) {
    Inode& inode_p = get_inode(parent);
    // Release inode.fd before last unlink like nfsd EXPORT_OP_CLOSE_BEFORE_UNLINK
    // to test reused inode numbers.
    // Skip this when inode has an open file and when writeback cache is enabled.
    if (!fs.timeout) {
        fuse_entry_param e;
        auto err = do_lookup(parent, name, &e);
        if (err) {
            fuse_reply_err(req, err);
            return;
        }
        if (e.attr.st_nlink == 1) {
            Inode& inode = get_inode(e.ino);
            lock_guard<mutex> g {inode.m};
            if (inode.fd > 0 && !inode.nopen) {
                if (fs.debug)
                    cerr << "DEBUG: unlink: release inode " << e.attr.st_ino
                         << "; fd=" << inode.fd << endl;
                lock_guard<mutex> g_fs {fs.mutex};
                close(inode.fd);
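                // fd == -ENOENT marks the inode as unlinked but still
                // remembered by the kernel; bumping the generation ensures
                // that a later reuse of this inode number is not mistaken
                // for the old file.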
                inode.fd = -ENOENT;
                inode.generation++;
            }
        }
    }
    auto res = unlinkat(inode_p.fd, name, 0);
    fuse_reply_err(req, res == -1 ? errno : 0);
}


static void forget_one(fuse_ino_t ino, uint64_t n) {
    Inode& inode = get_inode(ino);
    unique_lock<mutex> l {inode.m};

    if(n > inode.nlookup) {
        cerr << "INTERNAL ERROR: Negative lookup count for inode "
             << inode.src_ino << endl;
        abort();
    }
    inode.nlookup -= n;
    if (!inode.nlookup) {
        if (fs.debug)
            cerr << "DEBUG: forget: cleaning up inode " << inode.src_ino << endl;
        {
            lock_guard<mutex> g_fs {fs.mutex};
            l.unlock();
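            // Safe to erase now: the inode mutex was released above, and
            // erase() destroys the Inode object together with that mutex.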
            fs.inodes.erase({inode.src_ino, inode.src_dev});
        }
    } else if (fs.debug)
        cerr << "DEBUG: forget: inode " << inode.src_ino
             << " lookup count now " << inode.nlookup << endl;
}

static void sfs_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup) {
    forget_one(ino, nlookup);
    fuse_reply_none(req);
}


static void sfs_forget_multi(fuse_req_t req, size_t count,
                             fuse_forget_data *forgets) {
    for (size_t i = 0; i < count; i++)
        forget_one(forgets[i].ino, forgets[i].nlookup);
    fuse_reply_none(req);
}


static void sfs_readlink(fuse_req_t req, fuse_ino_t ino) {
    Inode& inode = get_inode(ino);
    char buf[PATH_MAX + 1];
    auto res = readlinkat(inode.fd, "", buf, sizeof(buf));
    if (res == -1)
        fuse_reply_err(req, errno);
    else if (res == sizeof(buf))
        fuse_reply_err(req, ENAMETOOLONG);
    else {
        buf[res] = '\0';
        fuse_reply_readlink(req, buf);
    }
}


struct DirHandle {
    DIR *dp {nullptr};
    off_t offset;

    DirHandle() = default;
    DirHandle(const DirHandle&) = delete;
    DirHandle& operator=(const DirHandle&) = delete;

    ~DirHandle() {
        if(dp)
            closedir(dp);
    }
};


static DirHandle *get_dir_handle(fuse_file_info *fi) {
    return reinterpret_cast<DirHandle*>(fi->fh);
}


static void sfs_opendir(fuse_req_t req, fuse_ino_t ino, fuse_file_info *fi) {
    Inode& inode = get_inode(ino);
    auto d = new (nothrow) DirHandle;
    if (d == nullptr) {
        fuse_reply_err(req, ENOMEM);
        return;
    }

    // Make Helgrind happy - it can't know that there's an implicit
    // synchronization due to the fact that other threads cannot
    // access d until we've called fuse_reply_*.
    lock_guard<mutex> g {inode.m};

    auto fd = openat(inode.fd, ".", O_RDONLY);
    if (fd == -1)
        goto out_errno;

    // On success, dir stream takes ownership of fd, so we
    // do not have to close it.
    d->dp = fdopendir(fd);
    if(d->dp == nullptr)
        goto out_errno;

    d->offset = 0;

    fi->fh = reinterpret_cast<uint64_t>(d);
    if(fs.timeout) {
        fi->keep_cache = 1;
        fi->cache_readdir = 1;
    }
    fuse_reply_open(req, fi);
    return;

out_errno:
    auto error = errno;
    delete d;
    if (error == ENFILE || error == EMFILE)
        cerr << "ERROR: Reached maximum number of file descriptors." << endl;
    fuse_reply_err(req, error);
}


static bool is_dot_or_dotdot(const char *name) {
    return name[0] == '.' &&
           (name[1] == '\0' || (name[1] == '.' && name[2] == '\0'));
}


static void do_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
                       off_t offset, fuse_file_info *fi, int plus) {
    auto d = get_dir_handle(fi);
    Inode& inode = get_inode(ino);
    lock_guard<mutex> g {inode.m};
    char *p;
    auto rem = size;
    int err = 0, count = 0;

    if (fs.debug)
        cerr << "DEBUG: readdir(): started with offset "
             << offset << endl;

    auto buf = new (nothrow) char[size];
    if (!buf) {
        fuse_reply_err(req, ENOMEM);
        return;
    }
    p = buf;

    if (offset != d->offset) {
        if (fs.debug)
            cerr << "DEBUG: readdir(): seeking to " << offset << endl;
        seekdir(d->dp, offset);
        d->offset = offset;
    }

    while (1) {
        struct dirent *entry;
        errno = 0;
        entry = readdir(d->dp);
        if (!entry) {
            if(errno) {
                err = errno;
                if (fs.debug)
                    warn("DEBUG: readdir(): readdir failed with");
                goto error;
            }
            break; // End of stream
        }
        d->offset = entry->d_off;
        if (is_dot_or_dotdot(entry->d_name))
            continue;

        fuse_entry_param e{};
        size_t entsize;
        if(plus) {
            err = do_lookup(ino, entry->d_name, &e);
            if (err)
                goto error;
            entsize = fuse_add_direntry_plus(req, p, rem, entry->d_name, &e, entry->d_off);

            if (entsize > rem) {
                if (fs.debug)
                    cerr << "DEBUG: readdir(): buffer full, returning data. " << endl;
                forget_one(e.ino, 1);
                break;
            }
        } else {
            e.attr.st_ino = entry->d_ino;
            e.attr.st_mode = entry->d_type << 12;
            entsize = fuse_add_direntry(req, p, rem, entry->d_name, &e.attr, entry->d_off);

            if (entsize > rem) {
                if (fs.debug)
                    cerr << "DEBUG: readdir(): buffer full, returning data. " << endl;
                break;
            }
        }

        p += entsize;
        rem -= entsize;
        count++;
        if (fs.debug) {
            cerr << "DEBUG: readdir(): added to buffer: " << entry->d_name
                 << ", ino " << e.attr.st_ino << ", offset " << entry->d_off << endl;
        }
    }
    err = 0;
error:

    // If there's an error, we can only signal it if we haven't stored
    // any entries yet - otherwise we'd end up with wrong lookup
    // counts for the entries that are already in the buffer. So we
    // return what we've collected until that point.
    if (err && rem == size) {
        if (err == ENFILE || err == EMFILE)
            cerr << "ERROR: Reached maximum number of file descriptors." << endl;
        fuse_reply_err(req, err);
    } else {
        if (fs.debug)
            cerr << "DEBUG: readdir(): returning " << count
                 << " entries, curr offset " << d->offset << endl;
        fuse_reply_buf(req, buf, size - rem);
    }
    delete[] buf;
    return;
}


static void sfs_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
                        off_t offset, fuse_file_info *fi) {
    // operation logging is done in readdir to reduce code duplication
    do_readdir(req, ino, size, offset, fi, 0);
}


static void sfs_readdirplus(fuse_req_t req, fuse_ino_t ino, size_t size,
                            off_t offset, fuse_file_info *fi) {
    // operation logging is done in readdir to reduce code duplication
    do_readdir(req, ino, size, offset, fi, 1);
}


static void sfs_releasedir(fuse_req_t req, fuse_ino_t ino, fuse_file_info *fi) {
    (void) ino;
    auto d = get_dir_handle(fi);
    delete d;
    fuse_reply_err(req, 0);
}


static void sfs_create(fuse_req_t req, fuse_ino_t parent, const char *name,
                       mode_t mode, fuse_file_info *fi) {
    Inode& inode_p = get_inode(parent);

    auto fd = openat(inode_p.fd, name,
                     (fi->flags | O_CREAT) & ~O_NOFOLLOW, mode);
    if (fd == -1) {
        auto err = errno;
        if (err == ENFILE || err == EMFILE)
            cerr << "ERROR: Reached maximum number of file descriptors." << endl;
        fuse_reply_err(req, err);
        return;
    }

    fi->fh = fd;
    fuse_entry_param e;
    auto err = do_lookup(parent, name, &e);
    if (err) {
        if (err == ENFILE || err == EMFILE)
            cerr << "ERROR: Reached maximum number of file descriptors." << endl;
        fuse_reply_err(req, err);
        return;
    }

    Inode& inode = get_inode(e.ino);
    lock_guard<mutex> g {inode.m};
    inode.nopen++;
    fuse_reply_create(req, &e, fi);
}


static void sfs_fsyncdir(fuse_req_t req, fuse_ino_t ino, int datasync,
                         fuse_file_info *fi) {
    (void) ino;
    int res;
    int fd = dirfd(get_dir_handle(fi)->dp);
    if (datasync)
        res = fdatasync(fd);
    else
        res = fsync(fd);
    fuse_reply_err(req, res == -1 ? errno : 0);
}


static void sfs_open(fuse_req_t req, fuse_ino_t ino, fuse_file_info *fi) {
    Inode& inode = get_inode(ino);

    /* With writeback cache, kernel may send read requests even
       when userspace opened write-only */
    if (fs.timeout && (fi->flags & O_ACCMODE) == O_WRONLY) {
        fi->flags &= ~O_ACCMODE;
        fi->flags |= O_RDWR;
    }

    /* With writeback cache, O_APPEND is handled by the kernel. This
       breaks atomicity (since the file may change in the underlying
       filesystem, so that the kernel's idea of the end of the file
       isn't accurate anymore). However, no process should modify the
       file in the underlying filesystem once it has been read, so
       this is not a problem. */
    if (fs.timeout && fi->flags & O_APPEND)
        fi->flags &= ~O_APPEND;

    /* Unfortunately we cannot use inode.fd, because this was opened
       with O_PATH (so it doesn't allow read/write access). */
    char buf[64];
    sprintf(buf, "/proc/self/fd/%i", inode.fd);
    auto fd = open(buf, fi->flags & ~O_NOFOLLOW);
    if (fd == -1) {
        auto err = errno;
        if (err == ENFILE || err == EMFILE)
            cerr << "ERROR: Reached maximum number of file descriptors." << endl;
        fuse_reply_err(req, err);
        return;
    }

    lock_guard<mutex> g {inode.m};
    inode.nopen++;
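    // With caching enabled, let the kernel keep previously cached file
    // pages across open() calls instead of invalidating them.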
    fi->keep_cache = (fs.timeout != 0);
    fi->fh = fd;
    fuse_reply_open(req, fi);
}


static void sfs_release(fuse_req_t req, fuse_ino_t ino, fuse_file_info *fi) {
    Inode& inode = get_inode(ino);
    lock_guard<mutex> g {inode.m};
    inode.nopen--;
    close(fi->fh);
    fuse_reply_err(req, 0);
}


static void sfs_flush(fuse_req_t req, fuse_ino_t ino, fuse_file_info *fi) {
    (void) ino;
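    // flush() is called for every close() of a file descriptor referring
    // to this open file. The file itself must stay open (release() will
    // close fi->fh), so close a duplicate instead; on filesystems such as
    // NFS this still triggers the flush-on-close semantics.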
    auto res = close(dup(fi->fh));
    fuse_reply_err(req, res == -1 ? errno : 0);
}


static void sfs_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
                      fuse_file_info *fi) {
    (void) ino;
    int res;
    if (datasync)
        res = fdatasync(fi->fh);
    else
        res = fsync(fi->fh);
    fuse_reply_err(req, res == -1 ? errno : 0);
}


static void do_read(fuse_req_t req, size_t size, off_t off, fuse_file_info *fi) {

    fuse_bufvec buf = FUSE_BUFVEC_INIT(size);
    buf.buf[0].flags = static_cast<fuse_buf_flags>(
        FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
    buf.buf[0].fd = fi->fh;
    buf.buf[0].pos = off;

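    // The reply is backed by the file descriptor itself, so libfuse can
    // splice() the data to the fuse device instead of copying it through
    // userspace (unless --nosplice forces a plain copy via
    // FUSE_BUF_COPY_FLAGS).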
    fuse_reply_data(req, &buf, FUSE_BUF_COPY_FLAGS);
}

static void sfs_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
                     fuse_file_info *fi) {
    (void) ino;
    do_read(req, size, off, fi);
}


static void do_write_buf(fuse_req_t req, size_t size, off_t off,
                         fuse_bufvec *in_buf, fuse_file_info *fi) {
    fuse_bufvec out_buf = FUSE_BUFVEC_INIT(size);
    out_buf.buf[0].flags = static_cast<fuse_buf_flags>(
        FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
    out_buf.buf[0].fd = fi->fh;
    out_buf.buf[0].pos = off;

    auto res = fuse_buf_copy(&out_buf, in_buf, FUSE_BUF_COPY_FLAGS);
    if (res < 0)
        fuse_reply_err(req, -res);
    else
        fuse_reply_write(req, (size_t)res);
}


static void sfs_write_buf(fuse_req_t req, fuse_ino_t ino, fuse_bufvec *in_buf,
                          off_t off, fuse_file_info *fi) {
    (void) ino;
    auto size {fuse_buf_size(in_buf)};
    do_write_buf(req, size, off, in_buf, fi);
}


static void sfs_statfs(fuse_req_t req, fuse_ino_t ino) {
    struct statvfs stbuf;

    auto res = fstatvfs(get_fs_fd(ino), &stbuf);
    if (res == -1)
        fuse_reply_err(req, errno);
    else
        fuse_reply_statfs(req, &stbuf);
}


#ifdef HAVE_POSIX_FALLOCATE
static void sfs_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
                          off_t offset, off_t length, fuse_file_info *fi) {
    (void) ino;
    if (mode) {
        fuse_reply_err(req, EOPNOTSUPP);
        return;
    }

    auto err = posix_fallocate(fi->fh, offset, length);
    fuse_reply_err(req, err);
}
#endif

static void sfs_flock(fuse_req_t req, fuse_ino_t ino, fuse_file_info *fi,
                      int op) {
    (void) ino;
    auto res = flock(fi->fh, op);
    fuse_reply_err(req, res == -1 ? errno : 0);
}


#ifdef HAVE_SETXATTR
static void sfs_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
                         size_t size) {
    char *value = nullptr;
    Inode& inode = get_inode(ino);
    ssize_t ret;
    int saverr;

    char procname[64];
    sprintf(procname, "/proc/self/fd/%i", inode.fd);

    if (size) {
        value = new (nothrow) char[size];
        if (value == nullptr) {
            saverr = ENOMEM;
            goto out;
        }

        ret = getxattr(procname, name, value, size);
        if (ret == -1)
            goto out_err;
        saverr = 0;
        if (ret == 0)
            goto out;

        fuse_reply_buf(req, value, ret);
    } else {
        ret = getxattr(procname, name, nullptr, 0);
        if (ret == -1)
            goto out_err;

        fuse_reply_xattr(req, ret);
    }
out_free:
    delete[] value;
    return;

out_err:
    saverr = errno;
out:
    fuse_reply_err(req, saverr);
    goto out_free;
}


static void sfs_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size) {
    char *value = nullptr;
    Inode& inode = get_inode(ino);
    ssize_t ret;
    int saverr;

    char procname[64];
    sprintf(procname, "/proc/self/fd/%i", inode.fd);

    if (size) {
        value = new (nothrow) char[size];
        if (value == nullptr) {
            saverr = ENOMEM;
            goto out;
        }

        ret = listxattr(procname, value, size);
        if (ret == -1)
            goto out_err;
        saverr = 0;
        if (ret == 0)
            goto out;

        fuse_reply_buf(req, value, ret);
    } else {
        ret = listxattr(procname, nullptr, 0);
        if (ret == -1)
            goto out_err;

        fuse_reply_xattr(req, ret);
    }
out_free:
    delete[] value;
    return;
out_err:
    saverr = errno;
out:
    fuse_reply_err(req, saverr);
    goto out_free;
}


static void sfs_setxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
                         const char *value, size_t size, int flags) {
    Inode& inode = get_inode(ino);
    ssize_t ret;
    int saverr;

    char procname[64];
    sprintf(procname, "/proc/self/fd/%i", inode.fd);

    ret = setxattr(procname, name, value, size, flags);
    saverr = ret == -1 ? errno : 0;

    fuse_reply_err(req, saverr);
}


static void sfs_removexattr(fuse_req_t req, fuse_ino_t ino, const char *name) {
    char procname[64];
    Inode& inode = get_inode(ino);
    ssize_t ret;
    int saverr;

    sprintf(procname, "/proc/self/fd/%i", inode.fd);
    ret = removexattr(procname, name);
    saverr = ret == -1 ? errno : 0;

    fuse_reply_err(req, saverr);
}
#endif


static void assign_operations(fuse_lowlevel_ops &sfs_oper) {
    sfs_oper.init = sfs_init;
    sfs_oper.lookup = sfs_lookup;
    sfs_oper.mkdir = sfs_mkdir;
    sfs_oper.mknod = sfs_mknod;
    sfs_oper.symlink = sfs_symlink;
    sfs_oper.link = sfs_link;
    sfs_oper.unlink = sfs_unlink;
    sfs_oper.rmdir = sfs_rmdir;
    sfs_oper.rename = sfs_rename;
    sfs_oper.forget = sfs_forget;
    sfs_oper.forget_multi = sfs_forget_multi;
    sfs_oper.getattr = sfs_getattr;
    sfs_oper.setattr = sfs_setattr;
    sfs_oper.readlink = sfs_readlink;
    sfs_oper.opendir = sfs_opendir;
    sfs_oper.readdir = sfs_readdir;
    sfs_oper.readdirplus = sfs_readdirplus;
    sfs_oper.releasedir = sfs_releasedir;
    sfs_oper.fsyncdir = sfs_fsyncdir;
    sfs_oper.create = sfs_create;
    sfs_oper.open = sfs_open;
    sfs_oper.release = sfs_release;
    sfs_oper.flush = sfs_flush;
    sfs_oper.fsync = sfs_fsync;
    sfs_oper.read = sfs_read;
    sfs_oper.write_buf = sfs_write_buf;
    sfs_oper.statfs = sfs_statfs;
#ifdef HAVE_POSIX_FALLOCATE
    sfs_oper.fallocate = sfs_fallocate;
#endif
    sfs_oper.flock = sfs_flock;
#ifdef HAVE_SETXATTR
    sfs_oper.setxattr = sfs_setxattr;
    sfs_oper.getxattr = sfs_getxattr;
    sfs_oper.listxattr = sfs_listxattr;
    sfs_oper.removexattr = sfs_removexattr;
#endif
}

static void print_usage(char *prog_name) {
    cout << "Usage: " << prog_name << " --help\n"
         << "       " << prog_name << " [options] <source> <mountpoint>\n";
}

static cxxopts::ParseResult parse_wrapper(cxxopts::Options& parser, int& argc, char**& argv) {
    try {
        return parser.parse(argc, argv);
    } catch (cxxopts::option_not_exists_exception& exc) {
        std::cout << argv[0] << ": " << exc.what() << std::endl;
        print_usage(argv[0]);
        exit(2);
    }
}


static cxxopts::ParseResult parse_options(int argc, char **argv) {
    cxxopts::Options opt_parser(argv[0]);
    opt_parser.add_options()
        ("debug", "Enable filesystem debug messages")
        ("debug-fuse", "Enable libfuse debug messages")
        ("help", "Print help")
        ("nocache", "Disable all caching")
        ("nosplice", "Do not use splice(2) to transfer data")
        ("single", "Run single-threaded");

    // FIXME: Find a better way to limit the try clause to just
    // opt_parser.parse() (cf. https://github.com/jarro2783/cxxopts/issues/146)
    auto options = parse_wrapper(opt_parser, argc, argv);

    if (options.count("help")) {
        print_usage(argv[0]);
        // Strip everything before the option list from the
        // default help string.
        auto help = opt_parser.help();
        std::cout << std::endl << "options:"
                  << help.substr(help.find("\n\n") + 1, string::npos);
        exit(0);

    } else if (argc != 3) {
        std::cout << argv[0] << ": invalid number of arguments\n";
        print_usage(argv[0]);
        exit(2);
    }

    fs.debug = options.count("debug") != 0;
    fs.nosplice = options.count("nosplice") != 0;
    char* resolved_path = realpath(argv[1], NULL);
    if (resolved_path == NULL)
        err(1, "ERROR: realpath(\"%s\") failed", argv[1]);
    fs.source = std::string {resolved_path};
    free(resolved_path);

    return options;
}


static void maximize_fd_limit() {
    struct rlimit lim {};
    auto res = getrlimit(RLIMIT_NOFILE, &lim);
    if (res != 0) {
        warn("WARNING: getrlimit() failed with");
        return;
    }
    lim.rlim_cur = lim.rlim_max;
    res = setrlimit(RLIMIT_NOFILE, &lim);
    if (res != 0)
        warn("WARNING: setrlimit() failed with");
}


int main(int argc, char *argv[]) {

    // Parse command line options
    auto options {parse_options(argc, argv)};

    // We need an fd for every dentry in our filesystem that the
    // kernel knows about. This is way more than most processes need,
    // so try to get rid of any resource softlimit.
    maximize_fd_limit();

    // Initialize filesystem root
    fs.root.fd = -1;
    fs.root.nlookup = 9999;
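    // (Large dummy lookup count; the root inode is never cleaned up by
    // forget_one(), so the exact value does not matter.)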
    fs.timeout = options.count("nocache") ? 0 : 86400.0;

    struct stat stat;
    auto ret = lstat(fs.source.c_str(), &stat);
    if (ret == -1)
        err(1, "ERROR: failed to stat source (\"%s\")", fs.source.c_str());
    if (!S_ISDIR(stat.st_mode))
        errx(1, "ERROR: source is not a directory");
    fs.src_dev = stat.st_dev;

    fs.root.fd = open(fs.source.c_str(), O_PATH);
    if (fs.root.fd == -1)
        err(1, "ERROR: open(\"%s\", O_PATH)", fs.source.c_str());

    // Initialize fuse
    fuse_args args = FUSE_ARGS_INIT(0, nullptr);
    if (fuse_opt_add_arg(&args, argv[0]) ||
        fuse_opt_add_arg(&args, "-o") ||
        fuse_opt_add_arg(&args, "default_permissions,fsname=hpps") ||
        (options.count("debug-fuse") && fuse_opt_add_arg(&args, "-odebug")))
        errx(3, "ERROR: Out of memory");

    fuse_lowlevel_ops sfs_oper {};
    assign_operations(sfs_oper);
    auto se = fuse_session_new(&args, &sfs_oper, sizeof(sfs_oper), &fs);
    if (se == nullptr)
        goto err_out1;

    if (fuse_set_signal_handlers(se) != 0)
        goto err_out2;

    // Don't apply umask, use modes exactly as specified
    umask(0);

    // Mount and run main loop
    struct fuse_loop_config loop_config;
    loop_config.clone_fd = 0;
    loop_config.max_idle_threads = 10;
    if (fuse_session_mount(se, argv[2]) != 0)
        goto err_out3;
    if (options.count("single"))
        ret = fuse_session_loop(se);
    else
        ret = fuse_session_loop_mt(se, &loop_config);

    fuse_session_unmount(se);

err_out3:
    fuse_remove_signal_handlers(se);
err_out2:
    fuse_session_destroy(se);
err_out1:
    fuse_opt_free_args(&args);

    return ret ? 1 : 0;
}
