1 // Copyright (C) 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #define ATRACE_TAG ATRACE_TAG_APP
16 #define LOG_TAG "FuseDaemon"
17 #define LIBFUSE_LOG_TAG "libfuse"
18 
19 #include "FuseDaemon.h"
20 
21 #include <android-base/logging.h>
22 #include <android-base/properties.h>
23 #include <android/log.h>
24 #include <android/trace.h>
25 #include <ctype.h>
26 #include <dirent.h>
27 #include <errno.h>
28 #include <fcntl.h>
29 #include <fuse_i.h>
30 #include <fuse_log.h>
31 #include <fuse_lowlevel.h>
32 #include <inttypes.h>
33 #include <limits.h>
34 #include <linux/fuse.h>
35 #include <stdbool.h>
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <sys/inotify.h>
40 #include <sys/mman.h>
41 #include <sys/mount.h>
42 #include <sys/param.h>
43 #include <sys/resource.h>
44 #include <sys/stat.h>
45 #include <sys/statfs.h>
46 #include <sys/statvfs.h>
47 #include <sys/time.h>
48 #include <sys/types.h>
49 #include <sys/uio.h>
50 #include <unistd.h>
51 
52 #include <iostream>
53 #include <list>
54 #include <map>
55 #include <mutex>
56 #include <queue>
57 #include <regex>
58 #include <thread>
59 #include <unordered_map>
60 #include <unordered_set>
61 #include <vector>
62 
63 #include "MediaProviderWrapper.h"
64 #include "libfuse_jni/FuseUtils.h"
65 #include "libfuse_jni/ReaddirHelper.h"
66 #include "libfuse_jni/RedactionInfo.h"
67 #include "node-inl.h"
68 
69 using mediaprovider::fuse::DirectoryEntry;
70 using mediaprovider::fuse::dirhandle;
71 using mediaprovider::fuse::handle;
72 using mediaprovider::fuse::node;
73 using mediaprovider::fuse::RedactionInfo;
74 using std::list;
75 using std::string;
76 using std::vector;
77 
78 // logging macros to avoid duplication.
79 #define TRACE_NODE(__node, __req)                                                  \
80     LOG(VERBOSE) << __FUNCTION__ << " : " << #__node << " = [" << get_name(__node) \
81                  << "] (uid=" << __req->ctx.uid << ") "
82 
83 #define ATRACE_NAME(name) ScopedTrace ___tracer(name)
84 #define ATRACE_CALL() ATRACE_NAME(__FUNCTION__)
85 
86 class ScopedTrace {
87   public:
88     explicit inline ScopedTrace(const char *name) {
89       ATrace_beginSection(name);
90     }
91 
92     inline ~ScopedTrace() {
93       ATrace_endSection();
94     }
95 };
96 
97 const bool IS_OS_DEBUGABLE = android::base::GetIntProperty("ro.debuggable", 0);
98 
99 #define FUSE_UNKNOWN_INO 0xffffffff
100 
101 // Stolen from: android_filesystem_config.h
102 #define AID_APP_START 10000
103 
104 constexpr size_t MAX_READ_SIZE = 128 * 1024;
105 // Stolen from: UserHandle#getUserId
106 constexpr int PER_USER_RANGE = 100000;
107 
108 // Regex copied from FileUtils.java in MediaProvider, but without media directory.
109 const std::regex PATTERN_OWNED_PATH(
110     "^/storage/[^/]+/(?:[0-9]+/)?Android/(?:data|obb|sandbox)/([^/]+)(/?.*)?",
111     std::regex_constants::icase);
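// Illustrative (hypothetical package names): paths this pattern is meant to
// match, with capture group 1 holding the package name:
//   /storage/emulated/0/Android/data/com.example.app/files -> "com.example.app"
//   /storage/AB12-34CD/Android/obb/com.example.game        -> "com.example.game"
// Paths outside Android/{data,obb,sandbox} do not match.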
112 
113 /*
114  * In order to avoid double caching with fuse, call fadvise on the file handles
115  * in the underlying file system. However, if this is done on every read/write,
116  * the fadvises cause a very significant slowdown in tests (specifically fio
117  * seq_write). So call fadvise on the file handles with the most reads/writes
118  * only after a threshold is passed.
119  */
120 class FAdviser {
121   public:
122     FAdviser() : thread_(MessageLoop, this), total_size_(0) {}
123 
124     ~FAdviser() {
125         SendMessage(Message::quit);
126         thread_.join();
127     }
128 
129     void Record(int fd, size_t size) { SendMessage(Message::record, fd, size); }
130 
131     void Close(int fd) { SendMessage(Message::close, fd); }
132 
133   private:
134     struct Message {
135         enum Type { record, close, quit };
136         Type type;
137         int fd;
138         size_t size;
139     };
140 
141     void RecordImpl(int fd, size_t size) {
142         total_size_ += size;
143 
144         // Find or create record in files_
145         // Remove record from sizes_ if it exists, adjusting size appropriately
146         auto file = files_.find(fd);
147         if (file != files_.end()) {
148             auto old_size = file->second;
149             size += old_size->first;
150             sizes_.erase(old_size);
151         } else {
152             file = files_.insert(Files::value_type(fd, sizes_.end())).first;
153         }
154 
155         // Now (re) insert record in sizes_
156         auto new_size = sizes_.insert(Sizes::value_type(size, fd));
157         file->second = new_size;
158 
159         if (total_size_ < threshold_) return;
160 
161         LOG(INFO) << "Threshold exceeded - fadvising " << total_size_;
162         while (!sizes_.empty() && total_size_ > target_) {
163             auto size = --sizes_.end();
164             total_size_ -= size->first;
165             posix_fadvise(size->second, 0, 0, POSIX_FADV_DONTNEED);
166             files_.erase(size->second);
167             sizes_.erase(size);
168         }
169         LOG(INFO) << "Threshold now " << total_size_;
170     }
171 
172     void CloseImpl(int fd) {
173         auto file = files_.find(fd);
174         if (file == files_.end()) return;
175 
176         total_size_ -= file->second->first;
177         sizes_.erase(file->second);
178         files_.erase(file);
179     }
180 
181     void MessageLoopImpl() {
182         while (1) {
183             Message message;
184 
185             {
186                 std::unique_lock<std::mutex> lock(mutex_);
187                 cv_.wait(lock, [this] { return !queue_.empty(); });
188                 message = queue_.front();
189                 queue_.pop();
190             }
191 
192             switch (message.type) {
193                 case Message::record:
194                     RecordImpl(message.fd, message.size);
195                     break;
196 
197                 case Message::close:
198                     CloseImpl(message.fd);
199                     break;
200 
201                 case Message::quit:
202                     return;
203             }
204         }
205     }
206 
207     static int MessageLoop(FAdviser* ptr) {
208         ptr->MessageLoopImpl();
209         return 0;
210     }
211 
212     void SendMessage(Message::Type type, int fd = -1, size_t size = 0) {
213         {
214             std::unique_lock<std::mutex> lock(mutex_);
215             Message message = {type, fd, size};
216             queue_.push(message);
217         }
218         cv_.notify_one();
219     }
220 
221     std::mutex mutex_;
222     std::condition_variable cv_;
223     std::queue<Message> queue_;
224     std::thread thread_;
225 
226     typedef std::multimap<size_t, int> Sizes;
227     typedef std::map<int, Sizes::iterator> Files;
228 
229     Files files_;
230     Sizes sizes_;
231     size_t total_size_;
232 
233     const size_t threshold_ = 64 * 1024 * 1024;
234     const size_t target_ = 32 * 1024 * 1024;
235 };
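// Illustrative usage sketch (hypothetical helper names, guarded out like the
// untested block further below): the read/write/release callbacks feed the
// adviser, which fadvises the largest files once enough I/O has accumulated.
#if 0
static FAdviser example_fadviser;

static void example_after_io(int fd, size_t bytes) {
    // Accumulate per-fd I/O; once threshold_ (64 MiB) is crossed, the worker
    // thread issues POSIX_FADV_DONTNEED on the biggest files until the
    // running total drops back under target_ (32 MiB).
    example_fadviser.Record(fd, bytes);
}

static void example_on_release(int fd) {
    // Drop the bookkeeping for this fd once its handle is closed.
    example_fadviser.Close(fd);
}
#endif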
236 
237 /* Single FUSE mount */
238 struct fuse {
239     explicit fuse(const std::string& _path)
240         : path(_path),
241           tracker(mediaprovider::fuse::NodeTracker(&lock)),
242           root(node::CreateRoot(_path, &lock, &tracker)),
243           mp(0),
244           zero_addr(0) {}
245 
246     inline bool IsRoot(const node* node) const { return node == root; }
247 
248     inline string GetEffectiveRootPath() {
249         if (path.find("/storage/emulated", 0) == 0) {
250             return path + "/" + std::to_string(getuid() / PER_USER_RANGE);
251         }
252         return path;
253     }
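    // Example (illustrative): a daemon serving Android user 10 runs with a
    // uid of at least 10 * PER_USER_RANGE, so getuid() / PER_USER_RANGE == 10
    // and "/storage/emulated" resolves to "/storage/emulated/10"; for the
    // primary user the result is "/storage/emulated/0".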
254 
255     // Note that these two (FromInode / ToInode) conversion wrappers are required
256     // because fuse_lowlevel_ops documents that the root inode is always one
257     // (see FUSE_ROOT_ID in fuse_lowlevel.h). There are no particular requirements
258     // on any of the other inodes in the FS.
259     inline node* FromInode(__u64 inode) {
260         if (inode == FUSE_ROOT_ID) {
261             return root;
262         }
263 
264         return node::FromInode(inode, &tracker);
265     }
266 
267     inline __u64 ToInode(node* node) const {
268         if (IsRoot(node)) {
269             return FUSE_ROOT_ID;
270         }
271 
272         return node::ToInode(node);
273     }
274 
275     std::recursive_mutex lock;
276     const string path;
277     // The Inode tracker associated with this FUSE instance.
278     mediaprovider::fuse::NodeTracker tracker;
279     node* const root;
280     struct fuse_session* se;
281 
282     /*
283      * Used to make JNI calls to MediaProvider.
284      * Responsibility of freeing this object falls on corresponding
285      * FuseDaemon object.
286      */
287     mediaprovider::fuse::MediaProviderWrapper* mp;
288 
289     /*
290      * Points to a range of zeroized bytes, used by pf_read to represent redacted ranges.
291      * The memory is read only and should never be modified.
292      */
293     /* const */ char* zero_addr;
294 
295     FAdviser fadviser;
296 
297     std::atomic_bool* active;
298 };
299 
300 static inline string get_name(node* n) {
301     if (n) {
302         std::string name = IS_OS_DEBUGABLE ? "real_path: " + n->BuildPath() + " " : "";
303         name += "node_path: " + n->BuildSafePath();
304         return name;
305     }
306     return "?";
307 }
308 
309 static inline __u64 ptr_to_id(void* ptr) {
310     return (__u64)(uintptr_t) ptr;
311 }
312 
313 /*
314  * Set an F_RDLCK or F_WRLCK on fd with fcntl(2).
315  *
316  * This is called before the MediaProvider returns fd from the lower file
317  * system to an app over the ContentResolver interface. This allows us
318  * system to an app over the ContentResolver interface. This allows us to
319  */
320 static int set_file_lock(int fd, bool for_read, const std::string& path) {
321     std::string lock_str = (for_read ? "read" : "write");
322 
323     struct flock fl{};
324     fl.l_type = for_read ? F_RDLCK : F_WRLCK;
325     fl.l_whence = SEEK_SET;
326 
327     int res = fcntl(fd, F_OFD_SETLK, &fl);
328     if (res) {
329         PLOG(WARNING) << "Failed to set lock: " << lock_str;
330         return res;
331     }
332     return res;
333 }
334 
335 /*
336  * Check if an F_RDLCK or F_WRLCK is set on fd with fcntl(2).
337  *
338  * This is used to determine if the MediaProvider has given an fd to the lower fs to an app over
339  * the ContentResolver interface. Before that happens, we always call set_file_lock on the file
340  * allowing us to know if any reference to that fd is still open here.
341  *
342  * Returns true if fd may have a lock, false otherwise
343  */
344 static bool is_file_locked(int fd, const std::string& path) {
345     struct flock fl{};
346     fl.l_type = F_WRLCK;
347     fl.l_whence = SEEK_SET;
348 
349     int res = fcntl(fd, F_OFD_GETLK, &fl);
350     if (res) {
351         PLOG(WARNING) << "Failed to check lock";
352         // Assume worst
353         return true;
354     }
355     bool locked = fl.l_type != F_UNLCK;
356     return locked;
357 }
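// Illustrative flow (a sketch of how the two lock helpers above are meant to
// interact, per the comments): MediaProvider calls set_file_lock() on a
// lower-fs fd before handing it to an app via ContentResolver; later, when
// the same file is opened through FUSE, create_handle_for_node() consults
// is_file_locked() and, if an OFD lock is still present, disables caching
// (direct_io) for the new handle so reads and writes stay coherent.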
358 
359 static struct fuse* get_fuse(fuse_req_t req) {
360     return reinterpret_cast<struct fuse*>(fuse_req_userdata(req));
361 }
362 
363 static bool is_package_owned_path(const string& path, const string& fuse_path) {
364     if (path.rfind(fuse_path, 0) != 0) {
365         return false;
366     }
367     return std::regex_match(path, PATTERN_OWNED_PATH);
368 }
369 
370 // See fuse_lowlevel.h fuse_lowlevel_notify_inval_entry for how to call this safely without
371 // deadlocking the kernel
372 static void fuse_inval(fuse_session* se, fuse_ino_t parent_ino, fuse_ino_t child_ino,
373                        const string& child_name, const string& path) {
374     if (mediaprovider::fuse::containsMount(path, std::to_string(getuid() / PER_USER_RANGE))) {
375         LOG(WARNING) << "Ignoring attempt to invalidate dentry for FUSE mounts";
376         return;
377     }
378 
379     if (fuse_lowlevel_notify_inval_entry(se, parent_ino, child_name.c_str(), child_name.size())) {
380         // Invalidating the dentry can fail if there's no dcache entry, however, there may still
381         // be cached attributes, so attempt to invalidate those by invalidating the inode
382         fuse_lowlevel_notify_inval_inode(se, child_ino, 0, 0);
383     }
384 }
385 
386 static double get_timeout(struct fuse* fuse, const string& path, bool should_inval) {
387     string media_path = fuse->GetEffectiveRootPath() + "/Android/media";
388     if (should_inval || path.find(media_path, 0) == 0 || is_package_owned_path(path, fuse->path)) {
389         // We set dentry timeout to 0 for the following reasons:
390         // 1. Case-insensitive lookups need to invalidate other case-insensitive dentry matches
391         // 2. Installd might delete Android/media/<package> dirs when app data is cleared.
392         // This can leave a stale entry in the kernel dcache, and break subsequent creation of the
393         // dir via FUSE.
394         // 3. With app data isolation enabled, app A should not guess existence of app B from the
395         // Android/{data,obb}/<package> paths, hence we prevent the kernel from caching that
396         // information.
397         return 0;
398     }
399     return std::numeric_limits<double>::max();
400 }
401 
402 static node* make_node_entry(fuse_req_t req, node* parent, const string& name, const string& path,
403                              struct fuse_entry_param* e, int* error_code) {
404     struct fuse* fuse = get_fuse(req);
405     const struct fuse_ctx* ctx = fuse_req_ctx(req);
406     node* node;
407 
408     memset(e, 0, sizeof(*e));
409     if (lstat(path.c_str(), &e->attr) < 0) {
410         *error_code = errno;
411         return NULL;
412     }
413 
414     bool should_inval = false;
415     node = parent->LookupChildByName(name, true /* acquire */);
416     if (!node) {
417         node = ::node::Create(parent, name, &fuse->lock, &fuse->tracker);
418     } else if (!mediaprovider::fuse::containsMount(path, std::to_string(getuid() / PER_USER_RANGE))) {
419         should_inval = true;
420         // Only invalidate a path if it does not contain mount.
421         // Invalidate both names to ensure there's no dentry left in the kernel after the following
422         // operations:
423         // 1) touch foo, touch FOO, unlink *foo*
424         // 2) touch foo, touch FOO, unlink *FOO*
425         // Invalidating lookup_name fixes (1) and invalidating node_name fixes (2)
426         // |should_inval| invalidates lookup_name by using 0 timeout below and we explicitly
427         // invalidate node_name if different case
428         // Note that we invalidate async otherwise we will deadlock the kernel
429         if (name != node->GetName()) {
430             // Make copies of the node name and path so we're not attempting to acquire
431             // any node locks from the invalidation thread. Depending on timing, we may end
432             // up invalidating the wrong inode but that shouldn't result in correctness issues.
433             const fuse_ino_t parent_ino = fuse->ToInode(parent);
434             const fuse_ino_t child_ino = fuse->ToInode(node);
435             const std::string& node_name = node->GetName();
436 
437             std::thread t([=]() { fuse_inval(fuse->se, parent_ino, child_ino, node_name, path); });
438             t.detach();
439         }
440     }
441     TRACE_NODE(node, req);
442 
443     // This FS is not being exported via NFS so just a fixed generation number
444     // for now. If we do need this, we need to increment the generation ID each
445     // time the fuse daemon restarts because that's what it takes for us to
446     // reuse inode numbers.
447     e->generation = 0;
448     e->ino = fuse->ToInode(node);
449     e->entry_timeout = get_timeout(fuse, path, should_inval);
450     e->attr_timeout = is_package_owned_path(path, fuse->path) || should_inval
451                               ? 0
452                               : std::numeric_limits<double>::max();
453 
454     return node;
455 }
456 
457 static inline bool is_requesting_write(int flags) {
458     return flags & (O_WRONLY | O_RDWR);
459 }
460 
461 namespace mediaprovider {
462 namespace fuse {
463 
464 /**
465  * Function implementations
466  *
467  * These implement the various functions in fuse_lowlevel_ops
468  *
469  */
470 
471 static void pf_init(void* userdata, struct fuse_conn_info* conn) {
472     // We don't want a getattr request with every read request
473     conn->want &= ~FUSE_CAP_AUTO_INVAL_DATA & ~FUSE_CAP_READDIRPLUS_AUTO;
474     unsigned mask = (FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE | FUSE_CAP_SPLICE_READ |
475                      FUSE_CAP_ASYNC_READ | FUSE_CAP_ATOMIC_O_TRUNC | FUSE_CAP_WRITEBACK_CACHE |
476                      FUSE_CAP_EXPORT_SUPPORT | FUSE_CAP_FLOCK_LOCKS);
477     conn->want |= conn->capable & mask;
478     conn->max_read = MAX_READ_SIZE;
479 
480     struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
481     fuse->active->store(true, std::memory_order_release);
482 }
483 
484 static void pf_destroy(void* userdata) {
485     struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
486     LOG(INFO) << "DESTROY " << fuse->path;
487 
488     node::DeleteTree(fuse->root);
489 }
490 
491 // Return true if the path is accessible for that uid.
492 static bool is_app_accessible_path(MediaProviderWrapper* mp, const string& path, uid_t uid) {
493     if (uid < AID_APP_START) {
494         return true;
495     }
496 
497     if (path == "/storage/emulated") {
498         // Apps should never refer to /storage/emulated - they should be using the user-specific
499         // subdirs, e.g. /storage/emulated/0
500         return false;
501     }
502 
503     std::smatch match;
504     if (std::regex_match(path, match, PATTERN_OWNED_PATH)) {
505         const std::string& pkg = match[1];
506         // .nomedia is not a valid package. .nomedia always exists in /Android/data directory,
507         // and it's not an external file/directory of any package
508         if (pkg == ".nomedia") {
509             return true;
510         }
511         if (!mp->IsUidForPackage(pkg, uid)) {
512             PLOG(WARNING) << "Invalid other package file access from " << pkg << " (uid " << uid << "): " << path;
513             return false;
514         }
515     }
516     return true;
517 }
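// Illustrative example (hypothetical uid/package): an app with uid 10123
// looking up /storage/emulated/0/Android/data/com.other.app matches
// PATTERN_OWNED_PATH with pkg == "com.other.app"; unless MediaProvider
// confirms that uid 10123 owns that package, the path is treated as
// inaccessible and the FUSE callbacks below reply with ENOENT.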
518 
519 static std::regex storage_emulated_regex("^\\/storage\\/emulated\\/([0-9]+)");
520 static node* do_lookup(fuse_req_t req, fuse_ino_t parent, const char* name,
521                        struct fuse_entry_param* e, int* error_code) {
522     struct fuse* fuse = get_fuse(req);
523     node* parent_node = fuse->FromInode(parent);
524     if (!parent_node) {
525         *error_code = ENOENT;
526         return nullptr;
527     }
528     string parent_path = parent_node->BuildPath();
529     // We should always allow lookups on the root, because failing them could cause
530     // bind mounts to be invalidated.
531     if (!fuse->IsRoot(parent_node) && !is_app_accessible_path(fuse->mp, parent_path, req->ctx.uid)) {
532         *error_code = ENOENT;
533         return nullptr;
534     }
535 
536     string child_path = parent_path + "/" + name;
537 
538     TRACE_NODE(parent_node, req);
539 
540     std::smatch match;
541     std::regex_search(child_path, match, storage_emulated_regex);
542     if (match.size() == 2 && std::to_string(getuid() / PER_USER_RANGE) != match[1].str()) {
543         // Ensure the FuseDaemon user id matches the user id in requested path
544         *error_code = EPERM;
545         return nullptr;
546     }
547     return make_node_entry(req, parent_node, name, child_path, e, error_code);
548 }
549 
550 static void pf_lookup(fuse_req_t req, fuse_ino_t parent, const char* name) {
551     ATRACE_CALL();
552     struct fuse_entry_param e;
553 
554     int error_code = 0;
555     if (do_lookup(req, parent, name, &e, &error_code)) {
556         fuse_reply_entry(req, &e);
557     } else {
558         CHECK(error_code != 0);
559         fuse_reply_err(req, error_code);
560     }
561 }
562 
563 static void do_forget(fuse_req_t req, struct fuse* fuse, fuse_ino_t ino, uint64_t nlookup) {
564     node* node = fuse->FromInode(ino);
565     TRACE_NODE(node, req);
566     if (node) {
567         // This is a narrowing conversion from an unsigned 64bit to a 32bit value. For
568         // some reason we only keep 32 bit refcounts but the kernel issues
569         // forget requests with a 64 bit counter.
570         node->Release(static_cast<uint32_t>(nlookup));
571     }
572 }
573 
574 static void pf_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup) {
575     // Always allow to forget so no need to check is_app_accessible_path()
576     ATRACE_CALL();
577     node* node;
578     struct fuse* fuse = get_fuse(req);
579 
580     do_forget(req, fuse, ino, nlookup);
581     fuse_reply_none(req);
582 }
583 
584 static void pf_forget_multi(fuse_req_t req,
585                             size_t count,
586                             struct fuse_forget_data* forgets) {
587     ATRACE_CALL();
588     struct fuse* fuse = get_fuse(req);
589 
590     for (size_t i = 0; i < count; i++) {
591         do_forget(req, fuse, forgets[i].ino, forgets[i].nlookup);
592     }
593     fuse_reply_none(req);
594 }
595 
596 static void pf_getattr(fuse_req_t req,
597                        fuse_ino_t ino,
598                        struct fuse_file_info* fi) {
599     ATRACE_CALL();
600     struct fuse* fuse = get_fuse(req);
601     node* node = fuse->FromInode(ino);
602     if (!node) {
603         fuse_reply_err(req, ENOENT);
604         return;
605     }
606     string path = node->BuildPath();
607     if (!is_app_accessible_path(fuse->mp, path, req->ctx.uid)) {
608         fuse_reply_err(req, ENOENT);
609         return;
610     }
611     TRACE_NODE(node, req);
612 
613     struct stat s;
614     memset(&s, 0, sizeof(s));
615     if (lstat(path.c_str(), &s) < 0) {
616         fuse_reply_err(req, errno);
617     } else {
618         fuse_reply_attr(req, &s, is_package_owned_path(path, fuse->path) ?
619                 0 : std::numeric_limits<double>::max());
620     }
621 }
622 
623 static void pf_setattr(fuse_req_t req,
624                        fuse_ino_t ino,
625                        struct stat* attr,
626                        int to_set,
627                        struct fuse_file_info* fi) {
628     ATRACE_CALL();
629     struct fuse* fuse = get_fuse(req);
630     node* node = fuse->FromInode(ino);
631     if (!node) {
632         fuse_reply_err(req, ENOENT);
633         return;
634     }
635     string path = node->BuildPath();
636     if (!is_app_accessible_path(fuse->mp, path, req->ctx.uid)) {
637         fuse_reply_err(req, ENOENT);
638         return;
639     }
640 
641     int fd = -1;
642     if (fi) {
643         // If we have a file_info, setattr was called with an fd so use the fd instead of path
644         handle* h = reinterpret_cast<handle*>(fi->fh);
645         fd = h->fd;
646     } else {
647         const struct fuse_ctx* ctx = fuse_req_ctx(req);
648         int status = fuse->mp->IsOpenAllowed(path, ctx->uid, true);
649         if (status) {
650             fuse_reply_err(req, EACCES);
651             return;
652         }
653     }
654     struct timespec times[2];
655     TRACE_NODE(node, req);
656 
657     /* XXX: incomplete implementation on purpose.
658      * chmod/chown should NEVER be implemented.*/
659 
660     if ((to_set & FUSE_SET_ATTR_SIZE)) {
661         int res = 0;
662         if (fd == -1) {
663             res = truncate64(path.c_str(), attr->st_size);
664         } else {
665             res = ftruncate64(fd, attr->st_size);
666         }
667 
668         if (res < 0) {
669             fuse_reply_err(req, errno);
670             return;
671         }
672     }
673 
674      * Handle changing atime and mtime.  If FATTR_ATIME and FATTR_ATIME_NOW
675      * are both set, then set it to the current time.  Else, set it to the
676      * time specified in the request.  Same goes for mtime.  Use utimensat(2)
677      * as it allows ATIME and MTIME to be changed independently, and has
678      * nanosecond resolution which fuse also has.
679      */
680     if (to_set & (FATTR_ATIME | FATTR_MTIME)) {
681         times[0].tv_nsec = UTIME_OMIT;
682         times[1].tv_nsec = UTIME_OMIT;
683         if (to_set & FATTR_ATIME) {
684             if (to_set & FATTR_ATIME_NOW) {
685                 times[0].tv_nsec = UTIME_NOW;
686             } else {
687                 times[0] = attr->st_atim;
688             }
689         }
690 
691         if (to_set & FATTR_MTIME) {
692             if (to_set & FATTR_MTIME_NOW) {
693                 times[1].tv_nsec = UTIME_NOW;
694             } else {
695                 times[1] = attr->st_mtim;
696             }
697         }
698 
699         TRACE_NODE(node, req);
700         int res = 0;
701         if (fd == -1) {
702             res = utimensat(-1, path.c_str(), times, 0);
703         } else {
704             res = futimens(fd, times);
705         }
706 
707         if (res < 0) {
708             fuse_reply_err(req, errno);
709             return;
710         }
711     }
712 
713     lstat(path.c_str(), attr);
714     fuse_reply_attr(req, attr, is_package_owned_path(path, fuse->path) ?
715             0 : std::numeric_limits<double>::max());
716 }
717 
718 static void pf_canonical_path(fuse_req_t req, fuse_ino_t ino)
719 {
720     struct fuse* fuse = get_fuse(req);
721     node* node = fuse->FromInode(ino);
722     string path = node ? node->BuildPath() : "";
723 
724     if (node && is_app_accessible_path(fuse->mp, path, req->ctx.uid)) {
725         // TODO(b/147482155): Check that uid has access to |path| and its contents
726         fuse_reply_canonical_path(req, path.c_str());
727         return;
728     }
729     fuse_reply_err(req, ENOENT);
730 }
731 
732 static void pf_mknod(fuse_req_t req,
733                      fuse_ino_t parent,
734                      const char* name,
735                      mode_t mode,
736                      dev_t rdev) {
737     ATRACE_CALL();
738     struct fuse* fuse = get_fuse(req);
739     node* parent_node = fuse->FromInode(parent);
740     if (!parent_node) {
741         fuse_reply_err(req, ENOENT);
742         return;
743     }
744     string parent_path = parent_node->BuildPath();
745     if (!is_app_accessible_path(fuse->mp, parent_path, req->ctx.uid)) {
746         fuse_reply_err(req, ENOENT);
747         return;
748     }
749 
750     TRACE_NODE(parent_node, req);
751 
752     const string child_path = parent_path + "/" + name;
753 
754     mode = (mode & (~0777)) | 0664;
755     if (mknod(child_path.c_str(), mode, rdev) < 0) {
756         fuse_reply_err(req, errno);
757         return;
758     }
759 
760     int error_code = 0;
761     struct fuse_entry_param e;
762     if (make_node_entry(req, parent_node, name, child_path, &e, &error_code)) {
763         fuse_reply_entry(req, &e);
764     } else {
765         CHECK(error_code != 0);
766         fuse_reply_err(req, error_code);
767     }
768 }
769 
770 static void pf_mkdir(fuse_req_t req,
771                      fuse_ino_t parent,
772                      const char* name,
773                      mode_t mode) {
774     ATRACE_CALL();
775     struct fuse* fuse = get_fuse(req);
776     node* parent_node = fuse->FromInode(parent);
777     if (!parent_node) {
778         fuse_reply_err(req, ENOENT);
779         return;
780     }
781     const struct fuse_ctx* ctx = fuse_req_ctx(req);
782     const string parent_path = parent_node->BuildPath();
783     if (!is_app_accessible_path(fuse->mp, parent_path, ctx->uid)) {
784         fuse_reply_err(req, ENOENT);
785         return;
786     }
787 
788     TRACE_NODE(parent_node, req);
789 
790     const string child_path = parent_path + "/" + name;
791 
792     int status = fuse->mp->IsCreatingDirAllowed(child_path, ctx->uid);
793     if (status) {
794         fuse_reply_err(req, status);
795         return;
796     }
797 
798     mode = (mode & (~0777)) | 0775;
799     if (mkdir(child_path.c_str(), mode) < 0) {
800         fuse_reply_err(req, errno);
801         return;
802     }
803 
804     int error_code = 0;
805     struct fuse_entry_param e;
806     if (make_node_entry(req, parent_node, name, child_path, &e, &error_code)) {
807         fuse_reply_entry(req, &e);
808     } else {
809         CHECK(error_code != 0);
810         fuse_reply_err(req, error_code);
811     }
812 }
813 
814 static void pf_unlink(fuse_req_t req, fuse_ino_t parent, const char* name) {
815     ATRACE_CALL();
816     struct fuse* fuse = get_fuse(req);
817     node* parent_node = fuse->FromInode(parent);
818     if (!parent_node) {
819         fuse_reply_err(req, ENOENT);
820         return;
821     }
822     const struct fuse_ctx* ctx = fuse_req_ctx(req);
823     const string parent_path = parent_node->BuildPath();
824     if (!is_app_accessible_path(fuse->mp, parent_path, ctx->uid)) {
825         fuse_reply_err(req, ENOENT);
826         return;
827     }
828 
829     TRACE_NODE(parent_node, req);
830 
831     const string child_path = parent_path + "/" + name;
832 
833     int status = fuse->mp->DeleteFile(child_path, ctx->uid);
834     if (status) {
835         fuse_reply_err(req, status);
836         return;
837     }
838 
839     node* child_node = parent_node->LookupChildByName(name, false /* acquire */);
840     TRACE_NODE(child_node, req);
841     if (child_node) {
842         child_node->SetDeleted();
843     }
844 
845     fuse_reply_err(req, 0);
846 }
847 
848 static void pf_rmdir(fuse_req_t req, fuse_ino_t parent, const char* name) {
849     ATRACE_CALL();
850     struct fuse* fuse = get_fuse(req);
851     node* parent_node = fuse->FromInode(parent);
852     if (!parent_node) {
853         fuse_reply_err(req, ENOENT);
854         return;
855     }
856     const string parent_path = parent_node->BuildPath();
857     if (!is_app_accessible_path(fuse->mp, parent_path, req->ctx.uid)) {
858         fuse_reply_err(req, ENOENT);
859         return;
860     }
861     TRACE_NODE(parent_node, req);
862 
863     const string child_path = parent_path + "/" + name;
864 
865     int status = fuse->mp->IsDeletingDirAllowed(child_path, req->ctx.uid);
866     if (status) {
867         fuse_reply_err(req, status);
868         return;
869     }
870 
871     if (rmdir(child_path.c_str()) < 0) {
872         fuse_reply_err(req, errno);
873         return;
874     }
875 
876     node* child_node = parent_node->LookupChildByName(name, false /* acquire */);
877     TRACE_NODE(child_node, req);
878     if (child_node) {
879         child_node->SetDeleted();
880     }
881 
882     fuse_reply_err(req, 0);
883 }
884 /*
885 static void pf_symlink(fuse_req_t req, const char* link, fuse_ino_t parent,
886                          const char* name)
887 {
888     cout << "TODO:" << __func__;
889 }
890 */
891 static int do_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
892                      const char* new_name, unsigned int flags) {
893     ATRACE_CALL();
894     struct fuse* fuse = get_fuse(req);
895 
896     if (flags != 0) {
897         return EINVAL;
898     }
899 
900     node* old_parent_node = fuse->FromInode(parent);
901     if (!old_parent_node) return ENOENT;
902     const struct fuse_ctx* ctx = fuse_req_ctx(req);
903     const string old_parent_path = old_parent_node->BuildPath();
904     if (!is_app_accessible_path(fuse->mp, old_parent_path, ctx->uid)) {
905         return ENOENT;
906     }
907 
908     node* new_parent_node = fuse->FromInode(new_parent);
909     if (!new_parent_node) return ENOENT;
910     const string new_parent_path = new_parent_node->BuildPath();
911     if (!is_app_accessible_path(fuse->mp, new_parent_path, ctx->uid)) {
912         return ENOENT;
913     }
914 
915     if (!old_parent_node || !new_parent_node) {
916         return ENOENT;
917     } else if (parent == new_parent && strcmp(name, new_name) == 0) {
918         // No rename required.
919         return 0;
920     }
921 
922     TRACE_NODE(old_parent_node, req);
923     TRACE_NODE(new_parent_node, req);
924 
925     node* child_node = old_parent_node->LookupChildByName(name, true /* acquire */);
926     TRACE_NODE(child_node, req) << "old_child";
927 
928     const string old_child_path = child_node->BuildPath();
929     const string new_child_path = new_parent_path + "/" + new_name;
930 
931     // TODO(b/147408834): Check ENOTEMPTY & EEXIST error conditions before JNI call.
932     const int res = fuse->mp->Rename(old_child_path, new_child_path, req->ctx.uid);
933     // TODO(b/145663158): Lookups can go out of sync if file/directory is actually moved but
934     // EFAULT/EIO is reported due to JNI exception.
935     if (res == 0) {
936         child_node->Rename(new_name, new_parent_node);
937     }
938     TRACE_NODE(child_node, req) << "new_child";
939 
940     child_node->Release(1);
941     return res;
942 }
943 
944 static void pf_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
945                       const char* new_name, unsigned int flags) {
946     int res = do_rename(req, parent, name, new_parent, new_name, flags);
947     fuse_reply_err(req, res);
948 }
949 
950 /*
951 static void pf_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t new_parent,
952                       const char* new_name)
953 {
954     cout << "TODO:" << __func__;
955 }
956 */
957 
958 static handle* create_handle_for_node(struct fuse* fuse, const string& path, int fd, node* node,
959                                       const RedactionInfo* ri) {
960     std::lock_guard<std::recursive_mutex> guard(fuse->lock);
961     // We don't want to use the FUSE VFS cache in two cases:
962     // 1. When redaction is needed because app A with EXIF access might access
963     // a region that should have been redacted for app B without EXIF access, but app B on
964     // a subsequent read, will be able to see the EXIF data because the read request for
965     // that region will be served from cache and not get to the FUSE daemon
966     // 2. When the file has a read or write lock on it. This means that the MediaProvider
967     // has given an fd to the lower file system to an app. There are two cases where using
968     // the cache in this case can be a problem:
969     // a. Writing to a FUSE fd with caching enabled will use the write-back cache and a
970     // subsequent read from the lower fs fd will not see the write.
971     // b. Reading from a FUSE fd with caching enabled may not see the latest writes using
972     // the lower fs fd because those writes did not go through the FUSE layer and reads from
973     // FUSE after that write may be served from cache
974     bool direct_io = ri->isRedactionNeeded() || is_file_locked(fd, path);
975 
976     handle* h = new handle(fd, ri, !direct_io);
977     node->AddHandle(h);
978     return h;
979 }
980 
981 static void pf_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi) {
982     ATRACE_CALL();
983     struct fuse* fuse = get_fuse(req);
984     node* node = fuse->FromInode(ino);
985     if (!node) {
986         fuse_reply_err(req, ENOENT);
987         return;
988     }
989     const struct fuse_ctx* ctx = fuse_req_ctx(req);
990     const string path = node->BuildPath();
991     if (!is_app_accessible_path(fuse->mp, path, ctx->uid)) {
992         fuse_reply_err(req, ENOENT);
993         return;
994     }
995 
996     TRACE_NODE(node, req) << (is_requesting_write(fi->flags) ? "write" : "read");
997 
998     if (fi->flags & O_DIRECT) {
999         fi->flags &= ~O_DIRECT;
1000         fi->direct_io = true;
1001     }
1002 
1003     int status = fuse->mp->IsOpenAllowed(path, ctx->uid, is_requesting_write(fi->flags));
1004     if (status) {
1005         fuse_reply_err(req, status);
1006         return;
1007     }
1008 
1009     // With the writeback cache enabled, FUSE may generate READ requests even for files that
1010     // were opened O_WRONLY; so make sure we open it O_RDWR instead.
1011     int open_flags = fi->flags;
1012     if (open_flags & O_WRONLY) {
1013         open_flags &= ~O_WRONLY;
1014         open_flags |= O_RDWR;
1015     }
1016 
1017     if (open_flags & O_APPEND) {
1018         open_flags &= ~O_APPEND;
1019     }
1020 
1021     const int fd = open(path.c_str(), open_flags);
1022     if (fd < 0) {
1023         fuse_reply_err(req, errno);
1024         return;
1025     }
1026 
1027     // We don't redact if the caller was granted write permission for this file
1028     std::unique_ptr<RedactionInfo> ri;
1029     if (is_requesting_write(fi->flags)) {
1030         ri = std::make_unique<RedactionInfo>();
1031     } else {
1032         ri = fuse->mp->GetRedactionInfo(path, req->ctx.uid, req->ctx.pid);
1033     }
1034 
1035     if (!ri) {
1036         close(fd);
1037         fuse_reply_err(req, EFAULT);
1038         return;
1039     }
1040 
1041     handle* h = create_handle_for_node(fuse, path, fd, node, ri.release());
1042     fi->fh = ptr_to_id(h);
1043     fi->keep_cache = 1;
1044     fi->direct_io = !h->cached;
1045     fuse_reply_open(req, fi);
1046 }
1047 
1048 static void do_read(fuse_req_t req, size_t size, off_t off, struct fuse_file_info* fi) {
1049     handle* h = reinterpret_cast<handle*>(fi->fh);
1050     struct fuse_bufvec buf = FUSE_BUFVEC_INIT(size);
1051 
1052     buf.buf[0].fd = h->fd;
1053     buf.buf[0].pos = off;
1054     buf.buf[0].flags =
1055             (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1056 
1057     fuse_reply_data(req, &buf, (enum fuse_buf_copy_flags) 0);
1058 }
1059 
1060 static bool range_contains(const RedactionRange& rr, off_t off) {
1061     return rr.first <= off && off <= rr.second;
1062 }
1063 
1064 /**
1065  * Sets the parameters for a fuse_buf that reads from memory, including flags.
1066  * Makes buf->mem point to an already mapped region of zeroized memory.
1067  * This memory is read only.
1068  */
1069 static void create_mem_fuse_buf(size_t size, fuse_buf* buf, struct fuse* fuse) {
1070     buf->size = size;
1071     buf->mem = fuse->zero_addr;
1072     buf->flags = static_cast<fuse_buf_flags>(0 /*read from fuse_buf.mem*/);
1073     buf->pos = -1;
1074     buf->fd = -1;
1075 }
1076 
1077 /**
1078  * Sets the parameters for a fuse_buf that reads from file, including flags.
1079  */
1080 static void create_file_fuse_buf(size_t size, off_t pos, int fd, fuse_buf* buf) {
1081     buf->size = size;
1082     buf->fd = fd;
1083     buf->pos = pos;
1084     buf->flags = static_cast<fuse_buf_flags>(FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1085     buf->mem = nullptr;
1086 }
1087 
1088 static void do_read_with_redaction(fuse_req_t req, size_t size, off_t off, fuse_file_info* fi) {
1089     handle* h = reinterpret_cast<handle*>(fi->fh);
1090     auto overlapping_rr = h->ri->getOverlappingRedactionRanges(size, off);
1091 
1092     if (overlapping_rr->size() <= 0) {
1093         // no relevant redaction ranges for this request
1094         do_read(req, size, off, fi);
1095         return;
1096     }
1097     // the number of buffers we need, if the read doesn't start or end with
1098     //  a redaction range.
1099     int num_bufs = overlapping_rr->size() * 2 + 1;
1100     if (overlapping_rr->front().first <= off) {
1101         // the beginning of the read request is redacted
1102         num_bufs--;
1103     }
1104     if (overlapping_rr->back().second >= off + size) {
1105         // the end of the read request is redacted
1106         num_bufs--;
1107     }
1108     auto bufvec_ptr = std::unique_ptr<fuse_bufvec, decltype(free)*>{
1109             reinterpret_cast<fuse_bufvec*>(
1110                     malloc(sizeof(fuse_bufvec) + (num_bufs - 1) * sizeof(fuse_buf))),
1111             free};
1112     fuse_bufvec& bufvec = *bufvec_ptr;
1113 
1114     // initialize bufvec
1115     bufvec.count = num_bufs;
1116     bufvec.idx = 0;
1117     bufvec.off = 0;
1118 
1119     int rr_idx = 0;
1120     off_t start = off;
1121     // Add a dummy redaction range to make sure we don't go out of vector
1122     // limits when computing the end of the last non-redacted range.
1123     // This range is invalid because its starting point is larger than its ending point.
1124     overlapping_rr->push_back(RedactionRange(LLONG_MAX, LLONG_MAX - 1));
1125 
1126     for (int i = 0; i < num_bufs; ++i) {
1127         off_t end;
1128         if (range_contains(overlapping_rr->at(rr_idx), start)) {
1129             // Handle a redacted range
1130             // end should be the end of the redacted range, but can't be out of
1131             // the read request bounds
1132             end = std::min(static_cast<off_t>(off + size - 1), overlapping_rr->at(rr_idx).second);
1133             create_mem_fuse_buf(/*size*/ end - start + 1, &(bufvec.buf[i]), get_fuse(req));
1134             ++rr_idx;
1135         } else {
1136             // Handle a non-redacted range
1137             // end should be right before the next redaction range starts or
1138             // the end of the read request
1139             end = std::min(static_cast<off_t>(off + size - 1),
1140                     overlapping_rr->at(rr_idx).first - 1);
1141             create_file_fuse_buf(/*size*/ end - start + 1, start, h->fd, &(bufvec.buf[i]));
1142         }
1143         start = end + 1;
1144     }
1145 
1146     fuse_reply_data(req, &bufvec, static_cast<fuse_buf_copy_flags>(0));
1147 }
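// Worked example (illustrative numbers): a read of size 100 at offset 1000
// with one redaction range [1020, 1039] is split into three buffers: file
// bytes [1000, 1019], zeroized memory for [1020, 1039] (backed by
// fuse->zero_addr), and file bytes [1040, 1099]. If the redaction range
// covered the start (or end) of the read instead, num_bufs above would be
// one smaller.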
1148 
1149 static void pf_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1150                     struct fuse_file_info* fi) {
1151     ATRACE_CALL();
1152     handle* h = reinterpret_cast<handle*>(fi->fh);
1153     struct fuse* fuse = get_fuse(req);
1154 
1155     fuse->fadviser.Record(h->fd, size);
1156 
1157     if (h->ri->isRedactionNeeded()) {
1158         do_read_with_redaction(req, size, off, fi);
1159     } else {
1160         do_read(req, size, off, fi);
1161     }
1162 }
1163 
1164 /*
1165 static void pf_write(fuse_req_t req, fuse_ino_t ino, const char* buf,
1166                        size_t size, off_t off, struct fuse_file_info* fi)
1167 {
1168     cout << "TODO:" << __func__;
1169 }
1170 */
1171 
1172 static void pf_write_buf(fuse_req_t req,
1173                          fuse_ino_t ino,
1174                          struct fuse_bufvec* bufv,
1175                          off_t off,
1176                          struct fuse_file_info* fi) {
1177     ATRACE_CALL();
1178     handle* h = reinterpret_cast<handle*>(fi->fh);
1179     struct fuse_bufvec buf = FUSE_BUFVEC_INIT(fuse_buf_size(bufv));
1180     ssize_t size;
1181     struct fuse* fuse = get_fuse(req);
1182 
1183     buf.buf[0].fd = h->fd;
1184     buf.buf[0].pos = off;
1185     buf.buf[0].flags =
1186             (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1187     size = fuse_buf_copy(&buf, bufv, (enum fuse_buf_copy_flags) 0);
1188 
1189     if (size < 0)
1190         fuse_reply_err(req, -size);
1191     else {
1192         fuse_reply_write(req, size);
1193         fuse->fadviser.Record(h->fd, size);
1194     }
1195 }
1196 // Haven't tested this one. Not sure what calls it.
1197 #if 0
1198 static void pf_copy_file_range(fuse_req_t req, fuse_ino_t ino_in,
1199                                  off_t off_in, struct fuse_file_info* fi_in,
1200                                  fuse_ino_t ino_out, off_t off_out,
1201                                  struct fuse_file_info* fi_out, size_t len,
1202                                  int flags)
1203 {
1204     handle* h_in = reinterpret_cast<handle *>(fi_in->fh);
1205     handle* h_out = reinterpret_cast<handle *>(fi_out->fh);
1206     struct fuse_bufvec buf_in = FUSE_BUFVEC_INIT(len);
1207     struct fuse_bufvec buf_out = FUSE_BUFVEC_INIT(len);
1208     ssize_t size;
1209 
1210     buf_in.buf[0].fd = h_in->fd;
1211     buf_in.buf[0].pos = off_in;
1212     buf_in.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
1213 
1214     buf_out.buf[0].fd = h_out->fd;
1215     buf_out.buf[0].pos = off_out;
1216     buf_out.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
1217     size = fuse_buf_copy(&buf_out, &buf_in, (enum fuse_buf_copy_flags) 0);
1218 
1219     if (size < 0) {
1220         fuse_reply_err(req, -size);
1221     }
1222 
1223     fuse_reply_write(req, size);
1224 }
1225 #endif
1226 static void pf_flush(fuse_req_t req,
1227                      fuse_ino_t ino,
1228                      struct fuse_file_info* fi) {
1229     ATRACE_CALL();
1230     struct fuse* fuse = get_fuse(req);
1231     TRACE_NODE(nullptr, req) << "noop";
1232     fuse_reply_err(req, 0);
1233 }
1234 
1235 static void pf_release(fuse_req_t req,
1236                        fuse_ino_t ino,
1237                        struct fuse_file_info* fi) {
1238     ATRACE_CALL();
1239     struct fuse* fuse = get_fuse(req);
1240 
1241     node* node = fuse->FromInode(ino);
1242     handle* h = reinterpret_cast<handle*>(fi->fh);
1243     TRACE_NODE(node, req);
1244 
1245     fuse->fadviser.Close(h->fd);
1246     if (node) {
1247         node->DestroyHandle(h);
1248     }
1249 
1250     fuse_reply_err(req, 0);
1251 }
1252 
1253 static int do_sync_common(int fd, bool datasync) {
1254     int res = datasync ? fdatasync(fd) : fsync(fd);
1255 
1256     if (res == -1) return errno;
1257     return 0;
1258 }
1259 
1260 static void pf_fsync(fuse_req_t req,
1261                      fuse_ino_t ino,
1262                      int datasync,
1263                      struct fuse_file_info* fi) {
1264     ATRACE_CALL();
1265     handle* h = reinterpret_cast<handle*>(fi->fh);
1266     int err = do_sync_common(h->fd, datasync);
1267 
1268     fuse_reply_err(req, err);
1269 }
1270 
1271 static void pf_fsyncdir(fuse_req_t req,
1272                         fuse_ino_t ino,
1273                         int datasync,
1274                         struct fuse_file_info* fi) {
1275     dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1276     int err = do_sync_common(dirfd(h->d), datasync);
1277 
1278     fuse_reply_err(req, err);
1279 }
1280 
1281 static void pf_opendir(fuse_req_t req,
1282                        fuse_ino_t ino,
1283                        struct fuse_file_info* fi) {
1284     ATRACE_CALL();
1285     struct fuse* fuse = get_fuse(req);
1286     node* node = fuse->FromInode(ino);
1287     if (!node) {
1288         fuse_reply_err(req, ENOENT);
1289         return;
1290     }
1291     const struct fuse_ctx* ctx = fuse_req_ctx(req);
1292     const string path = node->BuildPath();
1293     if (!is_app_accessible_path(fuse->mp, path, ctx->uid)) {
1294         fuse_reply_err(req, ENOENT);
1295         return;
1296     }
1297 
1298     TRACE_NODE(node, req);
1299 
1300     int status = fuse->mp->IsOpendirAllowed(path, ctx->uid, /* forWrite */ false);
1301     if (status) {
1302         fuse_reply_err(req, status);
1303         return;
1304     }
1305 
1306     DIR* dir = opendir(path.c_str());
1307     if (!dir) {
1308         fuse_reply_err(req, errno);
1309         return;
1310     }
1311 
1312     dirhandle* h = new dirhandle(dir);
1313     node->AddDirHandle(h);
1314 
1315     fi->fh = ptr_to_id(h);
1316     fuse_reply_open(req, fi);
1317 }
1318 
1319 #define READDIR_BUF 8192LU
1320 
1321 static void do_readdir_common(fuse_req_t req,
1322                               fuse_ino_t ino,
1323                               size_t size,
1324                               off_t off,
1325                               struct fuse_file_info* fi,
1326                               bool plus) {
1327     struct fuse* fuse = get_fuse(req);
1328     dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1329     size_t len = std::min<size_t>(size, READDIR_BUF);
1330     char buf[READDIR_BUF];
1331     size_t used = 0;
1332     std::shared_ptr<DirectoryEntry> de;
1333 
1334     struct fuse_entry_param e;
1335     size_t entry_size = 0;
1336 
1337     node* node = fuse->FromInode(ino);
1338     if (!node) {
1339         fuse_reply_err(req, ENOENT);
1340         return;
1341     }
1342     const string path = node->BuildPath();
1343     if (!is_app_accessible_path(fuse->mp, path, req->ctx.uid)) {
1344         fuse_reply_err(req, ENOENT);
1345         return;
1346     }
1347 
1348     TRACE_NODE(node, req);
1349     // Get all directory entries from MediaProvider on first readdir() call of
1350     // directory handle. h->next_off = 0 indicates that the current readdir() call
1351     // is the first readdir() call for the directory handle. Avoid multiple JNI calls
1352     // for a single directory handle.
1353     if (h->next_off == 0) {
1354         h->de = fuse->mp->GetDirectoryEntries(req->ctx.uid, path, h->d);
1355     }
1356     // If the last entry in the previous readdir() call was rejected due to
1357     // buffer capacity constraints, update directory offset to start from
1358     // previously rejected entry. Directory offset can also change if there was
1359     // a seekdir() on the given directory handle.
1360     if (off != h->next_off) {
1361         h->next_off = off;
1362     }
1363     const int num_directory_entries = h->de.size();
1364     // Check for errors. Any error/exception occurred while obtaining directory
1365     // entries will be indicated by marking first directory entry name as empty
1366     // string. In the erroneous case corresponding d_type will hold error number.
1367     if (num_directory_entries && h->de[0]->d_name.empty()) {
1368         fuse_reply_err(req, h->de[0]->d_type);
1369         return;
1370     }
1371 
1372     while (h->next_off < num_directory_entries) {
1373         de = h->de[h->next_off];
1374         entry_size = 0;
1375         h->next_off++;
1376         if (plus) {
1377             int error_code = 0;
1378             if (do_lookup(req, ino, de->d_name.c_str(), &e, &error_code)) {
1379                 entry_size = fuse_add_direntry_plus(req, buf + used, len - used, de->d_name.c_str(),
1380                                                     &e, h->next_off);
1381             } else {
1382                 // Ignore lookup errors on
1383                 // 1. non-existing files returned from MediaProvider database.
1384                 // 2. path that doesn't match FuseDaemon UID and calling uid.
1385                 if (error_code == ENOENT || error_code == EPERM || error_code == EACCES) continue;
1386                 fuse_reply_err(req, error_code);
1387                 return;
1388             }
1389         } else {
1390             // This should never happen because we have readdir_plus enabled without adaptive
1391             // readdir_plus, FUSE_CAP_READDIRPLUS_AUTO
1392             LOG(WARNING) << "Handling plain readdir for " << de->d_name << ". Invalid d_ino";
1393             e.attr.st_ino = FUSE_UNKNOWN_INO;
1394             e.attr.st_mode = de->d_type << 12;
1395             entry_size = fuse_add_direntry(req, buf + used, len - used, de->d_name.c_str(), &e.attr,
1396                                            h->next_off);
1397         }
1398         // If buffer in fuse_add_direntry[_plus] is not large enough then
1399         // the entry is not added to buffer but the size of the entry is still
1400         // returned. Check that the used buffer size plus the returned entry size does
1401         // not exceed the actual buffer size to confirm the entry was added to the buffer.
1402         if (used + entry_size > len) {
1403             // When an entry is rejected, lookup called by readdir_plus will not be tracked by
1404             // kernel. Call forget on the rejected node to decrement the reference count.
1405             if (plus) {
1406                 do_forget(req, fuse, e.ino, 1);
1407             }
1408             break;
1409         }
1410         used += entry_size;
1411     }
1412     fuse_reply_buf(req, buf, used);
1413 }
1414 
1415 static void pf_readdir(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1416                        struct fuse_file_info* fi) {
1417     ATRACE_CALL();
1418     do_readdir_common(req, ino, size, off, fi, false);
1419 }
1420 
1421 static void pf_readdirplus(fuse_req_t req,
1422                            fuse_ino_t ino,
1423                            size_t size,
1424                            off_t off,
1425                            struct fuse_file_info* fi) {
1426     ATRACE_CALL();
1427     do_readdir_common(req, ino, size, off, fi, true);
1428 }
1429 
1430 static void pf_releasedir(fuse_req_t req,
1431                           fuse_ino_t ino,
1432                           struct fuse_file_info* fi) {
1433     ATRACE_CALL();
1434     struct fuse* fuse = get_fuse(req);
1435 
1436     node* node = fuse->FromInode(ino);
1437 
1438     dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1439     TRACE_NODE(node, req);
1440     if (node) {
1441         node->DestroyDirHandle(h);
1442     }
1443 
1444     fuse_reply_err(req, 0);
1445 }
1446 
static void pf_statfs(fuse_req_t req, fuse_ino_t ino) {
    ATRACE_CALL();
    struct statvfs st;
    struct fuse* fuse = get_fuse(req);

    if (statvfs(fuse->root->GetName().c_str(), &st))
        fuse_reply_err(req, errno);
    else
        fuse_reply_statfs(req, &st);
}
/*
static void pf_setxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
                          const char* value, size_t size, int flags)
{
    cout << "TODO:" << __func__;
}

static void pf_getxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
                          size_t size)
{
    cout << "TODO:" << __func__;
}

static void pf_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
{
    cout << "TODO:" << __func__;
}

static void pf_removexattr(fuse_req_t req, fuse_ino_t ino, const char* name)
{
    cout << "TODO:" << __func__;
}*/

static void pf_access(fuse_req_t req, fuse_ino_t ino, int mask) {
    ATRACE_CALL();
    struct fuse* fuse = get_fuse(req);

    node* node = fuse->FromInode(ino);
    if (!node) {
        fuse_reply_err(req, ENOENT);
        return;
    }
    const string path = node->BuildPath();
    if (path != "/storage/emulated" && !is_app_accessible_path(fuse->mp, path, req->ctx.uid)) {
        fuse_reply_err(req, ENOENT);
        return;
    }
    TRACE_NODE(node, req);

    // exists() checks are always allowed.
    if (mask == F_OK) {
        int res = access(path.c_str(), F_OK);
        fuse_reply_err(req, res ? errno : 0);
        return;
    }
    struct stat stat;
    if (lstat(path.c_str(), &stat)) {
        // File doesn't exist
        fuse_reply_err(req, ENOENT);
        return;
    }

    // For read and write permission checks we go to MediaProvider.
    int status = 0;
    bool for_write = mask & W_OK;
    bool is_directory = S_ISDIR(stat.st_mode);
    if (is_directory) {
        if (path == "/storage/emulated" && mask == X_OK) {
            // Special case for this path: apps should be allowed to enter it,
            // but not list directory contents (which would be user numbers).
            int res = access(path.c_str(), X_OK);
            fuse_reply_err(req, res ? errno : 0);
            return;
        }
        status = fuse->mp->IsOpendirAllowed(path, req->ctx.uid, for_write);
    } else {
        if (mask & X_OK) {
            // Fuse is mounted with MS_NOEXEC.
            fuse_reply_err(req, EACCES);
            return;
        }

        status = fuse->mp->IsOpenAllowed(path, req->ctx.uid, for_write);
    }

    fuse_reply_err(req, status);
}
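
// From an app's perspective, the checks above back plain access(2) calls on /storage paths:
// F_OK is answered locally, X_OK on regular files is refused because the mount is MS_NOEXEC,
// and R_OK/W_OK checks are delegated to MediaProvider. A hedged caller-side sketch
// (illustrative only; the paths are examples, not special-cased by this daemon):
/*
#include <unistd.h>

static void probe_storage_access_example() {
    // Existence check: served directly via access(path, F_OK) in pf_access.
    if (access("/storage/emulated/0/DCIM", F_OK) == 0) {
        // Write checks on a file are answered by MediaProvider's IsOpenAllowed().
        bool can_write = (access("/storage/emulated/0/DCIM/example.jpg", W_OK) == 0);
        // Exec checks on regular files always fail with EACCES (MS_NOEXEC mount).
        bool can_exec = (access("/storage/emulated/0/DCIM/example.jpg", X_OK) == 0);
        (void)can_write;
        (void)can_exec;
    }
}
*/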

static void pf_create(fuse_req_t req,
                      fuse_ino_t parent,
                      const char* name,
                      mode_t mode,
                      struct fuse_file_info* fi) {
    ATRACE_CALL();
    struct fuse* fuse = get_fuse(req);
    node* parent_node = fuse->FromInode(parent);
    if (!parent_node) {
        fuse_reply_err(req, ENOENT);
        return;
    }
    const string parent_path = parent_node->BuildPath();
    if (!is_app_accessible_path(fuse->mp, parent_path, req->ctx.uid)) {
        fuse_reply_err(req, ENOENT);
        return;
    }

    TRACE_NODE(parent_node, req);

    const string child_path = parent_path + "/" + name;

    int mp_return_code = fuse->mp->InsertFile(child_path.c_str(), req->ctx.uid);
    if (mp_return_code) {
        fuse_reply_err(req, mp_return_code);
        return;
    }

    // With the writeback cache enabled, FUSE may generate READ requests even for files that
    // were opened O_WRONLY, so make sure we open them O_RDWR instead.
    int open_flags = fi->flags;
    if (open_flags & O_WRONLY) {
        open_flags &= ~O_WRONLY;
        open_flags |= O_RDWR;
    }

    if (open_flags & O_APPEND) {
        open_flags &= ~O_APPEND;
    }

    mode = (mode & (~0777)) | 0664;
    int fd = open(child_path.c_str(), open_flags, mode);
    if (fd < 0) {
        int error_code = errno;
        // We've already inserted the file into the MediaProvider database before the
        // failed open(), so that needs to be rolled back here.
        fuse->mp->DeleteFile(child_path.c_str(), req->ctx.uid);
        fuse_reply_err(req, error_code);
        return;
    }

    int error_code = 0;
    struct fuse_entry_param e;
    node* node = make_node_entry(req, parent_node, name, child_path, &e, &error_code);
    TRACE_NODE(node, req);
    if (!node) {
        CHECK(error_code != 0);
        fuse_reply_err(req, error_code);
        return;
    }

    // Let MediaProvider know we've created a new file.
    fuse->mp->OnFileCreated(child_path);

    // TODO(b/147274248): Assume there will be no EXIF to redact.
    // This prevents crashing during reads but can be a security hole if a malicious app opens an
    // fd to the file before all the EXIF content is written. We could special-case reads before
    // the first close after a file has just been created.
    handle* h = create_handle_for_node(fuse, child_path, fd, node, new RedactionInfo());
    fi->fh = ptr_to_id(h);
    fi->keep_cache = 1;
    fi->direct_io = !h->cached;
    fuse_reply_create(req, &e, fi);
}
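
// The flag rewriting above mirrors what the kernel writeback cache requires: the kernel may
// issue reads on a writeback-cached file even if userspace opened it O_WRONLY, and it manages
// O_APPEND offsets itself. A hedged sketch of that adjustment as a standalone helper
// (illustrative only; adjust_flags_for_writeback_cache is not part of this daemon):
/*
static int adjust_flags_for_writeback_cache(int flags) {
    if (flags & O_WRONLY) {
        // Reopen write-only files read-write so kernel-initiated reads on cached pages succeed.
        flags = (flags & ~O_WRONLY) | O_RDWR;
    }
    // The kernel tracks the append offset when the writeback cache is enabled.
    flags &= ~O_APPEND;
    return flags;
}
*/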
/*
static void pf_getlk(fuse_req_t req, fuse_ino_t ino,
                       struct fuse_file_info* fi, struct flock* lock)
{
    cout << "TODO:" << __func__;
}

static void pf_setlk(fuse_req_t req, fuse_ino_t ino,
                       struct fuse_file_info* fi,
                       struct flock* lock, int sleep)
{
    cout << "TODO:" << __func__;
}

static void pf_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
                      uint64_t idx)
{
    cout << "TODO:" << __func__;
}

static void pf_ioctl(fuse_req_t req, fuse_ino_t ino, unsigned int cmd,
                       void* arg, struct fuse_file_info* fi, unsigned flags,
                       const void* in_buf, size_t in_bufsz, size_t out_bufsz)
{
    cout << "TODO:" << __func__;
}

static void pf_poll(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi,
                      struct fuse_pollhandle* ph)
{
    cout << "TODO:" << __func__;
}

static void pf_retrieve_reply(fuse_req_t req, void* cookie, fuse_ino_t ino,
                                off_t offset, struct fuse_bufvec* bufv)
{
    cout << "TODO:" << __func__;
}

static void pf_flock(fuse_req_t req, fuse_ino_t ino,
                       struct fuse_file_info* fi, int op)
{
    cout << "TODO:" << __func__;
}

static void pf_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
                       off_t offset, off_t length, struct fuse_file_info* fi)
{
    cout << "TODO:" << __func__;
}
*/

static struct fuse_lowlevel_ops ops{
    .init = pf_init, .destroy = pf_destroy, .lookup = pf_lookup, .forget = pf_forget,
    .getattr = pf_getattr, .setattr = pf_setattr, .canonical_path = pf_canonical_path,
    .mknod = pf_mknod, .mkdir = pf_mkdir, .unlink = pf_unlink, .rmdir = pf_rmdir,
    /*.symlink = pf_symlink,*/
    .rename = pf_rename,
    /*.link = pf_link,*/
    .open = pf_open, .read = pf_read,
    /*.write = pf_write,*/
    .flush = pf_flush,
    .release = pf_release, .fsync = pf_fsync, .opendir = pf_opendir, .readdir = pf_readdir,
    .releasedir = pf_releasedir, .fsyncdir = pf_fsyncdir, .statfs = pf_statfs,
    /*.setxattr = pf_setxattr,
    .getxattr = pf_getxattr,
    .listxattr = pf_listxattr,
    .removexattr = pf_removexattr,*/
    .access = pf_access, .create = pf_create,
    /*.getlk = pf_getlk,
    .setlk = pf_setlk,
    .bmap = pf_bmap,
    .ioctl = pf_ioctl,
    .poll = pf_poll,*/
    .write_buf = pf_write_buf,
    /*.retrieve_reply = pf_retrieve_reply,*/
    .forget_multi = pf_forget_multi,
    /*.flock = pf_flock,
    .fallocate = pf_fallocate,*/
    .readdirplus = pf_readdirplus,
    /*.copy_file_range = pf_copy_file_range,*/
};

static struct fuse_loop_config config = {
        .clone_fd = 1,
        .max_idle_threads = 10,
};

static std::unordered_map<enum fuse_log_level, enum android_LogPriority> fuse_to_android_loglevel({
    {FUSE_LOG_EMERG, ANDROID_LOG_FATAL},
    {FUSE_LOG_ALERT, ANDROID_LOG_ERROR},
    {FUSE_LOG_CRIT, ANDROID_LOG_ERROR},
    {FUSE_LOG_ERR, ANDROID_LOG_ERROR},
    {FUSE_LOG_WARNING, ANDROID_LOG_WARN},
    {FUSE_LOG_NOTICE, ANDROID_LOG_INFO},
    {FUSE_LOG_INFO, ANDROID_LOG_DEBUG},
    {FUSE_LOG_DEBUG, ANDROID_LOG_VERBOSE},
    });

static void fuse_logger(enum fuse_log_level level, const char* fmt, va_list ap) {
    __android_log_vprint(fuse_to_android_loglevel.at(level), LIBFUSE_LOG_TAG, fmt, ap);
}
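
// Once installed with fuse_set_log_func(), every libfuse diagnostic is forwarded to logcat
// under LIBFUSE_LOG_TAG at the mapped priority above (e.g. FUSE_LOG_DEBUG becomes
// ANDROID_LOG_VERBOSE). A hedged usage sketch (illustrative only):
/*
static void enable_libfuse_logging_example() {
    fuse_set_log_func(fuse_logger);
    // This would appear as a VERBOSE "libfuse" entry in logcat.
    fuse_log(FUSE_LOG_DEBUG, "libfuse logging bridged to logcat\n");
}
*/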

bool FuseDaemon::ShouldOpenWithFuse(int fd, bool for_read, const std::string& path) {
    bool use_fuse = false;

    if (active.load(std::memory_order_acquire)) {
        std::lock_guard<std::recursive_mutex> guard(fuse->lock);
        const node* node = node::LookupAbsolutePath(fuse->root, path);
        if (node && node->HasCachedHandle()) {
            use_fuse = true;
        } else {
            // If we are unable to set a lock, we should use FUSE, since we can't track
            // when all fd references (including dups) are closed. This can happen when
            // we try to set a write lock twice on the same file.
            use_fuse = set_file_lock(fd, for_read, path);
        }
    } else {
        LOG(WARNING) << "FUSE daemon is inactive. Cannot open file with FUSE";
    }

    return use_fuse;
}
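
// The comment above describes a lock-based probe: an open-file-description lock stays
// attached to the open file (and its dups) until every reference is closed, so whether the
// lock can be taken tells the daemon something about concurrent opens. A hedged sketch of
// taking such a lock with Linux F_OFD_SETLK (an illustration of the mechanism only; the
// actual set_file_lock helper and its return convention are defined elsewhere):
/*
#include <fcntl.h>

static bool try_claim_file_example(int fd, bool for_read) {
    struct flock fl = {};
    fl.l_type = for_read ? F_RDLCK : F_WRLCK;
    fl.l_whence = SEEK_SET;  // l_start = 0, l_len = 0 locks the whole file
    // F_OFD_SETLK is non-blocking; failure means another open already holds a conflicting
    // lock on this file.
    return fcntl(fd, F_OFD_SETLK, &fl) == 0;
}
*/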

void FuseDaemon::InvalidateFuseDentryCache(const std::string& path) {
    LOG(VERBOSE) << "Invalidating FUSE dentry cache";
    if (active.load(std::memory_order_acquire)) {
        string name;
        fuse_ino_t parent;
        fuse_ino_t child;
        {
            std::lock_guard<std::recursive_mutex> guard(fuse->lock);
            const node* node = node::LookupAbsolutePath(fuse->root, path);
            if (node) {
                name = node->GetName();
                child = fuse->ToInode(const_cast<class node*>(node));
                parent = fuse->ToInode(node->GetParent());
            }
        }

        if (!name.empty()) {
            fuse_inval(fuse->se, parent, child, name, path);
        }
    } else {
        LOG(WARNING) << "FUSE daemon is inactive. Cannot invalidate dentry";
    }
}
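
// Conceptually, invalidating a dentry maps onto libfuse's lowlevel notify API. A hedged
// sketch of that mechanism (an assumption about what the fuse_inval() helper used above
// ultimately drives, not its actual implementation):
/*
static void invalidate_dentry_sketch(struct fuse_session* se, fuse_ino_t parent,
                                     fuse_ino_t child, const std::string& name) {
    // Drop the cached name -> inode mapping from the kernel dentry cache...
    fuse_lowlevel_notify_inval_entry(se, parent, name.c_str(), name.size());
    // ...and drop cached attributes/pages for the child inode itself (off 0, len 0 = all).
    fuse_lowlevel_notify_inval_inode(se, child, 0, 0);
}
*/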

FuseDaemon::FuseDaemon(JNIEnv* env, jobject mediaProvider) : mp(env, mediaProvider),
                                                             active(false), fuse(nullptr) {}

bool FuseDaemon::IsStarted() const {
    return active.load(std::memory_order_acquire);
}

void FuseDaemon::Start(android::base::unique_fd fd, const std::string& path) {
    android::base::SetDefaultTag(LOG_TAG);

    struct fuse_args args;
    struct fuse_cmdline_opts opts;

    struct stat stat;

    if (lstat(path.c_str(), &stat)) {
        PLOG(ERROR) << "ERROR: failed to stat source " << path;
        return;
    }

    if (!S_ISDIR(stat.st_mode)) {
        PLOG(ERROR) << "ERROR: source is not a directory";
        return;
    }

    args = FUSE_ARGS_INIT(0, nullptr);
    if (fuse_opt_add_arg(&args, path.c_str()) || fuse_opt_add_arg(&args, "-odebug") ||
        fuse_opt_add_arg(&args, ("-omax_read=" + std::to_string(MAX_READ_SIZE)).c_str())) {
        LOG(ERROR) << "ERROR: failed to set options";
        return;
    }

    struct fuse fuse_default(path);
    fuse_default.mp = &mp;
    // fuse_default is stack allocated, but it's safe to save it as an instance variable because
    // this method blocks and FuseDaemon#active tells whether we are currently blocking.
    fuse = &fuse_default;

    // Used by pf_read: redacted ranges are represented by zeroized ranges of bytes,
    // so we mmap a zero-filled region of the maximum read size up front and save memory
    // allocations on each read.
    fuse_default.zero_addr = static_cast<char*>(mmap(
            NULL, MAX_READ_SIZE, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, /*fd*/ -1, /*off*/ 0));
    if (fuse_default.zero_addr == MAP_FAILED) {
        LOG(FATAL) << "mmap failed - could not start fuse! errno = " << errno;
    }

    // Custom logging for libfuse
    if (android::base::GetBoolProperty("persist.sys.fuse.log", false)) {
        fuse_set_log_func(fuse_logger);
    }

    struct fuse_session
            * se = fuse_session_new(&args, &ops, sizeof(ops), &fuse_default);
    if (!se) {
        PLOG(ERROR) << "Failed to create session";
        return;
    }
    fuse_default.se = se;
    fuse_default.active = &active;
    se->fd = fd.release();  // libfuse owns the FD now
    se->mountpoint = strdup(path.c_str());

    // Single thread. Useful for debugging
    // fuse_session_loop(se);
    // Multi-threaded
    LOG(INFO) << "Starting fuse...";
    fuse_session_loop_mt(se, &config);
    fuse->active->store(false, std::memory_order_release);
    LOG(INFO) << "Ending fuse...";

    if (munmap(fuse_default.zero_addr, MAX_READ_SIZE)) {
        PLOG(ERROR) << "munmap failed!";
    }

    fuse_opt_free_args(&args);
    fuse_session_destroy(se);
    LOG(INFO) << "Ended fuse";
    return;
}
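
// The zero_addr region mapped in Start() lets redacted byte ranges be served without
// allocating or zeroing a scratch buffer on every read: one read-only, zero-filled mapping
// of MAX_READ_SIZE is created once and reused. A hedged, self-contained sketch of that
// pattern (illustrative only; this is not the daemon's actual pf_read logic):
/*
#include <string.h>
#include <sys/mman.h>

static char* map_shared_zero_region(size_t max_read_size) {
    // Anonymous PROT_READ pages always read back as zeroes.
    void* addr = mmap(nullptr, max_read_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    return addr == MAP_FAILED ? nullptr : static_cast<char*>(addr);
}

static void serve_redacted_range(char* dst, const char* zero_region, size_t range_len) {
    // Instead of allocating and memset()-ing a buffer per request, copy from the shared
    // zero region that was mapped once at startup.
    memcpy(dst, zero_region, range_len);
}
*/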
}  // namespace fuse
}  // namespace mediaprovider