1 // Copyright (C) 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #define ATRACE_TAG ATRACE_TAG_APP
16 #define LOG_TAG "FuseDaemon"
17 #define LIBFUSE_LOG_TAG "libfuse"
18
19 #include "FuseDaemon.h"
20
21 #include <android-base/file.h>
22 #include <android-base/logging.h>
23 #include <android-base/properties.h>
24 #include <android-base/strings.h>
25 #include <android/log.h>
26 #include <android/trace.h>
27 #include <ctype.h>
28 #include <dirent.h>
29 #include <errno.h>
30 #include <fcntl.h>
31 #include <fuse_i.h>
32 #include <fuse_kernel.h>
33 #include <fuse_log.h>
34 #include <fuse_lowlevel.h>
35 #include <inttypes.h>
36 #include <limits.h>
37 #include <stdbool.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <sys/inotify.h>
42 #include <sys/mman.h>
43 #include <sys/mount.h>
44 #include <sys/param.h>
45 #include <sys/resource.h>
46 #include <sys/stat.h>
47 #include <sys/statfs.h>
48 #include <sys/statvfs.h>
49 #include <sys/time.h>
50 #include <sys/types.h>
51 #include <sys/uio.h>
52 #include <unistd.h>
53
54 #include <iostream>
55 #include <map>
56 #include <mutex>
57 #include <queue>
58 #include <regex>
59 #include <thread>
60 #include <unordered_map>
61 #include <unordered_set>
62 #include <vector>
63
64 #define BPF_FD_JUST_USE_INT
65 #include "BpfSyscallWrappers.h"
66 #include "MediaProviderWrapper.h"
67 #include "leveldb/db.h"
68 #include "libfuse_jni/FuseUtils.h"
69 #include "libfuse_jni/ReaddirHelper.h"
70 #include "libfuse_jni/RedactionInfo.h"
71
72 using mediaprovider::fuse::DirectoryEntry;
73 using mediaprovider::fuse::dirhandle;
74 using mediaprovider::fuse::handle;
75 using mediaprovider::fuse::node;
76 using mediaprovider::fuse::RedactionInfo;
77 using std::string;
78 using std::vector;
79
80 // logging macros to avoid duplication.
81 #define TRACE_NODE(__node, __req) \
82 LOG(VERBOSE) << __FUNCTION__ << " : " << #__node << " = [" << get_name(__node) \
83 << "] (uid=" << (__req)->ctx.uid << ") "
84
85 #define ATRACE_NAME(name) ScopedTrace ___tracer(name)
86 #define ATRACE_CALL() ATRACE_NAME(__FUNCTION__)
87
88 class ScopedTrace {
89 public:
90 explicit inline ScopedTrace(const char *name) {
91 ATrace_beginSection(name);
92 }
93
94 inline ~ScopedTrace() {
95 ATrace_endSection();
96 }
97 };
98
99 const bool IS_OS_DEBUGABLE = android::base::GetIntProperty("ro.debuggable", 0);
100
101 #define FUSE_UNKNOWN_INO 0xffffffff
102
103 // Stolen from: android_filesystem_config.h
104 #define AID_APP_START 10000
105
106 constexpr size_t MAX_READ_SIZE = 128 * 1024;
107 // Stolen from: UserHandle#getUserId
108 constexpr int PER_USER_RANGE = 100000;
109
110 // Stolen from: UserManagerService
111 constexpr int MAX_USER_ID = UINT32_MAX / PER_USER_RANGE;
112
113 const int MY_UID = getuid();
114 const int MY_USER_ID = MY_UID / PER_USER_RANGE;
115 const std::string MY_USER_ID_STRING(std::to_string(MY_UID / PER_USER_RANGE));
116
117 // Regex copied from FileUtils.java in MediaProvider, but without media directory.
118 const std::regex PATTERN_OWNED_PATH(
119 "^/storage/[^/]+/(?:[0-9]+/)?Android/(?:data|obb)/([^/]+)(/?.*)?",
120 std::regex_constants::icase);
121 const std::regex PATTERN_BPF_BACKING_PATH("^/storage/[^/]+/[0-9]+/Android/(data|obb)$",
122 std::regex_constants::icase);
123
124 static constexpr char TRANSFORM_SYNTHETIC_DIR[] = "synthetic";
125 static constexpr char TRANSFORM_TRANSCODE_DIR[] = "transcode";
126 static constexpr char PRIMARY_VOLUME_PREFIX[] = "/storage/emulated";
127 static constexpr char STORAGE_PREFIX[] = "/storage";
128
129 static constexpr char VOLUME_INTERNAL[] = "internal";
130 static constexpr char VOLUME_EXTERNAL_PRIMARY[] = "external_primary";
131
132 static constexpr char OWNERSHIP_RELATION[] = "ownership";
133
134 static constexpr char FUSE_BPF_PROG_PATH[] = "/sys/fs/bpf/prog_fuseMedia_fuse_media";
135
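// Sentinel passed in place of a real BPF program fd to request that an existing
// program be removed (FUSE_ACTION_REMOVE) rather than replaced; see
// fuse_bpf_fill_entries() below.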
136 enum class BpfFd { REMOVE = -1 };
137
138 /*
139 * In order to avoid double caching with fuse, call fadvise on the file handles
140 * in the underlying file system. However, if this is done on every read/write,
141 * the fadvises cause a very significant slowdown in tests (specifically fio
142 * seq_write). So call fadvise on the file handles with the most reads/writes
143 * only after a threshold is passed.
144 */
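// Illustrative usage within this daemon (assumed call sites, for orientation only):
//   FAdviser fadviser;
//   fadviser.Record(fd, bytes_transferred);  // after a read/write on fd
//   fadviser.Close(fd);                      // when fd is released
// Once the recorded total crosses threshold_, the fds with the most traffic are
// fadvised with POSIX_FADV_DONTNEED until the total drops below target_.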
145 class FAdviser {
146 public:
147 FAdviser() : thread_(MessageLoop, this), total_size_(0) {}
148
149 ~FAdviser() {
150 SendMessage(Message::quit);
151 thread_.join();
152 }
153
154 void Record(int fd, size_t size) { SendMessage(Message::record, fd, size); }
155
156 void Close(int fd) { SendMessage(Message::close, fd); }
157
158 private:
159 struct Message {
160 enum Type { record, close, quit };
161 Type type;
162 int fd;
163 size_t size;
164 };
165
166 void RecordImpl(int fd, size_t size) {
167 total_size_ += size;
168
169 // Find or create record in files_
170 // Remove record from sizes_ if it exists, adjusting size appropriately
171 auto file = files_.find(fd);
172 if (file != files_.end()) {
173 auto old_size = file->second;
174 size += old_size->first;
175 sizes_.erase(old_size);
176 } else {
177 file = files_.insert(Files::value_type(fd, sizes_.end())).first;
178 }
179
180 // Now (re) insert record in sizes_
181 auto new_size = sizes_.insert(Sizes::value_type(size, fd));
182 file->second = new_size;
183
184 if (total_size_ < threshold_) return;
185
186 LOG(INFO) << "Threshold exceeded - fadvising " << total_size_;
187 while (!sizes_.empty() && total_size_ > target_) {
188 auto size = --sizes_.end();
189 total_size_ -= size->first;
190 posix_fadvise(size->second, 0, 0, POSIX_FADV_DONTNEED);
191 files_.erase(size->second);
192 sizes_.erase(size);
193 }
194 LOG(INFO) << "Threshold now " << total_size_;
195 }
196
197 void CloseImpl(int fd) {
198 auto file = files_.find(fd);
199 if (file == files_.end()) return;
200
201 total_size_ -= file->second->first;
202 sizes_.erase(file->second);
203 files_.erase(file);
204 }
205
206 void MessageLoopImpl() {
207 while (1) {
208 Message message;
209
210 {
211 std::unique_lock<std::mutex> lock(mutex_);
212 cv_.wait(lock, [this] { return !queue_.empty(); });
213 message = queue_.front();
214 queue_.pop();
215 }
216
217 switch (message.type) {
218 case Message::record:
219 RecordImpl(message.fd, message.size);
220 break;
221
222 case Message::close:
223 CloseImpl(message.fd);
224 break;
225
226 case Message::quit:
227 return;
228 }
229 }
230 }
231
232 static int MessageLoop(FAdviser* ptr) {
233 ptr->MessageLoopImpl();
234 return 0;
235 }
236
237 void SendMessage(Message::Type type, int fd = -1, size_t size = 0) {
238 {
239 std::unique_lock<std::mutex> lock(mutex_);
240 Message message = {type, fd, size};
241 queue_.push(message);
242 }
243 cv_.notify_one();
244 }
245
246 std::mutex mutex_;
247 std::condition_variable cv_;
248 std::queue<Message> queue_;
249 std::thread thread_;
250
251 typedef std::multimap<size_t, int> Sizes;
252 typedef std::map<int, Sizes::iterator> Files;
253
254 Files files_;
255 Sizes sizes_;
256 size_t total_size_;
257
258 const size_t threshold_ = 64 * 1024 * 1024;
259 const size_t target_ = 32 * 1024 * 1024;
260 };
261
262 /* Single FUSE mount */
263 struct fuse {
264 explicit fuse(const std::string& _path, const ino_t _ino, const bool _uncached_mode,
265 const bool _bpf, const int _bpf_fd,
266 const std::vector<string>& _supported_transcoding_relative_paths,
267 const std::vector<string>& _supported_uncached_relative_paths)
268 : path(_path),
269 tracker(mediaprovider::fuse::NodeTracker(&lock)),
270 root(node::CreateRoot(_path, &lock, _ino, &tracker)),
271 uncached_mode(_uncached_mode),
272 mp(0),
273 zero_addr(0),
274 disable_dentry_cache(false),
275 passthrough(false),
276 bpf(_bpf),
277 bpf_fd(_bpf_fd),
278 supported_transcoding_relative_paths(_supported_transcoding_relative_paths),
279 supported_uncached_relative_paths(_supported_uncached_relative_paths) {}
280
281 inline bool IsRoot(const node* node) const { return node == root; }
282
283 inline string GetEffectiveRootPath() {
284 if (android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
285 return path + "/" + MY_USER_ID_STRING;
286 }
287 return path;
288 }
289
290 inline string GetTransformsDir() { return GetEffectiveRootPath() + "/.transforms"; }
291
292 // Note that these two (FromInode / ToInode) conversion wrappers are required
293 // because fuse_lowlevel_ops documents that the root inode is always one
294 // (see FUSE_ROOT_ID in fuse_lowlevel.h). There are no particular requirements
295 // on any of the other inodes in the FS.
296 inline node* FromInode(__u64 inode) {
297 if (inode == FUSE_ROOT_ID) {
298 return root;
299 }
300
301 return node::FromInode(inode, &tracker);
302 }
303
304 inline node* FromInodeNoThrow(__u64 inode) {
305 if (inode == FUSE_ROOT_ID) {
306 return root;
307 }
308
309 return node::FromInodeNoThrow(inode, &tracker);
310 }
311
312 inline __u64 ToInode(node* node) const {
313 if (IsRoot(node)) {
314 return FUSE_ROOT_ID;
315 }
316
317 return node::ToInode(node);
318 }
319
320 inline bool IsTranscodeSupportedPath(const string& path) {
321 // Keep in sync with MediaProvider#supportsTranscode
322 if (!android::base::EndsWithIgnoreCase(path, ".mp4")) {
323 return false;
324 }
325
326 const std::string& base_path = GetEffectiveRootPath() + "/";
327 for (const std::string& relative_path : supported_transcoding_relative_paths) {
328 if (android::base::StartsWithIgnoreCase(path, base_path + relative_path)) {
329 return true;
330 }
331 }
332
333 return false;
334 }
335
336 inline bool IsUncachedPath(const std::string& path) {
337 const std::string base_path = GetEffectiveRootPath() + "/";
338 for (const std::string& relative_path : supported_uncached_relative_paths) {
339 if (android::base::StartsWithIgnoreCase(path, base_path + relative_path)) {
340 return true;
341 }
342 }
343
344 return false;
345 }
346
347 inline bool ShouldNotCache(const std::string& path) {
348 if (uncached_mode) {
349 // Cache is disabled for the entire volume.
350 return true;
351 }
352
353 if (supported_uncached_relative_paths.empty()) {
354 // By default there is no supported uncached path. Just return early in this case.
355 return false;
356 }
357
358 if (!android::base::StartsWithIgnoreCase(path, PRIMARY_VOLUME_PREFIX)) {
359 // Uncached path config applies only to primary volumes.
360 return false;
361 }
362
363 if (android::base::EndsWith(path, "/")) {
364 return IsUncachedPath(path);
365 } else {
366 // Append a slash at the end to make sure that the exact match is picked up.
367 return IsUncachedPath(path + "/");
368 }
369 }
370
371 std::recursive_mutex lock;
372 const string path;
373 // The Inode tracker associated with this FUSE instance.
374 mediaprovider::fuse::NodeTracker tracker;
375 node* const root;
376 struct fuse_session* se;
377
378 const bool uncached_mode;
379
380 /*
381 * Used to make JNI calls to MediaProvider.
382 * Responsibility of freeing this object falls on corresponding
383 * FuseDaemon object.
384 */
385 mediaprovider::fuse::MediaProviderWrapper* mp;
386
387 /*
388 * Points to a range of zeroized bytes, used by pf_read to represent redacted ranges.
389 * The memory is read only and should never be modified.
390 */
391 /* const */ char* zero_addr;
392
393 FAdviser fadviser;
394
395 std::atomic_bool* active;
396 std::atomic_bool disable_dentry_cache;
397 std::atomic_bool passthrough;
398 std::atomic_bool bpf;
399
400 const int bpf_fd;
401
402 // FUSE device id.
403 std::atomic_uint dev;
404 const std::vector<string> supported_transcoding_relative_paths;
405 const std::vector<string> supported_uncached_relative_paths;
406
407 // LevelDb Connection Map
408 std::map<std::string, leveldb::DB*> level_db_connection_map;
409 std::mutex level_db_mutex;
410 };
411
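// Result of parse_open_flags(): the sanitized flags to pass to the lower file
// system, whether the file is being opened for writing, and whether O_DIRECT
// was requested. For example (illustrative):
//   parse_open_flags(path, O_WRONLY | O_APPEND)
//   yields {.flags = O_RDWR, .for_write = true, .direct_io = false}.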
412 struct OpenInfo {
413 int flags;
414 bool for_write;
415 bool direct_io;
416 };
417
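// The FUSE operation on whose behalf a node entry is being created; used by
// validate_node_path()/make_node_entry()/do_lookup() to apply per-operation
// restrictions (e.g. only lookup may touch .transforms/synthetic).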
418 enum class FuseOp { lookup, readdir, mknod, mkdir, create };
419
420 static inline string get_name(node* n) {
421 if (n) {
422 std::string name = IS_OS_DEBUGABLE ? "real_path: " + n->BuildPath() + " " : "";
423 name += "node_path: " + n->BuildSafePath();
424 return name;
425 }
426 return "?";
427 }
428
429 static inline __u64 ptr_to_id(const void* ptr) {
430 return (__u64)(uintptr_t) ptr;
431 }
432
433 /*
434 * Set an F_RDLCK or F_WRLCK on fd with fcntl(2).
435 *
436 * This is called before the MediaProvider returns fd from the lower file
437 * system to an app over the ContentResolver interface. This allows us to
438 * check with is_file_locked if any reference to that fd is still open.
439 */
440 static int set_file_lock(int fd, bool for_read, const std::string& path) {
441 std::string lock_str = (for_read ? "read" : "write");
442
443 struct flock fl{};
444 fl.l_type = for_read ? F_RDLCK : F_WRLCK;
445 fl.l_whence = SEEK_SET;
446
447 int res = fcntl(fd, F_OFD_SETLK, &fl);
448 if (res) {
449 PLOG(WARNING) << "Failed to set lock: " << lock_str;
450 return res;
451 }
452 return res;
453 }
454
455 /*
456 * Check if an F_RDLCK or F_WRLCK is set on fd with fcntl(2).
457 *
458 * This is used to determine if the MediaProvider has given an fd to the lower fs to an app over
459 * the ContentResolver interface. Before that happens, we always call set_file_lock on the file
460 * allowing us to know if any reference to that fd is still open here.
461 *
462 * Returns true if fd may have a lock, false otherwise
463 */
464 static bool is_file_locked(int fd, const std::string& path) {
465 struct flock fl{};
466 fl.l_type = F_WRLCK;
467 fl.l_whence = SEEK_SET;
468
469 int res = fcntl(fd, F_OFD_GETLK, &fl);
470 if (res) {
471 PLOG(WARNING) << "Failed to check lock";
472 // Assume worst
473 return true;
474 }
475 bool locked = fl.l_type != F_UNLCK;
476 return locked;
477 }
478
479 static struct fuse* get_fuse(fuse_req_t req) {
480 return reinterpret_cast<struct fuse*>(fuse_req_userdata(req));
481 }
482
483 static bool is_package_owned_path(const string& path, const string& fuse_path) {
484 if (path.rfind(fuse_path, 0) != 0) {
485 return false;
486 }
487 return std::regex_match(path, PATTERN_OWNED_PATH);
488 }
489
490 static bool is_bpf_backing_path(const string& path) {
491 return std::regex_match(path, PATTERN_BPF_BACKING_PATH);
492 }
493
494 // See fuse_lowlevel.h fuse_lowlevel_notify_inval_entry for how to call this safely without
495 // deadlocking the kernel
496 static void fuse_inval(fuse_session* se, fuse_ino_t parent_ino, fuse_ino_t child_ino,
497 const string& child_name, const string& path) {
498 if (mediaprovider::fuse::containsMount(path)) {
499 LOG(WARNING) << "Ignoring attempt to invalidate dentry for FUSE mounts";
500 return;
501 }
502
503 if (fuse_lowlevel_notify_inval_entry(se, parent_ino, child_name.c_str(), child_name.size())) {
504 // Invalidating the dentry can fail if there's no dcache entry, however, there may still
505 // be cached attributes, so attempt to invalidate those by invalidating the inode
506 fuse_lowlevel_notify_inval_inode(se, child_ino, 0, 0);
507 }
508 }
509
510 static double get_entry_timeout(const string& path, bool should_inval, struct fuse* fuse) {
511 if (fuse->disable_dentry_cache || should_inval || is_package_owned_path(path, fuse->path) ||
512 fuse->ShouldNotCache(path)) {
513 // We set dentry timeout to 0 for the following reasons:
514 // 1. The dentry cache was completely disabled for the entire volume.
515 // 2.1 Case-insensitive lookups need to invalidate other case-insensitive dentry matches
516 // 2.2 Nodes supporting transforms need to be invalidated, so that subsequent lookups by a
517 // uid requiring a transform are guaranteed to come to the FUSE daemon.
518 // 3. With app data isolation enabled, app A should not guess existence of app B from the
519 // Android/{data,obb}/<package> paths, hence we prevent the kernel from caching that
520 // information.
521 // 4. The dentry cache was completely disabled for the given path.
522 return 0;
523 }
524 return std::numeric_limits<double>::max();
525 }
526
527 static std::string get_path(node* node) {
528 const string& io_path = node->GetIoPath();
529 return io_path.empty() ? node->BuildPath() : io_path;
530 }
531
532 // Returns true if the path resides under .transforms/synthetic.
533 // NOTE: currently only file paths corresponding to redacted URIs reside under this folder. The path
534 // itself never exists and is just a link for transformation.
535 static inline bool is_synthetic_path(const string& path, struct fuse* fuse) {
536 return android::base::StartsWithIgnoreCase(
537 path, fuse->GetTransformsDir() + "/" + TRANSFORM_SYNTHETIC_DIR);
538 }
539
540 static inline bool is_transforms_dir_path(const string& path, struct fuse* fuse) {
541 return android::base::StartsWithIgnoreCase(path, fuse->GetTransformsDir());
542 }
543
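// Checks that |path| may be surfaced for the given FUSE operation |op| and fills
// |e->attr| from the lower file system. Returns the FileLookupResult describing any
// required transforms, or nullptr with |*error_code| set on failure.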
544 static std::unique_ptr<mediaprovider::fuse::FileLookupResult> validate_node_path(
545 const std::string& path, const std::string& name, fuse_req_t req, int* error_code,
546 struct fuse_entry_param* e, const FuseOp op) {
547 struct fuse* fuse = get_fuse(req);
548 const struct fuse_ctx* ctx = fuse_req_ctx(req);
549 memset(e, 0, sizeof(*e));
550
551 const bool synthetic_path = is_synthetic_path(path, fuse);
552 if (lstat(path.c_str(), &e->attr) < 0 && !(op == FuseOp::lookup && synthetic_path)) {
553 *error_code = errno;
554 return nullptr;
555 }
556
557 if (is_transforms_dir_path(path, fuse)) {
558 if (op == FuseOp::lookup) {
559 // Lookups are only allowed under .transforms/synthetic dir
560 if (!(android::base::EqualsIgnoreCase(path, fuse->GetTransformsDir()) ||
561 android::base::StartsWithIgnoreCase(
562 path, fuse->GetTransformsDir() + "/" + TRANSFORM_SYNTHETIC_DIR))) {
563 *error_code = ENONET;
564 return nullptr;
565 }
566 } else {
567 // user-code is only allowed to make lookups under .transforms dir, and that too only
568 // under .transforms/synthetic dir
569 *error_code = ENOENT;
570 return nullptr;
571 }
572 }
573
574 if (S_ISDIR(e->attr.st_mode)) {
575 // now that we have reached this point, ops on directories are safe and require no
576 // transformation.
577 return std::make_unique<mediaprovider::fuse::FileLookupResult>(0, 0, 0, true, false, "");
578 }
579
580 if (!synthetic_path && !fuse->IsTranscodeSupportedPath(path)) {
581 // Transforms are only supported for synthetic or transcode-supported paths
582 return std::make_unique<mediaprovider::fuse::FileLookupResult>(0, 0, 0, true, false, "");
583 }
584
585 // Handle potential file transforms
586 std::unique_ptr<mediaprovider::fuse::FileLookupResult> file_lookup_result =
587 fuse->mp->FileLookup(path, req->ctx.uid, req->ctx.pid);
588
589 if (!file_lookup_result) {
590 // Fail lookup if we can't fetch FileLookupResult for path
591 LOG(WARNING) << "Failed to fetch FileLookupResult for " << path;
592 *error_code = EFAULT;
593 return nullptr;
594 }
595
596 const string& io_path = file_lookup_result->io_path;
597 // Update size with io_path iff there's an io_path
598 if (!io_path.empty() && (lstat(io_path.c_str(), &e->attr) < 0)) {
599 *error_code = errno;
600 return nullptr;
601 }
602
603 return file_lookup_result;
604 }
605
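// Creates or refreshes the node for |name| under |parent| after validating |path|,
// and fills |e| for the reply to the kernel. Returns nullptr with |*error_code| set
// on failure.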
606 static node* make_node_entry(fuse_req_t req, node* parent, const string& name,
607 const string& parent_path, const string& path,
608 struct fuse_entry_param* e, int* error_code, const FuseOp op) {
609 struct fuse* fuse = get_fuse(req);
610 const struct fuse_ctx* ctx = fuse_req_ctx(req);
611 node* node;
612
613 memset(e, 0, sizeof(*e));
614
615 std::unique_ptr<mediaprovider::fuse::FileLookupResult> file_lookup_result =
616 validate_node_path(path, name, req, error_code, e, op);
617 if (!file_lookup_result) {
618 // Fail lookup if we can't validate |path|; |errno| would have already been set
619 return nullptr;
620 }
621
622 bool should_invalidate = file_lookup_result->transforms_supported;
623 const bool transforms_complete = file_lookup_result->transforms_complete;
624 const int transforms = file_lookup_result->transforms;
625 const int transforms_reason = file_lookup_result->transforms_reason;
626 const string& io_path = file_lookup_result->io_path;
627 if (transforms) {
628 // If the node requires transforms, we MUST never cache it in the VFS
629 CHECK(should_invalidate);
630 }
631
632 node = parent->LookupChildByName(name, true /* acquire */, transforms);
633 if (!node) {
634 ino_t ino = e->attr.st_ino;
635 node = ::node::Create(parent, name, io_path, transforms_complete, transforms,
636 transforms_reason, &fuse->lock, ino, &fuse->tracker);
637 } else if (!mediaprovider::fuse::containsMount(path)) {
638 // Only invalidate a path if it does not contain mount and |name| != node->GetName.
639 // Invalidate both names to ensure there's no dentry left in the kernel after the following
640 // operations:
641 // 1) touch foo, touch FOO, unlink *foo*
642 // 2) touch foo, touch FOO, unlink *FOO*
643 // Invalidating lookup_name fixes (1) and invalidating node_name fixes (2)
644 // -Set |should_invalidate| to true to invalidate lookup_name by using 0 timeout below
645 // -Explicitly invalidate node_name. Note that we invalidate async otherwise we will
646 // deadlock the kernel
647 if (name != node->GetName()) {
648 // Force node invalidation to fix the kernel dentry cache for case (1) above
649 should_invalidate = true;
650 // Make copies of the node name and path so we're not attempting to acquire
651 // any node locks from the invalidation thread. Depending on timing, we may end
652 // up invalidating the wrong inode but that shouldn't result in correctness issues.
653 const fuse_ino_t parent_ino = fuse->ToInode(parent);
654 const fuse_ino_t child_ino = fuse->ToInode(node);
655 const std::string& node_name = node->GetName();
656 std::thread t([=]() { fuse_inval(fuse->se, parent_ino, child_ino, node_name, path); });
657 t.detach();
658 // Update the name after |node_name| reference above has been captured in lambda
659 // This avoids invalidating the node again on subsequent accesses with |name|
660 node->SetName(name);
661 }
662
663 // This updated value allows us to correctly decide whether to keep_cache and use direct_io during
664 // FUSE_OPEN. Between the last lookup and this lookup, we might have deleted a cached
665 // transcoded file on the lower fs. A subsequent transcode at FUSE_READ should ensure we
666 // don't reuse any stale transcode page cache content.
667 node->SetTransformsComplete(transforms_complete);
668 }
669 TRACE_NODE(node, req);
670
671 if (should_invalidate && fuse->IsTranscodeSupportedPath(path)) {
672 // Some components like the MTP stack need an efficient mechanism to determine if a file
673 // supports transcoding. This allows them to work around an issue with MTP clients on Windows
674 // where those clients incorrectly use the original file size instead of the transcoded file
675 // size to copy files from the device. This size misuse causes transcoded files to be
676 // truncated to the original file size, hence corrupting the transcoded file.
677 //
678 // We expose the transcode bit via the st_nlink stat field. This should be safe because the
679 // field is not supported on FAT filesystems which FUSE is emulating.
680 // WARNING: Apps should never rely on this behavior as it is NOT supported API and will be
681 // removed in a future release when the MTP stack has better support for transcoded files on
682 // Windows OS.
683 e->attr.st_nlink = 2;
684 }
685
686 // This FS is not being exported via NFS so just a fixed generation number
687 // for now. If we do need this, we need to increment the generation ID each
688 // time the fuse daemon restarts because that's what it takes for us to
689 // reuse inode numbers.
690 e->generation = 0;
691 e->ino = fuse->ToInode(node);
692
693 // When FUSE BPF is used, the caching of node attributes and lookups is
694 // disabled to avoid possible inconsistencies between the FUSE cache and
695 // the lower file system state.
696 // With FUSE BPF the file system requests are forwarded to the lower file
697 // system bypassing the FUSE daemon, so dropping the caching does not
698 // introduce a performance regression.
699 // Currently FUSE BPF is limited to the Android/data and Android/obb
700 // directories.
701 if (!fuse->bpf || !is_bpf_backing_path(parent_path)) {
702 e->entry_timeout = get_entry_timeout(path, should_invalidate, fuse);
703 e->attr_timeout = std::numeric_limits<double>::max();
704 }
705 return node;
706 }
707
708 namespace mediaprovider {
709 namespace fuse {
710
711 /**
712 * Function implementations
713 *
714 * These implement the various functions in fuse_lowlevel_ops
715 *
716 */
717
718 static void pf_init(void* userdata, struct fuse_conn_info* conn) {
719 struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
720
721 // We don't want a getattr request with every read request
722 conn->want &= ~FUSE_CAP_AUTO_INVAL_DATA & ~FUSE_CAP_READDIRPLUS_AUTO;
723 uint64_t mask = (FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE | FUSE_CAP_SPLICE_READ |
724 FUSE_CAP_ASYNC_READ | FUSE_CAP_ATOMIC_O_TRUNC | FUSE_CAP_WRITEBACK_CACHE |
725 FUSE_CAP_EXPORT_SUPPORT | FUSE_CAP_FLOCK_LOCKS);
726
727 bool disable_splice_write = false;
728 if (fuse->passthrough) {
729 if (conn->capable & FUSE_CAP_PASSTHROUGH) {
730 mask |= FUSE_CAP_PASSTHROUGH;
731
732 // SPLICE_WRITE seems to cause linux kernel cache corruption with passthrough enabled.
733 // It is still under investigation but while running
734 // ScopedStorageDeviceTest#testAccessMediaLocationInvalidation, we notice test flakes
735 // of about 1/20 for the following reason:
736 // 1. App without ACCESS_MEDIA_LOCATION permission reads redacted bytes via FUSE cache
737 // 2. App with ACCESS_MEDIA_LOCATION permission reads non-redacted bytes via passthrough
738 // cache
739 // (2) fails because bytes from (1) sneak into the passthrough cache??
740 // To workaround, we disable splice for write when passthrough is enabled.
741 // This shouldn't have any performance regression if comparing passthrough devices to
742 // no-passthrough devices for the following reasons:
743 // 1. No-op for no-passthrough devices
744 // 2. Passthrough devices
745 // a. Files not requiring redaction use passthrough which bypasses FUSE_READ entirely
746 // b. Files requiring redaction are still faster than no-passthrough devices that use
747 // direct_io
748 disable_splice_write = true;
749 } else {
750 LOG(WARNING) << "Passthrough feature not supported by the kernel";
751 fuse->passthrough = false;
752 }
753 }
754
755 conn->want |= conn->capable & mask;
756 if (disable_splice_write) {
757 conn->want &= ~FUSE_CAP_SPLICE_WRITE;
758 }
759
760 conn->max_read = MAX_READ_SIZE;
761
762 fuse->active->store(true, std::memory_order_release);
763 }
764
765 static void pf_destroy(void* userdata) {
766 struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
767 LOG(INFO) << "DESTROY " << fuse->path;
768
769 node::DeleteTree(fuse->root);
770 }
771
772 // Return true if the path is accessible for that uid.
773 static bool is_app_accessible_path(struct fuse* fuse, const string& path, uid_t uid) {
774 MediaProviderWrapper* mp = fuse->mp;
775
776 if (uid < AID_APP_START || uid == MY_UID) {
777 return true;
778 }
779
780 if (path == PRIMARY_VOLUME_PREFIX) {
781 // Apps should never refer to /storage/emulated - they should be using the user-specific
782 // subdirs, e.g. /storage/emulated/0
783 return false;
784 }
785
786 std::smatch match;
787 if (std::regex_match(path, match, PATTERN_OWNED_PATH)) {
788 const std::string& pkg = match[1];
789 // .nomedia is not a valid package. .nomedia always exists in /Android/data directory,
790 // and it's not an external file/directory of any package
791 if (pkg == ".nomedia") {
792 return true;
793 }
794 if (!fuse->bpf && android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
795 // Emulated storage bind-mounts app-private data directories, and so these
796 // should not be accessible through FUSE anyway.
797 LOG(WARNING) << "Rejected access to app-private dir on FUSE: " << path
798 << " from uid: " << uid;
799 return false;
800 }
801 if (!mp->isUidAllowedAccessToDataOrObbPath(uid, path)) {
802 PLOG(WARNING) << "Invalid other package file access from " << uid << "(: " << path;
803 return false;
804 }
805 }
806 return true;
807 }
808
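// Fills |e| with the backing fd and BPF program action for a FUSE BPF backed
// directory. The opened |backing_fd| is returned to the caller, which must keep it
// open until the FUSE reply has been sent (see pf_lookup).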
809 void fuse_bpf_fill_entries(const string& path, const int bpf_fd, struct fuse_entry_param* e,
810 int& backing_fd) {
811 /*
812 * The file descriptor `fd` must not be closed as it is closed
813 * automatically by the kernel as soon as it consumes the FUSE reply. This
814 * mechanism is necessary because userspace doesn't know when the kernel
815 * will consume the FUSE response containing `fd`, thus it may close the
816 * `fd` too soon, with the risk of assigning a backing file which is either
817 * invalid or corresponds to the wrong file in the lower file system.
818 */
819 backing_fd = open(path.c_str(), O_CLOEXEC | O_DIRECTORY | O_RDONLY);
820 if (backing_fd < 0) {
821 PLOG(ERROR) << "Failed to open: " << path;
822 return;
823 }
824
825 e->backing_action = FUSE_ACTION_REPLACE;
826 e->backing_fd = backing_fd;
827
828 if (bpf_fd >= 0) {
829 e->bpf_action = FUSE_ACTION_REPLACE;
830 e->bpf_fd = bpf_fd;
831 } else if (bpf_fd == static_cast<int>(BpfFd::REMOVE)) {
832 e->bpf_action = FUSE_ACTION_REMOVE;
833 } else {
834 e->bpf_action = FUSE_ACTION_KEEP;
835 }
836 }
837
838 void fuse_bpf_install(struct fuse* fuse, struct fuse_entry_param* e, const string& child_path,
839 int& backing_fd) {
840 // TODO(b/211873756) Enable only for the primary volume. Must be
841 // extended for other media devices.
842 if (android::base::StartsWith(child_path, PRIMARY_VOLUME_PREFIX)) {
843 if (is_bpf_backing_path(child_path)) {
844 fuse_bpf_fill_entries(child_path, fuse->bpf_fd, e, backing_fd);
845 } else if (is_package_owned_path(child_path, fuse->path)) {
846 fuse_bpf_fill_entries(child_path, static_cast<int>(BpfFd::REMOVE), e, backing_fd);
847 }
848 }
849 }
850
851 static std::regex storage_emulated_regex("^\\/storage\\/emulated\\/([0-9]+)");
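// Shared lookup implementation for pf_lookup and readdirplus: resolves |name| under
// |parent|, enforcing per-app and cross-user access checks before creating the node
// entry. |backing_fd| is only used for FUSE BPF direct lookups.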
852 static node* do_lookup(fuse_req_t req, fuse_ino_t parent, const char* name,
853 struct fuse_entry_param* e, int* error_code, const FuseOp op,
854 int* backing_fd = NULL) {
855 struct fuse* fuse = get_fuse(req);
856 node* parent_node = fuse->FromInode(parent);
857 if (!parent_node) {
858 *error_code = ENOENT;
859 return nullptr;
860 }
861 string parent_path = parent_node->BuildPath();
862 // We should always allow lookups on the root, because failing them could cause
863 // bind mounts to be invalidated.
864 if (!fuse->IsRoot(parent_node) && !is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
865 *error_code = ENOENT;
866 return nullptr;
867 }
868
869 TRACE_NODE(parent_node, req);
870
871 const string child_path = parent_path + "/" + name;
872 std::smatch match;
873 std::regex_search(child_path, match, storage_emulated_regex);
874
875 // Ensure the FuseDaemon user id matches the user id in the requested path, or that
876 // cross-user lookups are allowed for the requested path
877 if (match.size() == 2 && MY_USER_ID_STRING != match[1].str()) {
878 // If user id mismatch, check cross-user lookups
879 long userId = strtol(match[1].str().c_str(), nullptr, 10);
880 if (userId < 0 || userId > MAX_USER_ID ||
881 !fuse->mp->ShouldAllowLookup(req->ctx.uid, userId)) {
882 *error_code = EACCES;
883 return nullptr;
884 }
885 }
886
887 auto node = make_node_entry(req, parent_node, name, parent_path, child_path, e, error_code, op);
888
889 if (fuse->bpf) {
890 if (op == FuseOp::lookup) {
891 // Only direct lookup calls support setting backing_fd and bpf program
892 fuse_bpf_install(fuse, e, child_path, *backing_fd);
893 } else if (is_bpf_backing_path(child_path) && op == FuseOp::readdir) {
894 // Fuse-bpf driver implementation doesn’t support providing backing_fd
895 // and bpf program as a part of readdirplus lookup. So we make sure
896 // here we're not making any lookups on backed files because we want
897 // to receive separate lookup calls for them later to set backing_fd and bpf.
898 e->ino = 0;
899 }
900 }
901
902 return node;
903 }
904
905 static void pf_lookup(fuse_req_t req, fuse_ino_t parent, const char* name) {
906 ATRACE_CALL();
907 struct fuse_entry_param e;
908 int backing_fd = -1;
909
910 int error_code = 0;
911 if (do_lookup(req, parent, name, &e, &error_code, FuseOp::lookup, &backing_fd)) {
912 fuse_reply_entry(req, &e);
913 } else {
914 CHECK(error_code != 0);
915 fuse_reply_err(req, error_code);
916 }
917
918 if (backing_fd != -1) close(backing_fd);
919 }
920
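// Post-filters FUSE BPF lookup results: rejects entries under Android/{data,obb}
// that the calling uid is not allowed to access, and otherwise replies with the
// entry from the lower file system unchanged.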
921 static void pf_lookup_postfilter(fuse_req_t req, fuse_ino_t parent, uint32_t error_in,
922 const char* name, struct fuse_entry_out* feo,
923 struct fuse_entry_bpf_out* febo) {
924 struct fuse* fuse = get_fuse(req);
925
926 ATRACE_CALL();
927 node* parent_node = fuse->FromInode(parent);
928 if (!parent_node) {
929 fuse_reply_err(req, ENOENT);
930 return;
931 }
932
933 TRACE_NODE(parent_node, req);
934 const string path = parent_node->BuildPath() + "/" + name;
935 if (strcmp(name, ".nomedia") != 0 &&
936 !fuse->mp->isUidAllowedAccessToDataOrObbPath(req->ctx.uid, path)) {
937 fuse_reply_err(req, ENOENT);
938 return;
939 }
940
941 struct {
942 struct fuse_entry_out feo;
943 struct fuse_entry_bpf_out febo;
944 } buf = {*feo, *febo};
945
946 fuse_reply_buf(req, (const char*)&buf, sizeof(buf));
947 }
948
949 static void do_forget(fuse_req_t req, struct fuse* fuse, fuse_ino_t ino, uint64_t nlookup) {
950 node* node = fuse->FromInode(ino);
951 TRACE_NODE(node, req);
952 if (node) {
953 // This is a narrowing conversion from an unsigned 64bit to a 32bit value. For
954 // some reason we only keep 32 bit refcounts but the kernel issues
955 // forget requests with a 64 bit counter.
956 node->Release(static_cast<uint32_t>(nlookup));
957 }
958 }
959
960 static void pf_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup) {
961 // Always allow forget, so there is no need to check is_app_accessible_path()
962 ATRACE_CALL();
963 node* node;
964 struct fuse* fuse = get_fuse(req);
965
966 do_forget(req, fuse, ino, nlookup);
967 fuse_reply_none(req);
968 }
969
970 static void pf_forget_multi(fuse_req_t req,
971 size_t count,
972 struct fuse_forget_data* forgets) {
973 ATRACE_CALL();
974 struct fuse* fuse = get_fuse(req);
975
976 for (int i = 0; i < count; i++) {
977 do_forget(req, fuse, forgets[i].ino, forgets[i].nlookup);
978 }
979 fuse_reply_none(req);
980 }
981
982 static void pf_fallocate(fuse_req_t req, fuse_ino_t ino, int mode, off_t offset, off_t length,
983 fuse_file_info* fi) {
984 ATRACE_CALL();
985 struct fuse* fuse = get_fuse(req);
986
987 handle* h = reinterpret_cast<handle*>(fi->fh);
988 auto err = fallocate(h->fd, mode, offset, length);
989 fuse_reply_err(req, err ? errno : 0);
990 }
991
992 static void pf_getattr(fuse_req_t req,
993 fuse_ino_t ino,
994 struct fuse_file_info* fi) {
995 ATRACE_CALL();
996 struct fuse* fuse = get_fuse(req);
997 node* node = fuse->FromInode(ino);
998 if (!node) {
999 fuse_reply_err(req, ENOENT);
1000 return;
1001 }
1002 const string& path = get_path(node);
1003 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1004 fuse_reply_err(req, ENOENT);
1005 return;
1006 }
1007 TRACE_NODE(node, req);
1008
1009 struct stat s;
1010 memset(&s, 0, sizeof(s));
1011 if (lstat(path.c_str(), &s) < 0) {
1012 fuse_reply_err(req, errno);
1013 } else {
1014 fuse_reply_attr(req, &s, std::numeric_limits<double>::max());
1015 }
1016 }
1017
1018 static void pf_setattr(fuse_req_t req,
1019 fuse_ino_t ino,
1020 struct stat* attr,
1021 int to_set,
1022 struct fuse_file_info* fi) {
1023 ATRACE_CALL();
1024 struct fuse* fuse = get_fuse(req);
1025 node* node = fuse->FromInode(ino);
1026 if (!node) {
1027 fuse_reply_err(req, ENOENT);
1028 return;
1029 }
1030 const string& path = get_path(node);
1031 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1032 fuse_reply_err(req, ENOENT);
1033 return;
1034 }
1035
1036 int fd = -1;
1037 if (fi) {
1038 // If we have a file_info, setattr was called with an fd so use the fd instead of path
1039 handle* h = reinterpret_cast<handle*>(fi->fh);
1040 fd = h->fd;
1041 } else {
1042 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1043 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
1044 path, path, ctx->uid, ctx->pid, node->GetTransformsReason(), true /* for_write */,
1045 false /* redact */, false /* log_transforms_metrics */);
1046
1047 if (!result) {
1048 fuse_reply_err(req, EFAULT);
1049 return;
1050 }
1051
1052 if (result->status) {
1053 fuse_reply_err(req, EACCES);
1054 return;
1055 }
1056 }
1057 struct timespec times[2];
1058 TRACE_NODE(node, req);
1059
1060 /* XXX: incomplete implementation on purpose.
1061 * chmod/chown should NEVER be implemented.*/
1062
1063 if ((to_set & FUSE_SET_ATTR_SIZE)) {
1064 int res = 0;
1065 if (fd == -1) {
1066 res = truncate64(path.c_str(), attr->st_size);
1067 } else {
1068 res = ftruncate64(fd, attr->st_size);
1069 }
1070
1071 if (res < 0) {
1072 fuse_reply_err(req, errno);
1073 return;
1074 }
1075 }
1076
1077 /* Handle changing atime and mtime. If FATTR_ATIME and FATTR_ATIME_NOW
1078 * are both set, then set it to the current time. Else, set it to the
1079 * time specified in the request. Same goes for mtime. Use utimensat(2)
1080 * as it allows ATIME and MTIME to be changed independently, and has
1081 * nanosecond resolution which fuse also has.
1082 */
1083 if (to_set & (FATTR_ATIME | FATTR_MTIME)) {
1084 times[0].tv_nsec = UTIME_OMIT;
1085 times[1].tv_nsec = UTIME_OMIT;
1086 if (to_set & FATTR_ATIME) {
1087 if (to_set & FATTR_ATIME_NOW) {
1088 times[0].tv_nsec = UTIME_NOW;
1089 } else {
1090 times[0] = attr->st_atim;
1091 }
1092 }
1093
1094 if (to_set & FATTR_MTIME) {
1095 if (to_set & FATTR_MTIME_NOW) {
1096 times[1].tv_nsec = UTIME_NOW;
1097 } else {
1098 times[1] = attr->st_mtim;
1099 }
1100 }
1101
1102 TRACE_NODE(node, req);
1103 int res = 0;
1104 if (fd == -1) {
1105 res = utimensat(-1, path.c_str(), times, 0);
1106 } else {
1107 res = futimens(fd, times);
1108 }
1109
1110 if (res < 0) {
1111 fuse_reply_err(req, errno);
1112 return;
1113 }
1114 }
1115
1116 lstat(path.c_str(), attr);
1117 fuse_reply_attr(req, attr, std::numeric_limits<double>::max());
1118 }
1119
1120 static void pf_canonical_path(fuse_req_t req, fuse_ino_t ino)
1121 {
1122 struct fuse* fuse = get_fuse(req);
1123 node* node = fuse->FromInode(ino);
1124 const string& path = node ? get_path(node) : "";
1125
1126 if (node && is_app_accessible_path(fuse, path, req->ctx.uid)) {
1127 // TODO(b/147482155): Check that uid has access to |path| and its contents
1128 fuse_reply_canonical_path(req, path.c_str());
1129 return;
1130 }
1131 fuse_reply_err(req, ENOENT);
1132 }
1133
1134 static void pf_mknod(fuse_req_t req,
1135 fuse_ino_t parent,
1136 const char* name,
1137 mode_t mode,
1138 dev_t rdev) {
1139 ATRACE_CALL();
1140 struct fuse* fuse = get_fuse(req);
1141 node* parent_node = fuse->FromInode(parent);
1142 if (!parent_node) {
1143 fuse_reply_err(req, ENOENT);
1144 return;
1145 }
1146 string parent_path = parent_node->BuildPath();
1147 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
1148 fuse_reply_err(req, ENOENT);
1149 return;
1150 }
1151
1152 TRACE_NODE(parent_node, req);
1153
1154 const string child_path = parent_path + "/" + name;
1155
1156 mode = (mode & (~0777)) | 0664;
1157 if (mknod(child_path.c_str(), mode, rdev) < 0) {
1158 fuse_reply_err(req, errno);
1159 return;
1160 }
1161
1162 int error_code = 0;
1163 struct fuse_entry_param e;
1164 if (make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
1165 FuseOp::mknod)) {
1166 fuse_reply_entry(req, &e);
1167 } else {
1168 CHECK(error_code != 0);
1169 fuse_reply_err(req, error_code);
1170 }
1171 }
1172
1173 static void pf_mkdir(fuse_req_t req,
1174 fuse_ino_t parent,
1175 const char* name,
1176 mode_t mode) {
1177 ATRACE_CALL();
1178 struct fuse* fuse = get_fuse(req);
1179 node* parent_node = fuse->FromInode(parent);
1180 if (!parent_node) {
1181 fuse_reply_err(req, ENOENT);
1182 return;
1183 }
1184 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1185 const string parent_path = parent_node->BuildPath();
1186 if (!is_app_accessible_path(fuse, parent_path, ctx->uid)) {
1187 fuse_reply_err(req, ENOENT);
1188 return;
1189 }
1190
1191 TRACE_NODE(parent_node, req);
1192
1193 const string child_path = parent_path + "/" + name;
1194
1195 int status = fuse->mp->IsCreatingDirAllowed(child_path, ctx->uid);
1196 if (status) {
1197 fuse_reply_err(req, status);
1198 return;
1199 }
1200
1201 mode = (mode & (~0777)) | 0775;
1202 if (mkdir(child_path.c_str(), mode) < 0) {
1203 fuse_reply_err(req, errno);
1204 return;
1205 }
1206
1207 int error_code = 0;
1208 struct fuse_entry_param e;
1209 if (make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
1210 FuseOp::mkdir)) {
1211 fuse_reply_entry(req, &e);
1212 } else {
1213 CHECK(error_code != 0);
1214 fuse_reply_err(req, error_code);
1215 }
1216 }
1217
1218 static void pf_unlink(fuse_req_t req, fuse_ino_t parent, const char* name) {
1219 ATRACE_CALL();
1220 struct fuse* fuse = get_fuse(req);
1221 node* parent_node = fuse->FromInode(parent);
1222 if (!parent_node) {
1223 fuse_reply_err(req, ENOENT);
1224 return;
1225 }
1226 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1227 const string parent_path = parent_node->BuildPath();
1228 if (!is_app_accessible_path(fuse, parent_path, ctx->uid)) {
1229 fuse_reply_err(req, ENOENT);
1230 return;
1231 }
1232
1233 TRACE_NODE(parent_node, req);
1234
1235 const string child_path = parent_path + "/" + name;
1236
1237 int status = fuse->mp->DeleteFile(child_path, ctx->uid);
1238 if (status) {
1239 fuse_reply_err(req, status);
1240 return;
1241 }
1242
1243 // TODO(b/169306422): Log each deleted node
1244 parent_node->SetDeletedForChild(name);
1245 fuse_reply_err(req, 0);
1246 }
1247
1248 static void pf_rmdir(fuse_req_t req, fuse_ino_t parent, const char* name) {
1249 ATRACE_CALL();
1250 struct fuse* fuse = get_fuse(req);
1251 node* parent_node = fuse->FromInode(parent);
1252 if (!parent_node) {
1253 fuse_reply_err(req, ENOENT);
1254 return;
1255 }
1256 const string parent_path = parent_node->BuildPath();
1257 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
1258 fuse_reply_err(req, ENOENT);
1259 return;
1260 }
1261
1262 if (is_transforms_dir_path(parent_path, fuse)) {
1263 // .transforms is a special daemon controlled dir so apps shouldn't be able to see it via
1264 // readdir, and any dir operations attempted on it should fail
1265 fuse_reply_err(req, ENOENT);
1266 return;
1267 }
1268
1269 TRACE_NODE(parent_node, req);
1270
1271 const string child_path = parent_path + "/" + name;
1272
1273 int status = fuse->mp->IsDeletingDirAllowed(child_path, req->ctx.uid);
1274 if (status) {
1275 fuse_reply_err(req, status);
1276 return;
1277 }
1278
1279 if (rmdir(child_path.c_str()) < 0) {
1280 fuse_reply_err(req, errno);
1281 return;
1282 }
1283
1284 node* child_node = parent_node->LookupChildByName(name, false /* acquire */);
1285 TRACE_NODE(child_node, req);
1286 if (child_node) {
1287 child_node->SetDeleted();
1288 }
1289
1290 fuse_reply_err(req, 0);
1291 }
1292 /*
1293 static void pf_symlink(fuse_req_t req, const char* link, fuse_ino_t parent,
1294 const char* name)
1295 {
1296 cout << "TODO:" << __func__;
1297 }
1298 */
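// Shared implementation for pf_rename. Returns 0 on success or an errno value;
// non-zero rename flags are rejected with EINVAL.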
1299 static int do_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
1300 const char* new_name, unsigned int flags) {
1301 ATRACE_CALL();
1302 struct fuse* fuse = get_fuse(req);
1303
1304 if (flags != 0) {
1305 return EINVAL;
1306 }
1307
1308 node* old_parent_node = fuse->FromInode(parent);
1309 if (!old_parent_node) return ENOENT;
1310 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1311 const string old_parent_path = old_parent_node->BuildPath();
1312 if (!is_app_accessible_path(fuse, old_parent_path, ctx->uid)) {
1313 return ENOENT;
1314 }
1315
1316 if (is_transforms_dir_path(old_parent_path, fuse)) {
1317 // .transforms is a special daemon controlled dir so apps shouldn't be able to see it via
1318 // readdir, and any dir operations attempted on it should fail
1319 return ENOENT;
1320 }
1321
1322 node* new_parent_node;
1323 if (fuse->bpf) {
1324 new_parent_node = fuse->FromInodeNoThrow(new_parent);
1325 if (!new_parent_node) return EXDEV;
1326 } else {
1327 new_parent_node = fuse->FromInode(new_parent);
1328 if (!new_parent_node) return ENOENT;
1329 }
1330 const string new_parent_path = new_parent_node->BuildPath();
1331 if (!is_app_accessible_path(fuse, new_parent_path, ctx->uid)) {
1332 return ENOENT;
1333 }
1334
1335 if (!old_parent_node || !new_parent_node) {
1336 return ENOENT;
1337 } else if (parent == new_parent && name == new_name) {
1338 // No rename required.
1339 return 0;
1340 }
1341
1342 TRACE_NODE(old_parent_node, req);
1343 TRACE_NODE(new_parent_node, req);
1344
1345 const string old_child_path = old_parent_path + "/" + name;
1346 const string new_child_path = new_parent_path + "/" + new_name;
1347
1348 if (android::base::EqualsIgnoreCase(fuse->GetEffectiveRootPath() + "/android", old_child_path)) {
1349 // Prevent renaming Android/ dir since it contains bind-mounts on the primary volume
1350 return EACCES;
1351 }
1352
1353 // TODO(b/147408834): Check ENOTEMPTY & EEXIST error conditions before JNI call.
1354 const int res = fuse->mp->Rename(old_child_path, new_child_path, req->ctx.uid);
1355 // TODO(b/145663158): Lookups can go out of sync if file/directory is actually moved but
1356 // EFAULT/EIO is reported due to JNI exception.
1357 if (res == 0) {
1358 // Mark any existing destination nodes as deleted. This fixes the following edge case:
1359 // 1. New destination node is forgotten
1360 // 2. Old destination node is not forgotten because there's still an open fd ref to it
1361 // 3. Lookup for |new_name| returns old destination node with stale metadata
1362 new_parent_node->SetDeletedForChild(new_name);
1363 // TODO(b/169306422): Log each renamed node
1364 old_parent_node->RenameChild(name, new_name, new_parent_node);
1365 }
1366 return res;
1367 }
1368
1369 static void pf_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
1370 const char* new_name, unsigned int flags) {
1371 int res = do_rename(req, parent, name, new_parent, new_name, flags);
1372 fuse_reply_err(req, res);
1373 }
1374
1375 /*
1376 static void pf_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t new_parent,
1377 const char* new_name)
1378 {
1379 cout << "TODO:" << __func__;
1380 }
1381 */
1382
1383 static handle* create_handle_for_node(struct fuse* fuse, const string& path, int fd, uid_t uid,
1384 uid_t transforms_uid, node* node, const RedactionInfo* ri,
1385 const bool allow_passthrough, const bool open_info_direct_io,
1386 int* keep_cache) {
1387 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
1388
1389 bool redaction_needed = ri->isRedactionNeeded();
1390 handle* handle = nullptr;
1391 int transforms = node->GetTransforms();
1392 bool transforms_complete = node->IsTransformsComplete();
1393 if (transforms_uid > 0) {
1394 CHECK(transforms);
1395 }
1396
1397 if (fuse->passthrough && allow_passthrough) {
1398 *keep_cache = transforms_complete;
1399 // We only enable passthrough iff these 2 conditions hold:
1400 // 1. Redaction is not needed
1401 // 2. Node transforms are completed, e.g transcoding.
1402 // (2) is important because we transcode lazily (on the first read) and with passthrough,
1403 // we will never get a read into the FUSE daemon, so passthrough would have returned
1404 // arbitrary bytes the first time around. However, if we ensure that transforms are
1405 // completed, then it's safe to use passthrough. Additionally, transcoded nodes never
1406 // require redaction so (2) implies (1)
1407 handle = new struct handle(fd, ri, !open_info_direct_io /* cached */,
1408 !redaction_needed && transforms_complete /* passthrough */, uid,
1409 transforms_uid);
1410 } else {
1411 // Without fuse->passthrough, we don't want to use the FUSE VFS cache in two cases:
1412 // 1. When redaction is needed because app A with EXIF access might access
1413 // a region that should have been redacted for app B without EXIF access, but app B on
1414 // a subsequent read, will be able to see the EXIF data because the read request for
1415 // that region will be served from cache and not get to the FUSE daemon
1416 // 2. When the file has a read or write lock on it. This means that the MediaProvider
1417 // has given an fd to the lower file system to an app. There are two cases where using
1418 // the cache in this case can be a problem:
1419 // a. Writing to a FUSE fd with caching enabled will use the write-back cache and a
1420 // subsequent read from the lower fs fd will not see the write.
1421 // b. Reading from a FUSE fd with caching enabled may not see the latest writes using
1422 // the lower fs fd because those writes did not go through the FUSE layer and reads from
1423 // FUSE after that write may be served from cache
1424 bool has_redacted = node->HasRedactedCache();
1425 bool is_redaction_change =
1426 (redaction_needed && !has_redacted) || (!redaction_needed && has_redacted);
1427 bool is_cached_file_open = node->HasCachedHandle();
1428 bool direct_io = open_info_direct_io || (is_cached_file_open && is_redaction_change) ||
1429 is_file_locked(fd, path) || fuse->ShouldNotCache(path);
1430
1431 if (!is_cached_file_open && is_redaction_change) {
1432 node->SetRedactedCache(redaction_needed);
1433 // Purges stale page cache before open
1434 *keep_cache = 0;
1435 } else {
1436 *keep_cache = transforms_complete;
1437 }
1438 handle = new struct handle(fd, ri, !direct_io /* cached */, false /* passthrough */, uid,
1439 transforms_uid);
1440 }
1441
1442 node->AddHandle(handle);
1443 return handle;
1444 }
1445
1446 static bool do_passthrough_enable(fuse_req_t req, struct fuse_file_info* fi, unsigned int fd) {
1447 int passthrough_fh = fuse_passthrough_enable(req, fd);
1448
1449 if (passthrough_fh <= 0) {
1450 return false;
1451 }
1452
1453 fi->passthrough_fh = passthrough_fh;
1454 return true;
1455 }
1456
1457 static OpenInfo parse_open_flags(const string& path, const int in_flags) {
1458 const bool for_write = in_flags & (O_WRONLY | O_RDWR);
1459 int out_flags = in_flags;
1460 bool direct_io = false;
1461
1462 if (in_flags & O_DIRECT) {
1463 // Set direct IO on the FUSE fs file
1464 direct_io = true;
1465
1466 if (android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
1467 // Remove O_DIRECT because there are strict alignment requirements for direct IO and
1468 // there were some historical bugs affecting encrypted block devices.
1469 // Hence, this is only supported on public volumes.
1470 out_flags &= ~O_DIRECT;
1471 }
1472 }
1473 if (in_flags & O_WRONLY) {
1474 // Replace O_WRONLY with O_RDWR because even if the FUSE fd is opened write-only, the FUSE
1475 // driver might issue reads on the lower fs with the writeback cache enabled
1476 out_flags &= ~O_WRONLY;
1477 out_flags |= O_RDWR;
1478 }
1479 if (in_flags & O_APPEND) {
1480 // Remove O_APPEND because passing it to the lower fs can lead to file corruption when
1481 // multiple FUSE threads race themselves reading. With writeback cache enabled, the FUSE
1482 // driver already handles the O_APPEND
1483 out_flags &= ~O_APPEND;
1484 }
1485
1486 return {.flags = out_flags, .for_write = for_write, .direct_io = direct_io};
1487 }
1488
1489 static void fill_fuse_file_info(const handle* handle, const OpenInfo* open_info,
1490 const int keep_cache, struct fuse_file_info* fi) {
1491 fi->fh = ptr_to_id(handle);
1492 fi->keep_cache = keep_cache;
1493 fi->direct_io = !handle->cached;
1494 }
1495
1496 static void pf_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi) {
1497 ATRACE_CALL();
1498 struct fuse* fuse = get_fuse(req);
1499 node* node = fuse->FromInode(ino);
1500 if (!node) {
1501 fuse_reply_err(req, ENOENT);
1502 return;
1503 }
1504 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1505 const string& io_path = get_path(node);
1506 const string& build_path = node->BuildPath();
1507 if (!is_app_accessible_path(fuse, io_path, ctx->uid)) {
1508 fuse_reply_err(req, ENOENT);
1509 return;
1510 }
1511
1512 const OpenInfo open_info = parse_open_flags(io_path, fi->flags);
1513
1514 if (open_info.for_write && node->GetTransforms()) {
1515 TRACE_NODE(node, req) << "write with transforms";
1516 } else {
1517 TRACE_NODE(node, req) << (open_info.for_write ? "write" : "read");
1518 }
1519
1520 // Force permission check with the build path because the MediaProvider database might not be
1521 // aware of the io_path
1522 // We don't redact if the caller was granted write permission for this file
1523 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
1524 build_path, io_path, ctx->uid, ctx->pid, node->GetTransformsReason(),
1525 open_info.for_write, !open_info.for_write /* redact */,
1526 true /* log_transforms_metrics */);
1527 if (!result) {
1528 fuse_reply_err(req, EFAULT);
1529 return;
1530 }
1531
1532 if (result->status) {
1533 fuse_reply_err(req, result->status);
1534 return;
1535 }
1536
1537 int fd = -1;
1538 const bool is_fd_from_java = result->fd >= 0;
1539 if (is_fd_from_java) {
1540 fd = result->fd;
1541 TRACE_NODE(node, req) << "opened in Java";
1542 } else {
1543 fd = open(io_path.c_str(), open_info.flags);
1544 if (fd < 0) {
1545 fuse_reply_err(req, errno);
1546 return;
1547 }
1548 }
1549
1550 int keep_cache = 1;
1551     // If is_fd_from_java==true, we disallow passthrough because the fd may point back into the
1552     // FUSE fs if it was obtained from another process.
1553 const handle* h = create_handle_for_node(fuse, io_path, fd, result->uid, result->transforms_uid,
1554 node, result->redaction_info.release(),
1555 /* allow_passthrough */ !is_fd_from_java,
1556 open_info.direct_io, &keep_cache);
1557 fill_fuse_file_info(h, &open_info, keep_cache, fi);
1558
1559     // TODO(b/173190192) requiring that h->cached be enabled in order to
1560     // use FUSE passthrough is a conservative rule and might be dropped
1561     // once its correctness is demonstrated.
1562 if (h->passthrough && !do_passthrough_enable(req, fi, fd)) {
1563 // TODO: Should we crash here so we can find errors easily?
1564 PLOG(ERROR) << "Passthrough OPEN failed for " << io_path;
1565 fuse_reply_err(req, EFAULT);
1566 return;
1567 }
1568
1569 fuse_reply_open(req, fi);
1570 }
1571
1572 static void do_read(fuse_req_t req, size_t size, off_t off, struct fuse_file_info* fi,
1573 bool direct_io) {
1574 handle* h = reinterpret_cast<handle*>(fi->fh);
1575 struct fuse_bufvec buf = FUSE_BUFVEC_INIT(size);
1576
1577 buf.buf[0].fd = h->fd;
1578 buf.buf[0].pos = off;
1579 buf.buf[0].flags =
1580 (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
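    // FUSE_BUF_IS_FD: the reply data comes from h->fd rather than from memory;
    // FUSE_BUF_FD_SEEK: read at buf.buf[0].pos instead of the fd's current offset.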
1581 if (direct_io) {
1582 // sdcardfs does not register splice_read_file_operations and some requests fail with EFAULT
1583 // Specifically, FUSE splice is only enabled for 8KB+ buffers, hence such reads fail
1584 fuse_reply_data(req, &buf, (enum fuse_buf_copy_flags)FUSE_BUF_NO_SPLICE);
1585 } else {
1586 fuse_reply_data(req, &buf, (enum fuse_buf_copy_flags)0);
1587 }
1588 }
1589
1590 /**
1591 * Sets the parameters for a fuse_buf that reads from memory, including flags.
1592 * Makes buf->mem point to an already mapped region of zeroized memory.
1593 * This memory is read only.
1594 */
1595 static void create_mem_fuse_buf(size_t size, fuse_buf* buf, struct fuse* fuse) {
1596 buf->size = size;
1597 buf->mem = fuse->zero_addr;
1598 buf->flags = static_cast<fuse_buf_flags>(0 /*read from fuse_buf.mem*/);
1599 buf->pos = -1;
1600 buf->fd = -1;
1601 }
1602
1603 /**
1604 * Sets the parameters for a fuse_buf that reads from file, including flags.
1605 */
1606 static void create_file_fuse_buf(size_t size, off_t pos, int fd, fuse_buf* buf) {
1607 buf->size = size;
1608 buf->fd = fd;
1609 buf->pos = pos;
1610 buf->flags = static_cast<fuse_buf_flags>(FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1611 buf->mem = nullptr;
1612 }
1613
1614 static void do_read_with_redaction(fuse_req_t req, size_t size, off_t off, fuse_file_info* fi,
1615 bool direct_io) {
1616 handle* h = reinterpret_cast<handle*>(fi->fh);
1617
1618 std::vector<ReadRange> ranges;
1619 h->ri->getReadRanges(off, size, &ranges);
1620
1621 // As an optimization, return early if there are no ranges to redact.
1622 if (ranges.size() == 0) {
1623 do_read(req, size, off, fi, direct_io);
1624 return;
1625 }
1626
1627 const size_t num_bufs = ranges.size();
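    // fuse_bufvec already embeds one fuse_buf, so only (num_bufs - 1) extra slots are
    // allocated; ranges is guaranteed non-empty at this point, so num_bufs >= 1.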
1628 auto bufvec_ptr = std::unique_ptr<fuse_bufvec, decltype(free)*>{
1629 reinterpret_cast<fuse_bufvec*>(
1630 malloc(sizeof(fuse_bufvec) + (num_bufs - 1) * sizeof(fuse_buf))),
1631 free};
1632 fuse_bufvec& bufvec = *bufvec_ptr;
1633
1634 // initialize bufvec
1635 bufvec.count = num_bufs;
1636 bufvec.idx = 0;
1637 bufvec.off = 0;
1638
1639 for (int i = 0; i < num_bufs; ++i) {
1640 const ReadRange& range = ranges[i];
1641 if (range.is_redaction) {
1642 create_mem_fuse_buf(range.size, &(bufvec.buf[i]), get_fuse(req));
1643 } else {
1644 create_file_fuse_buf(range.size, range.start, h->fd, &(bufvec.buf[i]));
1645 }
1646 }
1647
1648 fuse_reply_data(req, &bufvec, static_cast<fuse_buf_copy_flags>(0));
1649 }
1650
1651 static void pf_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1652 struct fuse_file_info* fi) {
1653 ATRACE_CALL();
1654 handle* h = reinterpret_cast<handle*>(fi->fh);
1655 const bool direct_io = !h->cached;
1656 struct fuse* fuse = get_fuse(req);
1657
1658 node* node = fuse->FromInode(ino);
1659
1660 if (!node->IsTransformsComplete()) {
1661 if (!fuse->mp->Transform(node->BuildPath(), node->GetIoPath(), node->GetTransforms(),
1662 node->GetTransformsReason(), req->ctx.uid, h->uid,
1663 h->transforms_uid)) {
1664 fuse_reply_err(req, EFAULT);
1665 return;
1666 }
1667 node->SetTransformsComplete(true);
1668 }
1669
1670 fuse->fadviser.Record(h->fd, size);
1671
1672 if (h->ri->isRedactionNeeded()) {
1673 do_read_with_redaction(req, size, off, fi, direct_io);
1674 } else {
1675 do_read(req, size, off, fi, direct_io);
1676 }
1677 }
1678
1679 /*
1680 static void pf_write(fuse_req_t req, fuse_ino_t ino, const char* buf,
1681 size_t size, off_t off, struct fuse_file_info* fi)
1682 {
1683 cout << "TODO:" << __func__;
1684 }
1685 */
1686
1687 static void pf_write_buf(fuse_req_t req,
1688 fuse_ino_t ino,
1689 struct fuse_bufvec* bufv,
1690 off_t off,
1691 struct fuse_file_info* fi) {
1692 ATRACE_CALL();
1693 handle* h = reinterpret_cast<handle*>(fi->fh);
1694 struct fuse_bufvec buf = FUSE_BUFVEC_INIT(fuse_buf_size(bufv));
1695 ssize_t size;
1696 struct fuse* fuse = get_fuse(req);
1697
1698 buf.buf[0].fd = h->fd;
1699 buf.buf[0].pos = off;
1700 buf.buf[0].flags =
1701 (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1702 size = fuse_buf_copy(&buf, bufv, (enum fuse_buf_copy_flags) 0);
1703
1704 if (size < 0)
1705 fuse_reply_err(req, -size);
1706 else {
1707 // Execute Record *before* fuse_reply_write to avoid the following ordering:
1708 // fuse_reply_write -> pf_release (destroy handle) -> Record (use handle after free)
1709 fuse->fadviser.Record(h->fd, size);
1710 fuse_reply_write(req, size);
1711 }
1712 }
1713 // Haven't tested this one. Not sure what calls it.
1714 #if 0
1715 static void pf_copy_file_range(fuse_req_t req, fuse_ino_t ino_in,
1716 off_t off_in, struct fuse_file_info* fi_in,
1717 fuse_ino_t ino_out, off_t off_out,
1718 struct fuse_file_info* fi_out, size_t len,
1719 int flags)
1720 {
1721 handle* h_in = reinterpret_cast<handle *>(fi_in->fh);
1722 handle* h_out = reinterpret_cast<handle *>(fi_out->fh);
1723 struct fuse_bufvec buf_in = FUSE_BUFVEC_INIT(len);
1724 struct fuse_bufvec buf_out = FUSE_BUFVEC_INIT(len);
1725 ssize_t size;
1726
1727 buf_in.buf[0].fd = h_in->fd;
1728 buf_in.buf[0].pos = off_in;
1729 buf_in.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
1730
1731 buf_out.buf[0].fd = h_out->fd;
1732 buf_out.buf[0].pos = off_out;
1733 buf_out.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
1734 size = fuse_buf_copy(&buf_out, &buf_in, (enum fuse_buf_copy_flags) 0);
1735
1736 if (size < 0) {
1737 fuse_reply_err(req, -size);
1738 }
1739
1740 fuse_reply_write(req, size);
1741 }
1742 #endif
1743
1744 /*
1745  * This function does nothing except serve as a placeholder that keeps the FUSE
1746  * driver handling flushes on close(2).
1747  * Kernels prior to 5.8 stop attempting to flush the cache on close(2)
1748 * if the .flush operation is not implemented by the FUSE daemon.
1749 * This has been fixed in the kernel by commit 614c026e8a46 ("fuse: always
1750 * flush dirty data on close(2)"), merged in Linux 5.8, but until then
1751 * userspace must mitigate this behavior by not leaving the .flush function
1752 * pointer empty.
1753 */
1754 static void pf_flush(fuse_req_t req,
1755 fuse_ino_t ino,
1756 struct fuse_file_info* fi) {
1757 ATRACE_CALL();
1758 struct fuse* fuse = get_fuse(req);
1759 TRACE_NODE(nullptr, req) << "noop";
1760 fuse_reply_err(req, 0);
1761 }
1762
1763 static void pf_release(fuse_req_t req,
1764 fuse_ino_t ino,
1765 struct fuse_file_info* fi) {
1766 ATRACE_CALL();
1767 struct fuse* fuse = get_fuse(req);
1768
1769 node* node = fuse->FromInode(ino);
1770 handle* h = reinterpret_cast<handle*>(fi->fh);
1771 TRACE_NODE(node, req);
1772
1773 fuse->fadviser.Close(h->fd);
1774 if (node) {
1775 node->DestroyHandle(h);
1776 }
1777
1778 fuse_reply_err(req, 0);
1779 }
1780
1781 static int do_sync_common(int fd, bool datasync) {
1782 int res = datasync ? fdatasync(fd) : fsync(fd);
1783
1784 if (res == -1) return errno;
1785 return 0;
1786 }
1787
1788 static void pf_fsync(fuse_req_t req,
1789 fuse_ino_t ino,
1790 int datasync,
1791 struct fuse_file_info* fi) {
1792 ATRACE_CALL();
1793 handle* h = reinterpret_cast<handle*>(fi->fh);
1794 int err = do_sync_common(h->fd, datasync);
1795
1796 fuse_reply_err(req, err);
1797 }
1798
1799 static void pf_fsyncdir(fuse_req_t req,
1800 fuse_ino_t ino,
1801 int datasync,
1802 struct fuse_file_info* fi) {
1803 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1804 int err = do_sync_common(dirfd(h->d), datasync);
1805
1806 fuse_reply_err(req, err);
1807 }
1808
1809 static void pf_opendir(fuse_req_t req,
1810 fuse_ino_t ino,
1811 struct fuse_file_info* fi) {
1812 ATRACE_CALL();
1813 struct fuse* fuse = get_fuse(req);
1814 node* node = fuse->FromInode(ino);
1815 if (!node) {
1816 fuse_reply_err(req, ENOENT);
1817 return;
1818 }
1819 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1820 const string path = node->BuildPath();
1821 if (!is_app_accessible_path(fuse, path, ctx->uid)) {
1822 fuse_reply_err(req, ENOENT);
1823 return;
1824 }
1825
1826 TRACE_NODE(node, req);
1827
1828 int status = fuse->mp->IsOpendirAllowed(path, ctx->uid, /* forWrite */ false);
1829 if (status) {
1830 fuse_reply_err(req, status);
1831 return;
1832 }
1833
1834 DIR* dir = opendir(path.c_str());
1835 if (!dir) {
1836 fuse_reply_err(req, errno);
1837 return;
1838 }
1839
1840 dirhandle* h = new dirhandle(dir);
1841 node->AddDirHandle(h);
1842
1843 fi->fh = ptr_to_id(h);
1844 fuse_reply_open(req, fi);
1845 }
1846
1847 #define READDIR_BUF 8192LU
1848
1849 static void do_readdir_common(fuse_req_t req,
1850 fuse_ino_t ino,
1851 size_t size,
1852 off_t off,
1853 struct fuse_file_info* fi,
1854 bool plus) {
1855 struct fuse* fuse = get_fuse(req);
1856 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1857 size_t len = std::min<size_t>(size, READDIR_BUF);
1858 char buf[READDIR_BUF];
1859 size_t used = 0;
1860 std::shared_ptr<DirectoryEntry> de;
1861
1862 struct fuse_entry_param e;
1863 size_t entry_size = 0;
1864
1865 node* node = fuse->FromInode(ino);
1866 if (!node) {
1867 fuse_reply_err(req, ENOENT);
1868 return;
1869 }
1870 const string path = node->BuildPath();
1871 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1872 fuse_reply_err(req, ENOENT);
1873 return;
1874 }
1875
1876 TRACE_NODE(node, req);
1877     // Get all directory entries from MediaProvider on the first readdir() call of a
1878     // directory handle. h->next_off == 0 indicates that the current readdir() call
1879     // is the first readdir() call for the directory handle; this avoids multiple JNI
1880     // calls for a single directory handle.
1881 if (h->next_off == 0) {
1882 h->de = fuse->mp->GetDirectoryEntries(req->ctx.uid, path, h->d);
1883 }
1884 // If the last entry in the previous readdir() call was rejected due to
1885 // buffer capacity constraints, update directory offset to start from
1886 // previously rejected entry. Directory offset can also change if there was
1887 // a seekdir() on the given directory handle.
1888 if (off != h->next_off) {
1889 h->next_off = off;
1890 }
1891 const int num_directory_entries = h->de.size();
1892     // Check for errors. Any error/exception that occurred while obtaining directory
1893     // entries is indicated by marking the first directory entry name as an empty
1894     // string; in that case the corresponding d_type holds the error number.
1895 if (num_directory_entries && h->de[0]->d_name.empty()) {
1896 fuse_reply_err(req, h->de[0]->d_type);
1897 return;
1898 }
1899
1900 while (h->next_off < num_directory_entries) {
1901 de = h->de[h->next_off];
1902 entry_size = 0;
1903 h->next_off++;
1904 if (plus) {
1905 int error_code = 0;
1906 if (do_lookup(req, ino, de->d_name.c_str(), &e, &error_code, FuseOp::readdir)) {
1907 entry_size = fuse_add_direntry_plus(req, buf + used, len - used, de->d_name.c_str(),
1908 &e, h->next_off);
1909 } else {
1910 // Ignore lookup errors on
1911 // 1. non-existing files returned from MediaProvider database.
1912 // 2. path that doesn't match FuseDaemon UID and calling uid.
1913 if (error_code == ENOENT || error_code == EPERM || error_code == EACCES
1914 || error_code == EIO) continue;
1915 fuse_reply_err(req, error_code);
1916 return;
1917 }
1918 } else {
1919 // This should never happen because we have readdir_plus enabled without adaptive
1920 // readdir_plus, FUSE_CAP_READDIRPLUS_AUTO
1921 LOG(WARNING) << "Handling plain readdir for " << de->d_name << ". Invalid d_ino";
1922 e.attr.st_ino = FUSE_UNKNOWN_INO;
1923 e.attr.st_mode = de->d_type << 12;
1924 entry_size = fuse_add_direntry(req, buf + used, len - used, de->d_name.c_str(), &e.attr,
1925 h->next_off);
1926 }
1927         // If the buffer passed to fuse_add_direntry[_plus] is not large enough, the
1928         // entry is not added to the buffer but its size is still returned. Check that
1929         // the bytes used so far plus the returned entry size do not exceed the buffer
1930         // capacity to confirm the entry was actually added.
1931 if (used + entry_size > len) {
1932 // When an entry is rejected, lookup called by readdir_plus will not be tracked by
1933 // kernel. Call forget on the rejected node to decrement the reference count.
1934 if (plus) {
1935 do_forget(req, fuse, e.ino, 1);
1936 }
1937 break;
1938 }
1939 used += entry_size;
1940 }
1941 fuse_reply_buf(req, buf, used);
1942 }
1943
1944 static void pf_readdir(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1945 struct fuse_file_info* fi) {
1946 ATRACE_CALL();
1947 do_readdir_common(req, ino, size, off, fi, false);
1948 }
1949
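// Rounds o up to the next multiple of s. Used below to step over fuse_dirent records,
// which are padded to 8-byte (sizeof(uint64_t)) boundaries.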
1950 static off_t round_up(off_t o, size_t s) {
1951 return (o + s - 1) / s * s;
1952 }
1953
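// Readdir postfilter: the kernel has already produced dirents_in from the lower
// filesystem and asks the daemon to filter them; only entries the calling uid may see
// (plus ".nomedia") are copied through to the reply.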
1954 static void pf_readdir_postfilter(fuse_req_t req, fuse_ino_t ino, uint32_t error_in, off_t off_in,
1955 off_t off_out, size_t size_out, const void* dirents_in,
1956 struct fuse_file_info* fi) {
1957 struct fuse* fuse = get_fuse(req);
1958 char buf[READDIR_BUF];
1959 struct fuse_read_out* fro = (struct fuse_read_out*)(buf);
1960 size_t used = sizeof(*fro);
1961 char* dirents_out = (char*)(fro + 1);
1962
1963 ATRACE_CALL();
1964 node* node = fuse->FromInode(ino);
1965 if (!node) {
1966 fuse_reply_err(req, ENOENT);
1967 return;
1968 }
1969
1970 TRACE_NODE(node, req);
1971 const string path = node->BuildPath();
1972
1973 *fro = (struct fuse_read_out){
1974 .offset = (uint64_t)off_out,
1975 };
1976
1977 for (off_t in = 0; in < size_out;) {
1978 struct fuse_dirent* dirent_in = (struct fuse_dirent*)((char*)dirents_in + in);
1979 struct fuse_dirent* dirent_out = (struct fuse_dirent*)((char*)dirents_out + fro->size);
1980 struct stat stats;
1981 int err;
1982 std::string child_path = path + "/" + dirent_in->name;
1983
1984 in += sizeof(*dirent_in) + round_up(dirent_in->namelen, sizeof(uint64_t));
1985 err = stat(child_path.c_str(), &stats);
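        // Forward the entry if any of these holds: others-execute (0001) is set; the
        // caller's gid matches and group-execute (0010) is set; the caller's uid matches
        // and owner-execute (0100) is set; MediaProvider grants this uid access to the
        // data/obb path; or the entry is ".nomedia".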
1986 if (err == 0 &&
1987 ((stats.st_mode & 0001) || ((stats.st_mode & 0010) && req->ctx.gid == stats.st_gid) ||
1988 ((stats.st_mode & 0100) && req->ctx.uid == stats.st_uid) ||
1989 fuse->mp->isUidAllowedAccessToDataOrObbPath(req->ctx.uid, child_path) ||
1990 strcmp(dirent_in->name, ".nomedia") == 0)) {
1991 *dirent_out = *dirent_in;
1992 strcpy(dirent_out->name, dirent_in->name);
1993 fro->size += sizeof(*dirent_out) + round_up(dirent_out->namelen, sizeof(uint64_t));
1994 }
1995 }
1996 used += fro->size;
1997 fuse_reply_buf(req, buf, used);
1998 }
1999
2000 static void pf_readdirplus(fuse_req_t req,
2001 fuse_ino_t ino,
2002 size_t size,
2003 off_t off,
2004 struct fuse_file_info* fi) {
2005 ATRACE_CALL();
2006 do_readdir_common(req, ino, size, off, fi, true);
2007 }
2008
2009 static void pf_releasedir(fuse_req_t req,
2010 fuse_ino_t ino,
2011 struct fuse_file_info* fi) {
2012 ATRACE_CALL();
2013 struct fuse* fuse = get_fuse(req);
2014
2015 node* node = fuse->FromInode(ino);
2016
2017 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
2018 TRACE_NODE(node, req);
2019 if (node) {
2020 node->DestroyDirHandle(h);
2021 }
2022
2023 fuse_reply_err(req, 0);
2024 }
2025
2026 static void pf_statfs(fuse_req_t req, fuse_ino_t ino) {
2027 ATRACE_CALL();
2028 struct statvfs st;
2029 struct fuse* fuse = get_fuse(req);
2030
2031 if (statvfs(fuse->root->GetName().c_str(), &st))
2032 fuse_reply_err(req, errno);
2033 else
2034 fuse_reply_statfs(req, &st);
2035 }
2036 /*
2037 static void pf_setxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
2038 const char* value, size_t size, int flags)
2039 {
2040 cout << "TODO:" << __func__;
2041 }
2042
2043 static void pf_getxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
2044 size_t size)
2045 {
2046 cout << "TODO:" << __func__;
2047 }
2048
2049 static void pf_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
2050 {
2051 cout << "TODO:" << __func__;
2052 }
2053
2054 static void pf_removexattr(fuse_req_t req, fuse_ino_t ino, const char* name)
2055 {
2056 cout << "TODO:" << __func__;
2057 }*/
2058
2059 static void pf_access(fuse_req_t req, fuse_ino_t ino, int mask) {
2060 ATRACE_CALL();
2061 struct fuse* fuse = get_fuse(req);
2062
2063 node* node = fuse->FromInode(ino);
2064 if (!node) {
2065 fuse_reply_err(req, ENOENT);
2066 return;
2067 }
2068 const string path = node->BuildPath();
2069 if (path != PRIMARY_VOLUME_PREFIX && !is_app_accessible_path(fuse, path, req->ctx.uid)) {
2070 fuse_reply_err(req, ENOENT);
2071 return;
2072 }
2073 TRACE_NODE(node, req);
2074
2075 // exists() checks are always allowed.
2076 if (mask == F_OK) {
2077 int res = access(path.c_str(), F_OK);
2078 fuse_reply_err(req, res ? errno : 0);
2079 return;
2080 }
2081 struct stat stat;
2082 if (lstat(path.c_str(), &stat)) {
2083 // File doesn't exist
2084 fuse_reply_err(req, ENOENT);
2085 return;
2086 }
2087
2088 // For read and write permission checks we go to MediaProvider.
2089 int status = 0;
2090 bool for_write = mask & W_OK;
2091 bool is_directory = S_ISDIR(stat.st_mode);
2092 if (is_directory) {
2093 if (path == PRIMARY_VOLUME_PREFIX && mask == X_OK) {
2094 // Special case for this path: apps should be allowed to enter it,
2095 // but not list directory contents (which would be user numbers).
2096 int res = access(path.c_str(), X_OK);
2097 fuse_reply_err(req, res ? errno : 0);
2098 return;
2099 }
2100 status = fuse->mp->IsOpendirAllowed(path, req->ctx.uid, for_write);
2101 } else {
2102 if (mask & X_OK) {
2103 // Fuse is mounted with MS_NOEXEC.
2104 fuse_reply_err(req, EACCES);
2105 return;
2106 }
2107
2108 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
2109 path, path, req->ctx.uid, req->ctx.pid, node->GetTransformsReason(), for_write,
2110 false /* redact */, false /* log_transforms_metrics */);
2111 if (!result) {
2112 status = EFAULT;
2113 } else if (result->status) {
2114 status = EACCES;
2115 }
2116 }
2117
2118 fuse_reply_err(req, status);
2119 }
2120
2121 static void pf_create(fuse_req_t req,
2122 fuse_ino_t parent,
2123 const char* name,
2124 mode_t mode,
2125 struct fuse_file_info* fi) {
2126 ATRACE_CALL();
2127 struct fuse* fuse = get_fuse(req);
2128 node* parent_node = fuse->FromInode(parent);
2129 if (!parent_node) {
2130 fuse_reply_err(req, ENOENT);
2131 return;
2132 }
2133 const string parent_path = parent_node->BuildPath();
2134 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
2135 fuse_reply_err(req, ENOENT);
2136 return;
2137 }
2138
2139 TRACE_NODE(parent_node, req);
2140
2141 const string child_path = parent_path + "/" + name;
2142
2143 const OpenInfo open_info = parse_open_flags(child_path, fi->flags);
2144
2145 int mp_return_code = fuse->mp->InsertFile(child_path.c_str(), req->ctx.uid);
2146 if (mp_return_code) {
2147 fuse_reply_err(req, mp_return_code);
2148 return;
2149 }
2150
2151 mode = (mode & (~0777)) | 0664;
2152 int fd = open(child_path.c_str(), open_info.flags, mode);
2153 if (fd < 0) {
2154 int error_code = errno;
2155 // We've already inserted the file into the MP database before the
2156 // failed open(), so that needs to be rolled back here.
2157 fuse->mp->DeleteFile(child_path.c_str(), req->ctx.uid);
2158 fuse_reply_err(req, error_code);
2159 return;
2160 }
2161
2162 int error_code = 0;
2163 struct fuse_entry_param e;
2164 node* node = make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
2165 FuseOp::create);
2166 TRACE_NODE(node, req);
2167 if (!node) {
2168 CHECK(error_code != 0);
2169 fuse_reply_err(req, error_code);
2170 return;
2171 }
2172
2173 // Let MediaProvider know we've created a new file
2174 fuse->mp->OnFileCreated(child_path);
2175
2176 // TODO(b/147274248): Assume there will be no EXIF to redact.
2177 // This prevents crashing during reads but can be a security hole if a malicious app opens an fd
2178 // to the file before all the EXIF content is written. We could special case reads before the
2179 // first close after a file has just been created.
2180 int keep_cache = 1;
2181 const handle* h = create_handle_for_node(
2182 fuse, child_path, fd, req->ctx.uid, 0 /* transforms_uid */, node, new RedactionInfo(),
2183 /* allow_passthrough */ true, open_info.direct_io, &keep_cache);
2184 fill_fuse_file_info(h, &open_info, keep_cache, fi);
2185
2186     // TODO(b/173190192) requiring that h->cached be enabled in order to
2187     // use FUSE passthrough is a conservative rule and might be dropped
2188     // once its correctness is demonstrated.
2189 if (h->passthrough && !do_passthrough_enable(req, fi, fd)) {
2190 PLOG(ERROR) << "Passthrough CREATE failed for " << child_path;
2191 fuse_reply_err(req, EFAULT);
2192 return;
2193 }
2194
2195 fuse_reply_create(req, &e, fi);
2196 }
2197 /*
2198 static void pf_getlk(fuse_req_t req, fuse_ino_t ino,
2199 struct fuse_file_info* fi, struct flock* lock)
2200 {
2201 cout << "TODO:" << __func__;
2202 }
2203
2204 static void pf_setlk(fuse_req_t req, fuse_ino_t ino,
2205 struct fuse_file_info* fi,
2206 struct flock* lock, int sleep)
2207 {
2208 cout << "TODO:" << __func__;
2209 }
2210
2211 static void pf_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
2212 uint64_t idx)
2213 {
2214 cout << "TODO:" << __func__;
2215 }
2216
2217 static void pf_ioctl(fuse_req_t req, fuse_ino_t ino, unsigned int cmd,
2218 void* arg, struct fuse_file_info* fi, unsigned flags,
2219 const void* in_buf, size_t in_bufsz, size_t out_bufsz)
2220 {
2221 cout << "TODO:" << __func__;
2222 }
2223
2224 static void pf_poll(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi,
2225 struct fuse_pollhandle* ph)
2226 {
2227 cout << "TODO:" << __func__;
2228 }
2229
2230 static void pf_retrieve_reply(fuse_req_t req, void* cookie, fuse_ino_t ino,
2231 off_t offset, struct fuse_bufvec* bufv)
2232 {
2233 cout << "TODO:" << __func__;
2234 }
2235
2236 static void pf_flock(fuse_req_t req, fuse_ino_t ino,
2237 struct fuse_file_info* fi, int op)
2238 {
2239 cout << "TODO:" << __func__;
2240 }
2241
2242 static void pf_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
2243 off_t offset, off_t length, struct fuse_file_info* fi)
2244 {
2245 cout << "TODO:" << __func__;
2246 }
2247 */
2248
2249 static struct fuse_lowlevel_ops ops{
2250 .init = pf_init, .destroy = pf_destroy, .lookup = pf_lookup,
2251 .lookup_postfilter = pf_lookup_postfilter, .forget = pf_forget, .getattr = pf_getattr,
2252 .setattr = pf_setattr, .canonical_path = pf_canonical_path, .mknod = pf_mknod,
2253 .mkdir = pf_mkdir, .unlink = pf_unlink, .rmdir = pf_rmdir,
2254 /*.symlink = pf_symlink,*/
2255 .rename = pf_rename,
2256 /*.link = pf_link,*/
2257 .open = pf_open, .read = pf_read,
2258 /*.write = pf_write,*/
2259 .flush = pf_flush, .release = pf_release, .fsync = pf_fsync, .opendir = pf_opendir,
2260 .readdir = pf_readdir, .readdirpostfilter = pf_readdir_postfilter, .releasedir = pf_releasedir,
2261 .fsyncdir = pf_fsyncdir, .statfs = pf_statfs,
2262 /*.setxattr = pf_setxattr,
2263 .getxattr = pf_getxattr,
2264 .listxattr = pf_listxattr,
2265 .removexattr = pf_removexattr,*/
2266 .access = pf_access, .create = pf_create,
2267 /*.getlk = pf_getlk,
2268 .setlk = pf_setlk,
2269 .bmap = pf_bmap,
2270 .ioctl = pf_ioctl,
2271 .poll = pf_poll,*/
2272 .write_buf = pf_write_buf,
2273 /*.retrieve_reply = pf_retrieve_reply,*/
2274 .forget_multi = pf_forget_multi,
2275 /*.flock = pf_flock,*/
2276 .fallocate = pf_fallocate, .readdirplus = pf_readdirplus,
2277 /*.copy_file_range = pf_copy_file_range,*/
2278 };
2279
2280 static struct fuse_loop_config config = {
2281 .clone_fd = 1,
2282 .max_idle_threads = 10,
2283 };
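// clone_fd gives each libfuse worker thread its own copy of the /dev/fuse fd;
// max_idle_threads caps how many idle workers are kept alive between bursts of requests.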
2284
2285 static std::unordered_map<enum fuse_log_level, enum android_LogPriority> fuse_to_android_loglevel({
2286 {FUSE_LOG_EMERG, ANDROID_LOG_FATAL},
2287 {FUSE_LOG_ALERT, ANDROID_LOG_ERROR},
2288 {FUSE_LOG_CRIT, ANDROID_LOG_ERROR},
2289 {FUSE_LOG_ERR, ANDROID_LOG_ERROR},
2290 {FUSE_LOG_WARNING, ANDROID_LOG_WARN},
2291 {FUSE_LOG_NOTICE, ANDROID_LOG_INFO},
2292 {FUSE_LOG_INFO, ANDROID_LOG_DEBUG},
2293 {FUSE_LOG_DEBUG, ANDROID_LOG_VERBOSE},
2294 });
2295
2296 static void fuse_logger(enum fuse_log_level level, const char* fmt, va_list ap) {
2297 __android_log_vprint(fuse_to_android_loglevel.at(level), LIBFUSE_LOG_TAG, fmt, ap);
2298 }
2299
2300 bool FuseDaemon::ShouldOpenWithFuse(int fd, bool for_read, const std::string& path) {
2301 if (fuse->passthrough) {
2302 // Always open with FUSE if passthrough is enabled. This avoids the delicate file lock
2303 // acquisition below to ensure VFS cache consistency and doesn't impact filesystem
2304 // performance since read(2)/write(2) happen in the kernel
2305 return true;
2306 }
2307
2308 bool use_fuse = false;
2309
2310 if (active.load(std::memory_order_acquire)) {
2311 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
2312 const node* node = node::LookupAbsolutePath(fuse->root, path);
2313 if (node && node->HasCachedHandle()) {
2314 use_fuse = true;
2315 } else {
2316 // If we are unable to set a lock, we should use fuse since we can't track
2317 // when all fd references (including dups) are closed. This can happen when
2318 // we try to set a write lock twice on the same file
2319 use_fuse = set_file_lock(fd, for_read, path);
2320 }
2321 } else {
2322 LOG(WARNING) << "FUSE daemon is inactive. Cannot open file with FUSE";
2323 }
2324
2325 return use_fuse;
2326 }
2327
2328 bool FuseDaemon::UsesFusePassthrough() const {
2329 return fuse->passthrough;
2330 }
2331
2332 void FuseDaemon::InvalidateFuseDentryCache(const std::string& path) {
2333 LOG(VERBOSE) << "Invalidating FUSE dentry cache";
2334 if (active.load(std::memory_order_acquire)) {
2335 string name;
2336 fuse_ino_t parent;
2337 fuse_ino_t child;
2338 {
2339 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
2340 const node* node = node::LookupAbsolutePath(fuse->root, path);
2341 if (node) {
2342 name = node->GetName();
2343 child = fuse->ToInode(const_cast<class node*>(node));
2344 parent = fuse->ToInode(node->GetParent());
2345 }
2346 }
2347
2348 if (!name.empty()) {
2349 fuse_inval(fuse->se, parent, child, name, path);
2350 }
2351 } else {
2352 LOG(WARNING) << "FUSE daemon is inactive. Cannot invalidate dentry";
2353 }
2354 }
2355
2356 FuseDaemon::FuseDaemon(JNIEnv* env, jobject mediaProvider) : mp(env, mediaProvider),
2357 active(false), fuse(nullptr) {}
2358
2359 bool FuseDaemon::IsStarted() const {
2360 return active.load(std::memory_order_acquire);
2361 }
2362
2363 static bool IsPropertySet(const char* name, bool& value) {
2364 if (android::base::GetProperty(name, "") == "") return false;
2365
2366 value = android::base::GetBoolProperty(name, false);
2367 LOG(INFO) << "fuse-bpf is " << (value ? "enabled" : "disabled") << " because of property "
2368 << name;
2369 return true;
2370 }
2371
2372 bool IsFuseBpfEnabled() {
2373 // ro.fuse.bpf.is_running may not be set when first reading this property, so we have to
2374 // reproduce the vold/Utils.cpp:isFuseBpfEnabled() logic here
2375
2376 bool is_enabled;
2377 if (IsPropertySet("ro.fuse.bpf.is_running", is_enabled)) return is_enabled;
2378 if (IsPropertySet("persist.sys.fuse.bpf.override", is_enabled)) return is_enabled;
2379 if (IsPropertySet("ro.fuse.bpf.enabled", is_enabled)) return is_enabled;
2380
2381 // If the kernel has fuse-bpf, /sys/fs/fuse/features/fuse_bpf will exist and have the contents
2382 // 'supported\n' - see fs/fuse/inode.c in the kernel source
2383 string contents;
2384 const char* filename = "/sys/fs/fuse/features/fuse_bpf";
2385 if (!android::base::ReadFileToString(filename, &contents)) {
2386 LOG(INFO) << "fuse-bpf is disabled because " << filename << " cannot be read";
2387 return false;
2388 }
2389
2390 if (contents == "supported\n") {
2391 LOG(INFO) << "fuse-bpf is enabled because " << filename << " reads 'supported'";
2392 return true;
2393 } else {
2394 LOG(INFO) << "fuse-bpf is disabled because " << filename << " does not read 'supported'";
2395 return false;
2396 }
2397 }
2398
2399 void FuseDaemon::Start(android::base::unique_fd fd, const std::string& path,
2400 const bool uncached_mode,
2401 const std::vector<std::string>& supported_transcoding_relative_paths,
2402 const std::vector<std::string>& supported_uncached_relative_paths) {
2403 android::base::SetDefaultTag(LOG_TAG);
2404
2405 struct fuse_args args;
2406 struct fuse_cmdline_opts opts;
2407
2408 struct stat stat;
2409
2410 if (lstat(path.c_str(), &stat)) {
2411 PLOG(ERROR) << "ERROR: failed to stat source " << path;
2412 return;
2413 }
2414
2415 if (!S_ISDIR(stat.st_mode)) {
2416 PLOG(ERROR) << "ERROR: source is not a directory";
2417 return;
2418 }
2419
2420 args = FUSE_ARGS_INIT(0, nullptr);
2421 if (fuse_opt_add_arg(&args, path.c_str()) || fuse_opt_add_arg(&args, "-odebug") ||
2422 fuse_opt_add_arg(&args, ("-omax_read=" + std::to_string(MAX_READ_SIZE)).c_str())) {
2423 LOG(ERROR) << "ERROR: failed to set options";
2424 return;
2425 }
2426
2427 bool bpf_enabled = IsFuseBpfEnabled();
2428 int bpf_fd = -1;
2429 if (bpf_enabled) {
2430 bpf_fd = android::bpf::bpfFdGet(FUSE_BPF_PROG_PATH, BPF_F_RDONLY);
2431 if (bpf_fd < 0) {
2432 PLOG(ERROR) << "Failed to fetch BPF prog fd: " << bpf_fd;
2433 bpf_enabled = false;
2434 } else {
2435 LOG(INFO) << "Using FUSE BPF, BPF prog fd fetched";
2436 }
2437 }
2438
2439 if (!bpf_enabled) {
2440 LOG(INFO) << "Not using FUSE BPF";
2441 }
2442
2443 struct fuse fuse_default(path, stat.st_ino, uncached_mode, bpf_enabled, bpf_fd,
2444 supported_transcoding_relative_paths,
2445 supported_uncached_relative_paths);
2446     fuse_default.mp = &mp;
2447 // fuse_default is stack allocated, but it's safe to save it as an instance variable because
2448 // this method blocks and FuseDaemon#active tells if we are currently blocking
2449 fuse = &fuse_default;
2450
2451 // Used by pf_read: redacted ranges are represented by zeroized ranges of bytes,
2452 // so we mmap the maximum length of redacted ranges in the beginning and save memory allocations
2453 // on each read.
2454 fuse_default.zero_addr = static_cast<char*>(mmap(
2455 NULL, MAX_READ_SIZE, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, /*fd*/ -1, /*off*/ 0));
2456 if (fuse_default.zero_addr == MAP_FAILED) {
2457 LOG(FATAL) << "mmap failed - could not start fuse! errno = " << errno;
2458 }
2459
2460 // Custom logging for libfuse
2461 if (android::base::GetBoolProperty("persist.sys.fuse.log", false)) {
2462 fuse_set_log_func(fuse_logger);
2463 }
2464
2465 if (MY_USER_ID != 0 && mp.IsAppCloneUser(MY_USER_ID)) {
2466 // Disable dentry caching for the app clone user
2467 fuse->disable_dentry_cache = true;
2468 }
2469
2470 fuse->passthrough = android::base::GetBoolProperty("persist.sys.fuse.passthrough.enable", false);
2471 if (fuse->passthrough) {
2472 LOG(INFO) << "Using FUSE passthrough";
2473 }
2474
2475     struct fuse_session* se =
2476             fuse_session_new(&args, &ops, sizeof(ops), &fuse_default);
2477 if (!se) {
2478 PLOG(ERROR) << "Failed to create session ";
2479 return;
2480 }
2481 fuse_default.se = se;
2482 fuse_default.active = &active;
2483 se->fd = fd.release(); // libfuse owns the FD now
2484 se->mountpoint = strdup(path.c_str());
2485
2486 // Single thread. Useful for debugging
2487 // fuse_session_loop(se);
2488 // Multi-threaded
2489 LOG(INFO) << "Starting fuse...";
2490 fuse_session_loop_mt(se, &config);
2491 fuse->active->store(false, std::memory_order_release);
2492 LOG(INFO) << "Ending fuse...";
2493
2494 if (munmap(fuse_default.zero_addr, MAX_READ_SIZE)) {
2495 PLOG(ERROR) << "munmap failed!";
2496 }
2497
2498 fuse_opt_free_args(&args);
2499 fuse_session_destroy(se);
2500 LOG(INFO) << "Ended fuse";
2501 return;
2502 }
2503
2504 std::unique_ptr<FdAccessResult> FuseDaemon::CheckFdAccess(int fd, uid_t uid) const {
2505 struct stat s;
2506 memset(&s, 0, sizeof(s));
2507 if (fstat(fd, &s) < 0) {
2508 PLOG(DEBUG) << "CheckFdAccess fstat failed.";
2509 return std::make_unique<FdAccessResult>(string(), false);
2510 }
2511
2512 ino_t ino = s.st_ino;
2513 dev_t dev = s.st_dev;
2514
2515 dev_t fuse_dev = fuse->dev.load(std::memory_order_acquire);
2516 if (dev != fuse_dev) {
2517 PLOG(DEBUG) << "CheckFdAccess FUSE device id does not match.";
2518 return std::make_unique<FdAccessResult>(string(), false);
2519 }
2520
2521 const node* node = node::LookupInode(fuse->root, ino);
2522 if (!node) {
2523 PLOG(DEBUG) << "CheckFdAccess no node found with given ino";
2524 return std::make_unique<FdAccessResult>(string(), false);
2525 }
2526
2527 return node->CheckHandleForUid(uid);
2528 }
2529
2530 void FuseDaemon::InitializeDeviceId(const std::string& path) {
2531 struct stat stat;
2532
2533 if (lstat(path.c_str(), &stat)) {
2534 PLOG(ERROR) << "InitializeDeviceId failed to stat given path " << path;
2535 return;
2536 }
2537
2538 fuse->dev.store(stat.st_dev, std::memory_order_release);
2539 }
2540
2541 void FuseDaemon::SetupLevelDbConnection(const std::string& instance_name) {
2542 if (CheckLevelDbConnection(instance_name)) {
2543 LOG(DEBUG) << "Leveldb connection already exists for :" << instance_name;
2544 return;
2545 }
2546
2547 std::string leveldbPath = "/storage/emulated/" + MY_USER_ID_STRING +
2548 "/.transforms/recovery/leveldb-" + instance_name;
2549 leveldb::Options options;
2550 options.create_if_missing = true;
2551 leveldb::DB* leveldb;
2552 leveldb::Status status = leveldb::DB::Open(options, leveldbPath, &leveldb);
2553 if (status.ok()) {
2554 fuse->level_db_connection_map.insert(
2555 std::pair<std::string, leveldb::DB*>(instance_name, leveldb));
2556 LOG(INFO) << "Leveldb connection established for :" << instance_name;
2557 } else {
2558 LOG(ERROR) << "Leveldb connection failed for :" << instance_name
2559 << " with error:" << status.ToString();
2560 }
2561 }
2562
2563 void FuseDaemon::SetupLevelDbInstances() {
2564 if (android::base::StartsWith(fuse->root->GetIoPath(), PRIMARY_VOLUME_PREFIX)) {
2565 // Setup leveldb instance for both external primary and internal volume.
2566 fuse->level_db_mutex.lock();
2567 // Create level db instance for internal volume
2568 SetupLevelDbConnection(VOLUME_INTERNAL);
2569 // Create level db instance for external primary volume
2570 SetupLevelDbConnection(VOLUME_EXTERNAL_PRIMARY);
2571 // Create level db instance to store owner id to owner package name and vice versa relation
2572 SetupLevelDbConnection(OWNERSHIP_RELATION);
2573 fuse->level_db_mutex.unlock();
2574 }
2575 }
2576
2577 std::string deriveVolumeName(const std::string& path) {
2578 std::string volume_name;
2579 if (!android::base::StartsWith(path, STORAGE_PREFIX)) {
2580 volume_name = VOLUME_INTERNAL;
2581 } else if (android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
2582 volume_name = VOLUME_EXTERNAL_PRIMARY;
2583 } else {
2584 size_t size = sizeof(STORAGE_PREFIX) / sizeof(STORAGE_PREFIX[0]);
2585         volume_name = path.substr(size);  // take the substring from path; volume_name is still empty here
2586 }
2587 return volume_name;
2588 }
2589
2590 void FuseDaemon::DeleteFromLevelDb(const std::string& key) {
2591 std::string volume_name = deriveVolumeName(key);
2592 if (!CheckLevelDbConnection(volume_name)) {
2593 LOG(ERROR) << "Failure in leveldb delete in volume:" << volume_name << " for key:" << key;
2594 return;
2595 }
2596
2597 leveldb::Status status;
2598 status = fuse->level_db_connection_map[volume_name]->Delete(leveldb::WriteOptions(), key);
2599 if (!status.ok()) {
2600 LOG(ERROR) << "Failure in leveldb delete for key: " << key <<
2601 " from volume:" << volume_name;
2602 }
2603 }
2604
2605 void FuseDaemon::InsertInLevelDb(const std::string& key, const std::string& value) {
2606 std::string volume_name = deriveVolumeName(key);
2607 if (!CheckLevelDbConnection(volume_name)) {
2608 LOG(ERROR) << "Failure in leveldb insert in volume:" << volume_name << " for key:" << key;
2609 return;
2610 }
2611
2612 leveldb::Status status;
2613 status = fuse->level_db_connection_map[volume_name]->Put(leveldb::WriteOptions(), key, value);
2614 if (!status.ok()) {
2615 LOG(ERROR) << "Failure in leveldb insert for key: " << key << " in volume:" << volume_name;
2616 }
2617 }
2618
2619 std::vector<std::string> FuseDaemon::ReadFilePathsFromLevelDb(const std::string& volume_name,
2620 const std::string& last_read_value,
2621 int limit) {
2622 int counter = 0;
2623 std::vector<std::string> file_paths;
2624
2625 if (!CheckLevelDbConnection(volume_name)) {
2626 LOG(ERROR) << "Failure in leveldb file paths read for volume:" << volume_name;
2627 return file_paths;
2628 }
2629
2630 leveldb::Iterator* it =
2631 fuse->level_db_connection_map[volume_name]->NewIterator(leveldb::ReadOptions());
2632 if (android::base::EqualsIgnoreCase(last_read_value, "")) {
2633 it->SeekToFirst();
2634 } else {
2635 // Start after last read value
2636 leveldb::Slice slice = last_read_value;
2637 it->Seek(slice);
2638 it->Next();
2639 }
2640 for (; it->Valid() && counter < limit; it->Next()) {
2641 file_paths.push_back(it->key().ToString());
2642 counter++;
2643 }
2644 return file_paths;
2645 }
2646
2647 std::string FuseDaemon::ReadBackedUpDataFromLevelDb(const std::string& filePath) {
2648 std::string data = "";
2649 std::string volume_name = deriveVolumeName(filePath);
2650 if (!CheckLevelDbConnection(volume_name)) {
2651 LOG(ERROR) << "Failure in leveldb data read for key:" << filePath;
2652 return data;
2653 }
2654
2655 leveldb::Status status = fuse->level_db_connection_map[volume_name]->Get(leveldb::ReadOptions(),
2656 filePath, &data);
2657 if (!status.ok()) {
2658         LOG(WARNING) << "Failure in leveldb read for key: " << filePath << " : " << status.ToString();
2659 } else {
2660 LOG(DEBUG) << "Read successful for key: " << filePath;
2661 }
2662 return data;
2663 }
2664
2665 std::string FuseDaemon::ReadOwnership(const std::string& key) {
2666 // Return empty string if key not found
2667 std::string data = "";
2668 if (CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2669 leveldb::Status status = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Get(
2670 leveldb::ReadOptions(), key, &data);
2671 if (!status.ok()) {
2672             LOG(WARNING) << "Failure in leveldb read for key: " << key << " : " << status.ToString();
2673 } else {
2674 LOG(DEBUG) << "Read successful for key: " << key;
2675 }
2676 }
2677 return data;
2678 }
2679
2680 void FuseDaemon::CreateOwnerIdRelation(const std::string& ownerId,
2681 const std::string& ownerPackageIdentifier) {
2682 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2683 LOG(ERROR) << "Failure in leveldb insert for ownership relation.";
2684 return;
2685 }
2686
2687 leveldb::Status status1, status2;
2688 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2689 leveldb::WriteOptions(), ownerId, ownerPackageIdentifier);
2690 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2691 leveldb::WriteOptions(), ownerPackageIdentifier, ownerId);
2692 if (!status1.ok() || !status2.ok()) {
2693         // If either insert did not go through, remove both.
2694 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2695 ownerId);
2696 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2697 ownerPackageIdentifier);
2698 LOG(ERROR) << "Failure in leveldb insert for owner_id: " << ownerId
2699 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2700 }
2701 }
2702
2703 void FuseDaemon::RemoveOwnerIdRelation(const std::string& ownerId,
2704 const std::string& ownerPackageIdentifier) {
2705 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2706 LOG(ERROR) << "Failure in leveldb delete for ownership relation.";
2707 return;
2708 }
2709
2710 leveldb::Status status1, status2;
2711 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2712 ownerId);
2713 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2714 ownerPackageIdentifier);
2715 if (status1.ok() && status2.ok()) {
2716 LOG(INFO) << "Successfully deleted rows in leveldb for owner_id: " << ownerId
2717 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2718 } else {
2719         // If either delete did not go through, revert both.
2720 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2721 leveldb::WriteOptions(), ownerId, ownerPackageIdentifier);
2722 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2723 leveldb::WriteOptions(), ownerPackageIdentifier, ownerId);
2724 LOG(ERROR) << "Failure in leveldb delete for owner_id: " << ownerId
2725 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2726 }
2727 }
2728
2729 std::map<std::string, std::string> FuseDaemon::GetOwnerRelationship() {
2730 std::map<std::string, std::string> resultMap;
2731 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2732 LOG(ERROR) << "Failure in leveldb read for ownership relation.";
2733 return resultMap;
2734 }
2735
2736 leveldb::Status status;
2737 // Get the key-value pairs from the database.
2738 leveldb::Iterator* it =
2739 fuse->level_db_connection_map[OWNERSHIP_RELATION]->NewIterator(leveldb::ReadOptions());
2740 for (it->SeekToFirst(); it->Valid(); it->Next()) {
2741 std::string key = it->key().ToString();
2742 std::string value = it->value().ToString();
2743 resultMap.insert(std::pair<std::string, std::string>(key, value));
2744 }
2745 return resultMap;
2746 }
2747
2748 bool FuseDaemon::CheckLevelDbConnection(const std::string& instance_name) {
2749 if (fuse->level_db_connection_map.find(instance_name) == fuse->level_db_connection_map.end()) {
2750 LOG(ERROR) << "Leveldb setup is missing for :" << instance_name;
2751 return false;
2752 }
2753 return true;
2754 }
2755
2756 } //namespace fuse
2757 } // namespace mediaprovider
2758