1 // Copyright (C) 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #define ATRACE_TAG ATRACE_TAG_APP
16 #define LOG_TAG "FuseDaemon"
17 #define LIBFUSE_LOG_TAG "libfuse"
18
19 #include "FuseDaemon.h"
20
21 #include <android-base/file.h>
22 #include <android-base/logging.h>
23 #include <android-base/properties.h>
24 #include <android-base/strings.h>
25 #include <android/log.h>
26 #include <android/trace.h>
27 #include <ctype.h>
28 #include <dirent.h>
29 #include <errno.h>
30 #include <fcntl.h>
31 #include <fuse_i.h>
32 #include <fuse_kernel.h>
33 #include <fuse_log.h>
34 #include <fuse_lowlevel.h>
35 #include <inttypes.h>
36 #include <limits.h>
37 #include <stdbool.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <sys/inotify.h>
42 #include <sys/mman.h>
43 #include <sys/mount.h>
44 #include <sys/param.h>
45 #include <sys/resource.h>
46 #include <sys/stat.h>
47 #include <sys/statfs.h>
48 #include <sys/statvfs.h>
49 #include <sys/time.h>
50 #include <sys/types.h>
51 #include <sys/uio.h>
52 #include <unistd.h>
53
54 #include <iostream>
55 #include <map>
56 #include <mutex>
57 #include <queue>
58 #include <regex>
59 #include <thread>
60 #include <unordered_map>
61 #include <unordered_set>
62 #include <vector>
63
64 #include "BpfSyscallWrappers.h"
65 #include "MediaProviderWrapper.h"
66 #include "leveldb/db.h"
67 #include "libfuse_jni/FuseUtils.h"
68 #include "libfuse_jni/ReaddirHelper.h"
69 #include "libfuse_jni/RedactionInfo.h"
70
71 using mediaprovider::fuse::DirectoryEntry;
72 using mediaprovider::fuse::dirhandle;
73 using mediaprovider::fuse::handle;
74 using mediaprovider::fuse::node;
75 using mediaprovider::fuse::RedactionInfo;
76 using std::string;
77 using std::vector;
78
79 // logging macros to avoid duplication.
80 #define TRACE_NODE(__node, __req) \
81 LOG(VERBOSE) << __FUNCTION__ << " : " << #__node << " = [" << get_name(__node) \
82 << "] (uid=" << (__req)->ctx.uid << ") "
83
84 #define ATRACE_NAME(name) ScopedTrace ___tracer(name)
85 #define ATRACE_CALL() ATRACE_NAME(__FUNCTION__)
86
87 class ScopedTrace {
88 public:
    explicit inline ScopedTrace(const char *name) {
        ATrace_beginSection(name);
    }

    inline ~ScopedTrace() {
        ATrace_endSection();
    }
96 };
97
98 const bool IS_OS_DEBUGABLE = android::base::GetIntProperty("ro.debuggable", 0);
99
100 #define FUSE_UNKNOWN_INO 0xffffffff
101
102 // Stolen from: android_filesystem_config.h
103 #define AID_APP_START 10000
104
105 #define FUSE_MAX_MAX_PAGES 256
106
107 const size_t MAX_READ_SIZE = FUSE_MAX_MAX_PAGES * getpagesize();
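// With the common 4 KiB page size, MAX_READ_SIZE works out to 256 * 4096 bytes = 1 MiB.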
108 // Stolen from: UserHandle#getUserId
109 constexpr int PER_USER_RANGE = 100000;
110
111 // Stolen from: UserManagerService
112 constexpr int MAX_USER_ID = UINT32_MAX / PER_USER_RANGE;
113
114 const int MY_UID = getuid();
115 const int MY_USER_ID = MY_UID / PER_USER_RANGE;
116 const std::string MY_USER_ID_STRING(std::to_string(MY_UID / PER_USER_RANGE));
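// For example, if this daemon runs as uid 1010023 (an app-range uid on user 10),
// MY_USER_ID is 1010023 / 100000 = 10 and MY_USER_ID_STRING is "10".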
117
118 // Regex copied from FileUtils.java in MediaProvider, but without media directory.
119 const std::regex PATTERN_OWNED_PATH(
120 "^/storage/[^/]+/(?:[0-9]+/)?Android/(?:data|obb)/([^/]+)(/?.*)?",
121 std::regex_constants::icase);
122 const std::regex PATTERN_BPF_BACKING_PATH("^/storage/[^/]+/[0-9]+/Android/(data|obb)$",
123 std::regex_constants::icase);
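// For illustration: PATTERN_OWNED_PATH matches e.g. "/storage/emulated/0/Android/data/com.example/files"
// (capture group 1 = "com.example"), whereas PATTERN_BPF_BACKING_PATH only matches the Android/data
// and Android/obb directories themselves, e.g. "/storage/emulated/0/Android/obb".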
124
125 static constexpr char TRANSFORM_SYNTHETIC_DIR[] = "synthetic";
126 static constexpr char TRANSFORM_TRANSCODE_DIR[] = "transcode";
127
128 static constexpr char OWNERSHIP_RELATION[] = "ownership";
129
130 static constexpr char FUSE_BPF_PROG_PATH[] = "/sys/fs/bpf/prog_fuseMedia_fuse_media";
131
132 enum class BpfFd { REMOVE = -2 };
133
134 /*
135 * In order to avoid double caching with fuse, call fadvise on the file handles
136 * in the underlying file system. However, if this is done on every read/write,
137 * the fadvises cause a very significant slowdown in tests (specifically fio
138 * seq_write). So call fadvise on the file handles with the most reads/writes
139 * only after a threshold is passed.
140 */
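// Rough usage sketch (illustrative): callers are expected to Record(fd, bytes) after each
// read/write on a lower-fs fd and Close(fd) when the fd is released. Once the total recorded
// bytes exceed threshold_ (64 MiB), the largest contributors are posix_fadvise()d with
// POSIX_FADV_DONTNEED until the total drops below target_ (32 MiB).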
141 class FAdviser {
142 public:
    FAdviser() : thread_(MessageLoop, this), total_size_(0) {}

    ~FAdviser() {
        SendMessage(Message::quit);
        thread_.join();
    }

    void Record(int fd, size_t size) { SendMessage(Message::record, fd, size); }

    void Close(int fd) { SendMessage(Message::close, fd); }
153
154 private:
155 struct Message {
156 enum Type { record, close, quit };
157 Type type;
158 int fd;
159 size_t size;
160 };
161
    void RecordImpl(int fd, size_t size) {
163 total_size_ += size;
164
165 // Find or create record in files_
166 // Remove record from sizes_ if it exists, adjusting size appropriately
167 auto file = files_.find(fd);
168 if (file != files_.end()) {
169 auto old_size = file->second;
170 size += old_size->first;
171 sizes_.erase(old_size);
172 } else {
173 file = files_.insert(Files::value_type(fd, sizes_.end())).first;
174 }
175
176 // Now (re) insert record in sizes_
177 auto new_size = sizes_.insert(Sizes::value_type(size, fd));
178 file->second = new_size;
179
180 if (total_size_ < threshold_) return;
181
182 LOG(INFO) << "Threshold exceeded - fadvising " << total_size_;
183 while (!sizes_.empty() && total_size_ > target_) {
184 auto size = --sizes_.end();
185 total_size_ -= size->first;
186 posix_fadvise(size->second, 0, 0, POSIX_FADV_DONTNEED);
187 files_.erase(size->second);
188 sizes_.erase(size);
189 }
190 LOG(INFO) << "Threshold now " << total_size_;
191 }
192
    void CloseImpl(int fd) {
194 auto file = files_.find(fd);
195 if (file == files_.end()) return;
196
197 total_size_ -= file->second->first;
198 sizes_.erase(file->second);
199 files_.erase(file);
200 }
201
    void MessageLoopImpl() {
203 while (1) {
204 Message message;
205
206 {
207 std::unique_lock<std::mutex> lock(mutex_);
208 cv_.wait(lock, [this] { return !queue_.empty(); });
209 message = queue_.front();
210 queue_.pop();
211 }
212
213 switch (message.type) {
214 case Message::record:
215 RecordImpl(message.fd, message.size);
216 break;
217
218 case Message::close:
219 CloseImpl(message.fd);
220 break;
221
222 case Message::quit:
223 return;
224 }
225 }
226 }
227
    static int MessageLoop(FAdviser* ptr) {
229 ptr->MessageLoopImpl();
230 return 0;
231 }
232
    void SendMessage(Message::Type type, int fd = -1, size_t size = 0) {
234 {
235 std::unique_lock<std::mutex> lock(mutex_);
236 Message message = {type, fd, size};
237 queue_.push(message);
238 }
239 cv_.notify_one();
240 }
241
242 std::mutex mutex_;
243 std::condition_variable cv_;
244 std::queue<Message> queue_;
245 std::thread thread_;
246
247 typedef std::multimap<size_t, int> Sizes;
248 typedef std::map<int, Sizes::iterator> Files;
249
250 Files files_;
251 Sizes sizes_;
252 size_t total_size_;
253
254 const size_t threshold_ = 64 * 1024 * 1024;
255 const size_t target_ = 32 * 1024 * 1024;
256 };
257
258 /* Single FUSE mount */
259 struct fuse {
    explicit fuse(const std::string& _path, const ino_t _ino, const bool _uncached_mode,
                  const bool _bpf, android::base::unique_fd&& _bpf_fd,
                  const std::vector<string>& _supported_transcoding_relative_paths,
                  const std::vector<string>& _supported_uncached_relative_paths)
264 : path(_path),
265 tracker(mediaprovider::fuse::NodeTracker(&lock)),
266 root(node::CreateRoot(_path, &lock, _ino, &tracker)),
267 uncached_mode(_uncached_mode),
268 mp(0),
269 zero_addr(0),
270 disable_dentry_cache(false),
271 passthrough(false),
272 upstream_passthrough(false),
273 bpf(_bpf),
274 bpf_fd(std::move(_bpf_fd)),
275 supported_transcoding_relative_paths(_supported_transcoding_relative_paths),
276 supported_uncached_relative_paths(_supported_uncached_relative_paths) {}
277
    inline bool IsRoot(const node* node) const { return node == root; }

    inline string GetEffectiveRootPath() {
        if (android::base::StartsWith(path, mediaprovider::fuse::PRIMARY_VOLUME_PREFIX)) {
            return path + "/" + MY_USER_ID_STRING;
        }
        return path;
    }

    inline string GetTransformsDir() { return GetEffectiveRootPath() + "/.transforms"; }
    inline string GetPickerTranscodedDir() {
        return GetEffectiveRootPath() + "/.picker_transcoded";
    }
291
292 // Note that these two (FromInode / ToInode) conversion wrappers are required
293 // because fuse_lowlevel_ops documents that the root inode is always one
294 // (see FUSE_ROOT_ID in fuse_lowlevel.h). There are no particular requirements
295 // on any of the other inodes in the FS.
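    // For example, ToInode(root) below always returns FUSE_ROOT_ID and FromInode(FUSE_ROOT_ID)
    // returns root, while every other node round-trips through node::ToInode / node::FromInode.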
    inline node* FromInode(__u64 inode) {
        if (inode == FUSE_ROOT_ID) {
            return root;
        }

        return node::FromInode(inode, &tracker);
    }

    inline node* FromInodeNoThrow(__u64 inode) {
        if (inode == FUSE_ROOT_ID) {
            return root;
        }

        return node::FromInodeNoThrow(inode, &tracker);
    }

    inline __u64 ToInode(node* node) const {
        if (IsRoot(node)) {
            return FUSE_ROOT_ID;
        }

        return node::ToInode(node);
    }
319
    inline bool IsTranscodeSupportedPath(const string& path) {
321 // Keep in sync with MediaProvider#supportsTranscode
322 if (!android::base::EndsWithIgnoreCase(path, ".mp4")) {
323 return false;
324 }
325
326 const std::string& base_path = GetEffectiveRootPath() + "/";
327 for (const std::string& relative_path : supported_transcoding_relative_paths) {
328 if (android::base::StartsWithIgnoreCase(path, base_path + relative_path)) {
329 return true;
330 }
331 }
332
333 return false;
334 }
335
    inline bool IsUncachedPath(const std::string& path) {
337 const std::string base_path = GetEffectiveRootPath() + "/";
338 for (const std::string& relative_path : supported_uncached_relative_paths) {
339 if (android::base::StartsWithIgnoreCase(path, base_path + relative_path)) {
340 return true;
341 }
342 }
343
344 return false;
345 }
346
    inline bool ShouldNotCache(const std::string& path) {
348 if (uncached_mode) {
349 // Cache is disabled for the entire volume.
350 return true;
351 }
352
353 if (supported_uncached_relative_paths.empty()) {
354 // By default there is no supported uncached path. Just return early in this case.
355 return false;
356 }
357
358 if (!android::base::StartsWithIgnoreCase(path, mediaprovider::fuse::PRIMARY_VOLUME_PREFIX)) {
359 // Uncached path config applies only to primary volumes.
360 return false;
361 }
362
363 if (android::base::EndsWith(path, "/")) {
364 return IsUncachedPath(path);
365 } else {
366 // Append a slash at the end to make sure that the exact match is picked up.
367 return IsUncachedPath(path + "/");
368 }
369 }
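    // Illustrative example (hypothetical config): on user 0 with
    // supported_uncached_relative_paths = {"DCIM/"}, ShouldNotCache("/storage/emulated/0/DCIM")
    // and ShouldNotCache("/storage/emulated/0/DCIM/a.jpg") both return true, while
    // ShouldNotCache("/storage/emulated/0/DCIM-other") returns false thanks to the trailing
    // slash appended before matching.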
370
371 std::recursive_mutex lock;
372 const string path;
373 // The Inode tracker associated with this FUSE instance.
374 mediaprovider::fuse::NodeTracker tracker;
375 node* const root;
376 struct fuse_session* se;
377
378 const bool uncached_mode;
379
    /*
     * Used to make JNI calls to MediaProvider.
     * The responsibility of freeing this object falls on the corresponding
     * FuseDaemon object.
     */
385 mediaprovider::fuse::MediaProviderWrapper* mp;
386
387 /*
388 * Points to a range of zeroized bytes, used by pf_read to represent redacted ranges.
389 * The memory is read only and should never be modified.
390 */
391 /* const */ char* zero_addr;
392
393 FAdviser fadviser;
394
395 std::atomic_bool* active;
396 std::atomic_bool disable_dentry_cache;
397 std::atomic_bool passthrough;
398 std::atomic_bool upstream_passthrough;
399 std::atomic_bool bpf;
400
401 const android::base::unique_fd bpf_fd;
402
403 // FUSE device id.
404 std::atomic_uint dev;
405 const std::vector<string> supported_transcoding_relative_paths;
406 const std::vector<string> supported_uncached_relative_paths;
407
408 // LevelDb Connection Map
409 std::map<std::string, leveldb::DB*> level_db_connection_map;
410 std::recursive_mutex level_db_mutex;
411 };
412
413 struct OpenInfo {
414 int flags;
415 bool for_write;
416 bool direct_io;
417 };
418
419 enum class FuseOp { lookup, readdir, mknod, mkdir, create };
420
static inline string get_name(node* n) {
    if (n) {
        std::string name = IS_OS_DEBUGABLE ? "real_path: " + n->BuildPath() + " " : "";
        name += "node_path: " + n->BuildSafePath();
        return name;
    }
    return "?";
}

static inline __u64 ptr_to_id(const void* ptr) {
    return (__u64)(uintptr_t) ptr;
}
433
/*
 * Set an F_RDLCK or F_WRLCK on fd with fcntl(2).
 *
 * This is called before the MediaProvider returns fd from the lower file
 * system to an app over the ContentResolver interface. This allows us to
 * check with is_file_locked if any reference to that fd is still open.
 */
static int set_file_lock(int fd, bool for_read, const std::string& path) {
442 std::string lock_str = (for_read ? "read" : "write");
443
444 struct flock fl{};
445 fl.l_type = for_read ? F_RDLCK : F_WRLCK;
446 fl.l_whence = SEEK_SET;
447
448 int res = fcntl(fd, F_OFD_SETLK, &fl);
449 if (res) {
450 PLOG(WARNING) << "Failed to set lock: " << lock_str;
451 return res;
452 }
453 return res;
454 }
455
/*
 * Check if an F_RDLCK or F_WRLCK is set on fd with fcntl(2).
 *
 * This is used to determine if the MediaProvider has handed an fd for the lower fs to an app over
 * the ContentResolver interface. Before that happens, we always call set_file_lock on the file,
 * allowing us to know if any reference to that fd is still open here.
 *
 * Returns true if fd may have a lock, false otherwise
 */
static bool is_file_locked(int fd, const std::string& path) {
466 struct flock fl{};
467 fl.l_type = F_WRLCK;
468 fl.l_whence = SEEK_SET;
469
470 int res = fcntl(fd, F_OFD_GETLK, &fl);
471 if (res) {
472 PLOG(WARNING) << "Failed to check lock";
473 // Assume worst
474 return true;
475 }
476 bool locked = fl.l_type != F_UNLCK;
477 return locked;
478 }
479
static struct fuse* get_fuse(fuse_req_t req) {
    return reinterpret_cast<struct fuse*>(fuse_req_userdata(req));
}

static bool is_package_owned_path(const string& path, const string& fuse_path) {
    if (path.rfind(fuse_path, 0) != 0) {
        return false;
    }
    return std::regex_match(path, PATTERN_OWNED_PATH);
}

static bool is_bpf_backing_path(const string& path) {
    return std::regex_match(path, PATTERN_BPF_BACKING_PATH);
}
494
// See fuse_lowlevel.h fuse_lowlevel_notify_inval_entry for how to call this safely without
// deadlocking the kernel
static void fuse_inval(fuse_session* se, fuse_ino_t parent_ino, fuse_ino_t child_ino,
                       const string& child_name, const string& path) {
499 if (mediaprovider::fuse::containsMount(path)) {
500 LOG(WARNING) << "Ignoring attempt to invalidate dentry for FUSE mounts";
501 return;
502 }
503
504 if (fuse_lowlevel_notify_inval_entry(se, parent_ino, child_name.c_str(), child_name.size())) {
505 // Invalidating the dentry can fail if there's no dcache entry, however, there may still
506 // be cached attributes, so attempt to invalidate those by invalidating the inode
507 fuse_lowlevel_notify_inval_inode(se, child_ino, 0, 0);
508 }
509 }
510
static double get_entry_timeout(const string& path, bool should_inval, struct fuse* fuse) {
512 if (fuse->disable_dentry_cache || should_inval || is_package_owned_path(path, fuse->path) ||
513 fuse->ShouldNotCache(path)) {
        // We set dentry timeout to 0 for the following reasons:
        // 1. The dentry cache was completely disabled for the entire volume.
        // 2.1 Case-insensitive lookups need to invalidate other case-insensitive dentry matches
        // 2.2 Nodes supporting transforms need to be invalidated, so that subsequent lookups by a
        //     uid requiring a transform are guaranteed to come to the FUSE daemon.
        // 3. With app data isolation enabled, app A should not guess existence of app B from the
        //    Android/{data,obb}/<package> paths, hence we prevent the kernel from caching that
        //    information.
        // 4. The dentry cache was completely disabled for the given path.
523 return 0;
524 }
525 return std::numeric_limits<double>::max();
526 }
527
static std::string get_path(node* node) {
529 const string& io_path = node->GetIoPath();
530 return io_path.empty() ? node->BuildPath() : io_path;
531 }
532
// Returns true if the path resides under .transforms/synthetic.
// NOTE: currently only file paths corresponding to redacted URIs reside under this folder. The path
// itself never exists and is just a link used for transformation.
static inline bool is_synthetic_path(const string& path, struct fuse* fuse) {
    return android::base::StartsWithIgnoreCase(
            path, fuse->GetTransformsDir() + "/" + TRANSFORM_SYNTHETIC_DIR);
}

static inline bool is_transforms_dir_path(const string& path, struct fuse* fuse) {
    return android::base::StartsWithIgnoreCase(path, fuse->GetTransformsDir());
}

static inline bool is_picker_transcoded_dir_path(const string& path, struct fuse* fuse) {
    return android::base::StartsWithIgnoreCase(path, fuse->GetPickerTranscodedDir());
}

static inline bool is_hidden_dir_path(const string& path, struct fuse* fuse) {
    return is_transforms_dir_path(path, fuse) || is_picker_transcoded_dir_path(path, fuse);
}
552
static std::unique_ptr<mediaprovider::fuse::FileLookupResult> validate_node_path(
        const std::string& path, const std::string& name, fuse_req_t req, int* error_code,
        struct fuse_entry_param* e, const FuseOp op) {
556 struct fuse* fuse = get_fuse(req);
557 const struct fuse_ctx* ctx = fuse_req_ctx(req);
558 memset(e, 0, sizeof(*e));
559
560 const bool synthetic_path = is_synthetic_path(path, fuse);
561 if (lstat(path.c_str(), &e->attr) < 0 && !(op == FuseOp::lookup && synthetic_path)) {
562 *error_code = errno;
563 return nullptr;
564 }
565
566 if (is_hidden_dir_path(path, fuse)) {
567 if (op == FuseOp::lookup) {
568 // Lookups are only allowed under .transforms/synthetic dir
569 if (!(android::base::EqualsIgnoreCase(path, fuse->GetTransformsDir()) ||
570 android::base::StartsWithIgnoreCase(
571 path, fuse->GetTransformsDir() + "/" + TRANSFORM_SYNTHETIC_DIR))) {
572 *error_code = ENONET;
573 return nullptr;
574 }
575 } else {
            // User code is only allowed to make lookups under the .transforms dir, and even then
            // only under the .transforms/synthetic dir.
            // Additionally, no operations are allowed on the .picker_transcoded directory
            // as it should only be used by MediaProvider.
580 *error_code = ENOENT;
581 return nullptr;
582 }
583 }
584
585 if (S_ISDIR(e->attr.st_mode)) {
586 // now that we have reached this point, ops on directories are safe and require no
587 // transformation.
588 return std::make_unique<mediaprovider::fuse::FileLookupResult>(0, 0, 0, true, false, "");
589 }
590
591 if (!synthetic_path && !fuse->IsTranscodeSupportedPath(path)) {
592 // Transforms are only supported for synthetic or transcode-supported paths
593 return std::make_unique<mediaprovider::fuse::FileLookupResult>(0, 0, 0, true, false, "");
594 }
595
596 // Handle potential file transforms
597 std::unique_ptr<mediaprovider::fuse::FileLookupResult> file_lookup_result =
598 fuse->mp->FileLookup(path, req->ctx.uid, req->ctx.pid);
599
600 if (!file_lookup_result) {
601 // Fail lookup if we can't fetch FileLookupResult for path
602 LOG(WARNING) << "Failed to fetch FileLookupResult for " << path;
603 *error_code = EFAULT;
604 return nullptr;
605 }
606
607 const string& io_path = file_lookup_result->io_path;
608 // Update size with io_path iff there's an io_path
609 if (!io_path.empty() && (lstat(io_path.c_str(), &e->attr) < 0)) {
610 *error_code = errno;
611 return nullptr;
612 }
613
614 return file_lookup_result;
615 }
616
static node* make_node_entry(fuse_req_t req, node* parent, const string& name,
                             const string& parent_path, const string& path,
                             struct fuse_entry_param* e, int* error_code, const FuseOp op) {
620 struct fuse* fuse = get_fuse(req);
621 const struct fuse_ctx* ctx = fuse_req_ctx(req);
622 node* node;
623
624 memset(e, 0, sizeof(*e));
625
626 std::unique_ptr<mediaprovider::fuse::FileLookupResult> file_lookup_result =
627 validate_node_path(path, name, req, error_code, e, op);
628 if (!file_lookup_result) {
        // Fail lookup if we can't validate |path|; |error_code| will have already been set
630 return nullptr;
631 }
632
633 bool should_invalidate = file_lookup_result->transforms_supported;
634 const bool transforms_complete = file_lookup_result->transforms_complete;
635 const int transforms = file_lookup_result->transforms;
636 const int transforms_reason = file_lookup_result->transforms_reason;
637 const string& io_path = file_lookup_result->io_path;
638 if (transforms) {
639 // If the node requires transforms, we MUST never cache it in the VFS
640 CHECK(should_invalidate);
641 }
642
643 node = parent->LookupChildByName(name, true /* acquire */, transforms);
644 if (!node) {
645 ino_t ino = e->attr.st_ino;
646 node = ::node::Create(parent, name, io_path, transforms_complete, transforms,
647 transforms_reason, &fuse->lock, ino, &fuse->tracker);
648 } else if (!mediaprovider::fuse::containsMount(path)) {
        // Only invalidate a path if it does not contain a mount and |name| != node->GetName.
        // Invalidate both names to ensure there's no dentry left in the kernel after the following
        // operations:
        // 1) touch foo, touch FOO, unlink *foo*
        // 2) touch foo, touch FOO, unlink *FOO*
        // Invalidating lookup_name fixes (1) and invalidating node_name fixes (2)
        // -Set |should_invalidate| to true to invalidate lookup_name by using 0 timeout below
        // -Explicitly invalidate node_name. Note that we invalidate async otherwise we will
        //  deadlock the kernel
658 if (name != node->GetName()) {
659 // Force node invalidation to fix the kernel dentry cache for case (1) above
660 should_invalidate = true;
661 // Make copies of the node name and path so we're not attempting to acquire
662 // any node locks from the invalidation thread. Depending on timing, we may end
663 // up invalidating the wrong inode but that shouldn't result in correctness issues.
664 const fuse_ino_t parent_ino = fuse->ToInode(parent);
665 const fuse_ino_t child_ino = fuse->ToInode(node);
666 const std::string& node_name = node->GetName();
667 std::thread t([=]() { fuse_inval(fuse->se, parent_ino, child_ino, node_name, path); });
668 t.detach();
669 // Update the name after |node_name| reference above has been captured in lambda
670 // This avoids invalidating the node again on subsequent accesses with |name|
671 node->SetName(name);
672 }
673
        // This updated value allows us to correctly decide whether to keep_cache and use direct_io
        // during FUSE_OPEN. Between the last lookup and this lookup, we might have deleted a cached
        // transcoded file on the lower fs. A subsequent transcode at FUSE_READ should ensure we
        // don't reuse any stale transcode page cache content.
678 node->SetTransformsComplete(transforms_complete);
679 }
680 TRACE_NODE(node, req);
681
682 if (should_invalidate && fuse->IsTranscodeSupportedPath(path)) {
        // Some components like the MTP stack need an efficient mechanism to determine if a file
        // supports transcoding. This allows them to work around an issue with MTP clients on
        // Windows where those clients incorrectly use the original file size instead of the
        // transcoded file size to copy files from the device. This size misuse causes transcoded
        // files to be truncated to the original file size, hence corrupting the transcoded file.
        //
        // We expose the transcode bit via the st_nlink stat field. This should be safe because the
        // field is not supported on FAT filesystems which FUSE is emulating.
        // WARNING: Apps should never rely on this behavior as it is NOT a supported API and will be
        // removed in a future release when the MTP stack has better support for transcoded files
        // on Windows OS.
694 e->attr.st_nlink = 2;
695 }
696
697 // This FS is not being exported via NFS so just a fixed generation number
698 // for now. If we do need this, we need to increment the generation ID each
699 // time the fuse daemon restarts because that's what it takes for us to
700 // reuse inode numbers.
701 e->generation = 0;
702 e->ino = fuse->ToInode(node);
703
704 // When FUSE BPF is used, the caching of node attributes and lookups is
705 // disabled to avoid possible inconsistencies between the FUSE cache and
706 // the lower file system state.
707 // With FUSE BPF the file system requests are forwarded to the lower file
708 // system bypassing the FUSE daemon, so dropping the caching does not
709 // introduce a performance regression.
710 // Currently FUSE BPF is limited to the Android/data and Android/obb
711 // directories.
712 if (!fuse->bpf || !is_bpf_backing_path(parent_path)) {
713 e->entry_timeout = get_entry_timeout(path, should_invalidate, fuse);
714 e->attr_timeout = fuse->ShouldNotCache(path) ? 0 : std::numeric_limits<double>::max();
715 }
716 return node;
717 }
718
719 namespace mediaprovider {
720 namespace fuse {
721
722 /**
723 * Function implementations
724 *
725 * These implement the various functions in fuse_lowlevel_ops
726 *
727 */
728
bool IsUpstreamPassthroughSupported() {
    // Upstream passthrough requires some modifications to work. If those are present,
    // /sys/fs/fuse/features/fuse_passthrough will read 'supported\n'
    // - see fs/fuse/inode.c in the kernel source
733
734 string contents;
735 const char* filename = "/sys/fs/fuse/features/fuse_passthrough";
736 if (!android::base::ReadFileToString(filename, &contents)) {
737 LOG(INFO) << "fuse-passthrough is disabled because " << filename << " cannot be read";
738 return false;
739 }
740
741 if (contents == "supported\n") {
742 LOG(INFO) << "fuse-passthrough is enabled because " << filename << " reads 'supported'";
743 return true;
744 } else {
745 LOG(INFO) << "fuse-passthrough is disabled because " << filename
746 << " does not read 'supported'";
747 return false;
748 }
749 }
750
static void pf_init(void* userdata, struct fuse_conn_info* conn) {
752 struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
753
754 // Check the same property as android.os.Build.IS_ARC.
755 const bool is_arc = android::base::GetBoolProperty("ro.boot.container", false);
756
757 // We don't want a getattr request with every read request
758 conn->want &= ~FUSE_CAP_AUTO_INVAL_DATA & ~FUSE_CAP_READDIRPLUS_AUTO;
759 uint64_t mask = (FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE | FUSE_CAP_SPLICE_READ |
760 FUSE_CAP_ASYNC_READ | FUSE_CAP_ATOMIC_O_TRUNC | FUSE_CAP_WRITEBACK_CACHE |
761 FUSE_CAP_EXPORT_SUPPORT | FUSE_CAP_FLOCK_LOCKS);
762 // Disable writeback cache if it's uncached mode or if it's ARC. In ARC, due to the Downloads
763 // bind-mount, we need to disable it on the primary emulated volume as well as on StubVolumes.
764 if (fuse->uncached_mode || is_arc) {
765 mask &= ~FUSE_CAP_WRITEBACK_CACHE;
766 }
767
768 bool disable_splice_write = false;
769 if (fuse->passthrough) {
770 if (conn->capable & FUSE_CAP_PASSTHROUGH) {
771 mask |= FUSE_CAP_PASSTHROUGH;
772
773 // SPLICE_WRITE seems to cause linux kernel cache corruption with passthrough enabled.
774 // It is still under investigation but while running
775 // ScopedStorageDeviceTest#testAccessMediaLocationInvalidation, we notice test flakes
776 // of about 1/20 for the following reason:
777 // 1. App without ACCESS_MEDIA_LOCATION permission reads redacted bytes via FUSE cache
778 // 2. App with ACCESS_MEDIA_LOCATION permission reads non-redacted bytes via passthrough
779 // cache
780 // (2) fails because bytes from (1) sneak into the passthrough cache??
781 // To workaround, we disable splice for write when passthrough is enabled.
782 // This shouldn't have any performance regression if comparing passthrough devices to
783 // no-passthrough devices for the following reasons:
784 // 1. No-op for no-passthrough devices
785 // 2. Passthrough devices
786 // a. Files not requiring redaction use passthrough which bypasses FUSE_READ entirely
787 // b. Files requiring redaction are still faster than no-passthrough devices that use
788 // direct_io
789 disable_splice_write = true;
790 } else if ((conn->capable & FUSE_CAP_PASSTHROUGH_UPSTREAM) &&
791 IsUpstreamPassthroughSupported()) {
792 mask |= FUSE_CAP_PASSTHROUGH_UPSTREAM;
793 disable_splice_write = true;
794 fuse->upstream_passthrough = true;
795 } else {
796 LOG(WARNING) << "Passthrough feature not supported by the kernel";
797 fuse->passthrough = false;
798 }
799 }
800
801 conn->want |= conn->capable & mask;
802 if (disable_splice_write) {
803 conn->want &= ~FUSE_CAP_SPLICE_WRITE;
804 }
805
806 conn->max_read = MAX_READ_SIZE;
807
808 fuse->active->store(true, std::memory_order_release);
809 }
810
static void removeInstance(struct fuse* fuse, std::string instance_name) {
812 if (fuse->level_db_connection_map.find(instance_name) != fuse->level_db_connection_map.end()) {
813 delete fuse->level_db_connection_map[instance_name];
814 (fuse->level_db_connection_map).erase(instance_name);
815 LOG(INFO) << "Removed leveldb connection for " << instance_name;
816 }
817 }
818
static void removeLevelDbConnection(struct fuse* fuse) {
820 fuse->level_db_mutex.lock();
821 if (android::base::StartsWith(fuse->path, PRIMARY_VOLUME_PREFIX)) {
822 removeInstance(fuse, VOLUME_INTERNAL);
823 removeInstance(fuse, OWNERSHIP_RELATION);
824 removeInstance(fuse, VOLUME_EXTERNAL_PRIMARY);
825 } else {
826 // Return "C58E-1702" from the path like "/storage/C58E-1702"
827 std::string volume_name = (fuse->path).substr(9);
828 // Convert to lowercase
829 std::transform(volume_name.begin(), volume_name.end(), volume_name.begin(), ::tolower);
830 removeInstance(fuse, volume_name);
831 }
832 fuse->level_db_mutex.unlock();
833 }
834
static void pf_destroy(void* userdata) {
836 struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
837 removeLevelDbConnection(fuse);
838 LOG(INFO) << "DESTROY " << fuse->path;
839
840 node::DeleteTree(fuse->root);
841 }
842
843 // Return true if the path is accessible for that uid.
static bool is_app_accessible_path(struct fuse* fuse, const string& path, uid_t uid) {
845 MediaProviderWrapper* mp = fuse->mp;
846
847 if (uid < AID_APP_START || uid == MY_UID) {
848 return true;
849 }
850
851 if (path == PRIMARY_VOLUME_PREFIX) {
        // Apps should never refer to /storage/emulated - they should be using the user-specific
        // subdirs, e.g. /storage/emulated/0
854 return false;
855 }
856
857 std::smatch match;
858 if (std::regex_match(path, match, PATTERN_OWNED_PATH)) {
859 const std::string& pkg = match[1];
860 // .nomedia is not a valid package. .nomedia always exists in /Android/data directory,
861 // and it's not an external file/directory of any package
862 if (pkg == ".nomedia") {
863 return true;
864 }
865 if (!fuse->bpf && android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
866 // Emulated storage bind-mounts app-private data directories, and so these
867 // should not be accessible through FUSE anyway.
868 LOG(WARNING) << "Rejected access to app-private dir on FUSE: " << path
869 << " from uid: " << uid;
870 return false;
871 }
872 if (!mp->isUidAllowedAccessToDataOrObbPath(uid, path)) {
873 PLOG(WARNING) << "Invalid other package file access from " << uid << "(: " << path;
874 return false;
875 }
876 }
877 return true;
878 }
879
void fuse_bpf_fill_entries(const string& path, const int bpf_fd, struct fuse_entry_param* e,
                           int& backing_fd) {
882 /*
883 * The file descriptor `backing_fd` must not be closed as it is closed
884 * automatically by the kernel as soon as it consumes the FUSE reply. This
885 * mechanism is necessary because userspace doesn't know when the kernel
886 * will consume the FUSE response containing `backing_fd`, thus it may close
887 * the `backing_fd` too soon, with the risk of assigning a backing file
888 * which is either invalid or corresponds to the wrong file in the lower
889 * file system.
890 */
891 backing_fd = open(path.c_str(), O_CLOEXEC | O_DIRECTORY | O_RDONLY);
892 if (backing_fd < 0) {
893 PLOG(ERROR) << "Failed to open: " << path;
894 return;
895 }
896
897 e->backing_action = FUSE_ACTION_REPLACE;
898 e->backing_fd = backing_fd;
899
900 if (bpf_fd >= 0) {
901 e->bpf_action = FUSE_ACTION_REPLACE;
902 e->bpf_fd = bpf_fd;
903 } else if (bpf_fd == static_cast<int>(BpfFd::REMOVE)) {
904 e->bpf_action = FUSE_ACTION_REMOVE;
905 } else {
906 e->bpf_action = FUSE_ACTION_KEEP;
907 }
908 }
909
void fuse_bpf_install(struct fuse* fuse, struct fuse_entry_param* e, const string& child_path,
                      int& backing_fd) {
912 // TODO(b/211873756) Enable only for the primary volume. Must be
913 // extended for other media devices.
914 if (android::base::StartsWith(child_path, PRIMARY_VOLUME_PREFIX)) {
915 if (is_bpf_backing_path(child_path)) {
916 fuse_bpf_fill_entries(child_path, fuse->bpf_fd.get(), e, backing_fd);
917 } else if (is_package_owned_path(child_path, fuse->path)) {
918 fuse_bpf_fill_entries(child_path, static_cast<int>(BpfFd::REMOVE), e, backing_fd);
919 }
920 }
921 }
922
923 static std::regex storage_emulated_regex("^\\/storage\\/emulated\\/([0-9]+)");
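// e.g. for "/storage/emulated/10/DCIM/a.jpg" the regex above captures "10" (the user id) in match[1].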
924
static bool is_user_accessible_path(fuse_req_t req, const struct fuse* fuse, const string& path) {
926 std::smatch match;
927 std::regex_search(path, match, storage_emulated_regex);
928
    // Ensure the FuseDaemon user id matches the user id in the requested path, or that cross-user
    // lookups are allowed for it
931 if (match.size() == 2 && std::to_string(getuid() / PER_USER_RANGE) != match[1].str()) {
932 // If user id mismatch, check cross-user lookups
933 long userId = strtol(match[1].str().c_str(), nullptr, 10);
934 if (userId < 0 || userId > MAX_USER_ID ||
935 !fuse->mp->ShouldAllowLookup(req->ctx.uid, userId)) {
936 return false;
937 }
938 }
939 return true;
940 }
941
static node* do_lookup(fuse_req_t req, fuse_ino_t parent, const char* name,
                       struct fuse_entry_param* e, int* error_code, const FuseOp op,
                       const bool validate_access, int* backing_fd = NULL) {
945 struct fuse* fuse = get_fuse(req);
946 node* parent_node = fuse->FromInode(parent);
947 if (!parent_node) {
948 *error_code = ENOENT;
949 return nullptr;
950 }
951 string parent_path = parent_node->BuildPath();
952
953 // We should always allow lookups on the root, because failing them could cause
954 // bind mounts to be invalidated.
955 if (validate_access && !fuse->IsRoot(parent_node) &&
956 !is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
957 *error_code = ENOENT;
958 return nullptr;
959 }
960
961 TRACE_NODE(parent_node, req);
962
963 const string child_path = parent_path + "/" + name;
964
965 if (validate_access && !is_user_accessible_path(req, fuse, child_path)) {
966 *error_code = EACCES;
967 return nullptr;
968 }
969
970 auto node = make_node_entry(req, parent_node, name, parent_path, child_path, e, error_code, op);
971
972 if (fuse->bpf) {
973 if (op == FuseOp::lookup) {
974 // Only direct lookup calls support setting backing_fd and bpf program
975 fuse_bpf_install(fuse, e, child_path, *backing_fd);
976 } else if (is_bpf_backing_path(child_path) && op == FuseOp::readdir) {
977 // Fuse-bpf driver implementation doesn’t support providing backing_fd
978 // and bpf program as a part of readdirplus lookup. So we make sure
979 // here we're not making any lookups on backed files because we want
980 // to receive separate lookup calls for them later to set backing_fd and bpf.
981 e->ino = 0;
982 }
983 }
984
985 return node;
986 }
987
static void pf_lookup(fuse_req_t req, fuse_ino_t parent, const char* name) {
989 ATRACE_CALL();
990 struct fuse_entry_param e;
991 int backing_fd = -1;
992
993 int error_code = 0;
994 if (do_lookup(req, parent, name, &e, &error_code, FuseOp::lookup, true, &backing_fd)) {
995 fuse_reply_entry(req, &e);
996 } else {
997 CHECK(error_code != 0);
998 fuse_reply_err(req, error_code);
999 }
1000
1001 if (backing_fd != -1) close(backing_fd);
1002 }
1003
static void pf_lookup_postfilter(fuse_req_t req, fuse_ino_t parent, uint32_t error_in,
                                 const char* name, struct fuse_entry_out* feo,
                                 struct fuse_entry_bpf_out* febo) {
1007 struct fuse* fuse = get_fuse(req);
1008
1009 ATRACE_CALL();
1010 node* parent_node = fuse->FromInode(parent);
1011 if (!parent_node) {
1012 fuse_reply_err(req, ENOENT);
1013 return;
1014 }
1015
1016 TRACE_NODE(parent_node, req);
1017 const string path = parent_node->BuildPath() + "/" + name;
1018 if (strcmp(name, ".nomedia") != 0 &&
1019 !fuse->mp->isUidAllowedAccessToDataOrObbPath(req->ctx.uid, path)) {
1020 fuse_reply_err(req, ENOENT);
1021 return;
1022 }
1023
1024 struct {
1025 struct fuse_entry_out feo;
1026 struct fuse_entry_bpf_out febo;
1027 } buf = {*feo, *febo};
1028
1029 fuse_reply_buf(req, (const char*)&buf, sizeof(buf));
1030 }
1031
static void do_forget(fuse_req_t req, struct fuse* fuse, fuse_ino_t ino, uint64_t nlookup) {
1033 node* node = fuse->FromInode(ino);
1034 TRACE_NODE(node, req);
1035 if (node) {
1036 // This is a narrowing conversion from an unsigned 64bit to a 32bit value. For
1037 // some reason we only keep 32 bit refcounts but the kernel issues
1038 // forget requests with a 64 bit counter.
1039 int backing_id = node->GetBackingId();
1040 if (node->Release(static_cast<uint32_t>(nlookup))) {
1041 if (backing_id) fuse_passthrough_close(req, backing_id);
1042 }
1043 }
1044 }
1045
static void pf_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup) {
1047 // Always allow to forget so no need to check is_app_accessible_path()
1048 ATRACE_CALL();
1049 node* node;
1050 struct fuse* fuse = get_fuse(req);
1051
1052 do_forget(req, fuse, ino, nlookup);
1053 fuse_reply_none(req);
1054 }
1055
static void pf_forget_multi(fuse_req_t req,
                            size_t count,
                            struct fuse_forget_data* forgets) {
1059 ATRACE_CALL();
1060 struct fuse* fuse = get_fuse(req);
1061
1062 for (int i = 0; i < count; i++) {
1063 do_forget(req, fuse, forgets[i].ino, forgets[i].nlookup);
1064 }
1065 fuse_reply_none(req);
1066 }
1067
static void pf_fallocate(fuse_req_t req, fuse_ino_t ino, int mode, off_t offset, off_t length,
                         fuse_file_info* fi) {
1070 ATRACE_CALL();
1071 struct fuse* fuse = get_fuse(req);
1072
1073 handle* h = reinterpret_cast<handle*>(fi->fh);
1074 auto err = fallocate(h->fd, mode, offset, length);
1075 fuse_reply_err(req, err ? errno : 0);
1076 }
1077
static void pf_getattr(fuse_req_t req,
                       fuse_ino_t ino,
                       struct fuse_file_info* fi) {
1081 ATRACE_CALL();
1082 struct fuse* fuse = get_fuse(req);
1083 node* node = fuse->FromInode(ino);
1084 if (!node) {
1085 fuse_reply_err(req, ENOENT);
1086 return;
1087 }
1088 const string& path = get_path(node);
1089 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1090 fuse_reply_err(req, ENOENT);
1091 return;
1092 }
1093 TRACE_NODE(node, req);
1094
1095 struct stat s;
1096 memset(&s, 0, sizeof(s));
1097 if (lstat(path.c_str(), &s) < 0) {
1098 fuse_reply_err(req, errno);
1099 } else {
1100 fuse_reply_attr(req, &s,
1101 fuse->ShouldNotCache(path) ? 0 : std::numeric_limits<double>::max());
1102 }
1103 }
1104
static void pf_setattr(fuse_req_t req,
                       fuse_ino_t ino,
                       struct stat* attr,
                       int to_set,
                       struct fuse_file_info* fi) {
1110 ATRACE_CALL();
1111 struct fuse* fuse = get_fuse(req);
1112 node* node = fuse->FromInode(ino);
1113 if (!node) {
1114 fuse_reply_err(req, ENOENT);
1115 return;
1116 }
1117 const string& path = get_path(node);
1118 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1119 fuse_reply_err(req, ENOENT);
1120 return;
1121 }
1122
1123 int fd = -1;
1124 if (fi) {
1125 // If we have a file_info, setattr was called with an fd so use the fd instead of path
1126 handle* h = reinterpret_cast<handle*>(fi->fh);
1127 fd = h->fd;
1128 } else {
1129 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1130 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
1131 path, path, ctx->uid, ctx->pid, node->GetTransformsReason(), true /* for_write */,
1132 false /* redact */, false /* log_transforms_metrics */);
1133
1134 if (!result) {
1135 fuse_reply_err(req, EFAULT);
1136 return;
1137 }
1138
1139 if (result->status) {
1140 fuse_reply_err(req, EACCES);
1141 return;
1142 }
1143 }
1144 struct timespec times[2];
1145 TRACE_NODE(node, req);
1146
1147 /* XXX: incomplete implementation on purpose.
1148 * chmod/chown should NEVER be implemented.*/
1149
1150 if ((to_set & FUSE_SET_ATTR_SIZE)) {
1151 int res = 0;
1152 if (fd == -1) {
1153 res = truncate64(path.c_str(), attr->st_size);
1154 } else {
1155 res = ftruncate64(fd, attr->st_size);
1156 }
1157
1158 if (res < 0) {
1159 fuse_reply_err(req, errno);
1160 return;
1161 }
1162 }
1163
    /* Handle changing atime and mtime. If FATTR_ATIME and FATTR_ATIME_NOW
     * are both set, then set it to the current time. Else, set it to the
     * time specified in the request. Same goes for mtime. Use utimensat(2)
     * as it allows ATIME and MTIME to be changed independently, and has
     * nanosecond resolution which fuse also has.
     */
1170 if (to_set & (FATTR_ATIME | FATTR_MTIME)) {
1171 times[0].tv_nsec = UTIME_OMIT;
1172 times[1].tv_nsec = UTIME_OMIT;
1173 if (to_set & FATTR_ATIME) {
1174 if (to_set & FATTR_ATIME_NOW) {
1175 times[0].tv_nsec = UTIME_NOW;
1176 } else {
1177 times[0] = attr->st_atim;
1178 }
1179 }
1180
1181 if (to_set & FATTR_MTIME) {
1182 if (to_set & FATTR_MTIME_NOW) {
1183 times[1].tv_nsec = UTIME_NOW;
1184 } else {
1185 times[1] = attr->st_mtim;
1186 }
1187 }
1188
1189 TRACE_NODE(node, req);
1190 int res = 0;
1191 if (fd == -1) {
1192 res = utimensat(-1, path.c_str(), times, 0);
1193 } else {
1194 res = futimens(fd, times);
1195 }
1196
1197 if (res < 0) {
1198 fuse_reply_err(req, errno);
1199 return;
1200 }
1201 }
1202
1203 lstat(path.c_str(), attr);
1204 fuse_reply_attr(req, attr, fuse->ShouldNotCache(path) ? 0 : std::numeric_limits<double>::max());
1205 }
1206
static void pf_canonical_path(fuse_req_t req, fuse_ino_t ino)
{
1209 struct fuse* fuse = get_fuse(req);
1210 node* node = fuse->FromInode(ino);
1211 const string& path = node ? get_path(node) : "";
1212
1213 if (node && is_app_accessible_path(fuse, path, req->ctx.uid)) {
1214 // TODO(b/147482155): Check that uid has access to |path| and its contents
1215 fuse_reply_canonical_path(req, path.c_str());
1216 return;
1217 }
1218 fuse_reply_err(req, ENOENT);
1219 }
1220
static void pf_mknod(fuse_req_t req,
                     fuse_ino_t parent,
                     const char* name,
                     mode_t mode,
                     dev_t rdev) {
1226 ATRACE_CALL();
1227 struct fuse* fuse = get_fuse(req);
1228 node* parent_node = fuse->FromInode(parent);
1229 if (!parent_node) {
1230 fuse_reply_err(req, ENOENT);
1231 return;
1232 }
1233 string parent_path = parent_node->BuildPath();
1234 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
1235 fuse_reply_err(req, ENOENT);
1236 return;
1237 }
1238
1239 TRACE_NODE(parent_node, req);
1240
1241 const string child_path = parent_path + "/" + name;
1242
1243 mode = (mode & (~0777)) | 0664;
1244 if (mknod(child_path.c_str(), mode, rdev) < 0) {
1245 fuse_reply_err(req, errno);
1246 return;
1247 }
1248
1249 int error_code = 0;
1250 struct fuse_entry_param e;
1251 if (make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
1252 FuseOp::mknod)) {
1253 fuse_reply_entry(req, &e);
1254 } else {
1255 CHECK(error_code != 0);
1256 fuse_reply_err(req, error_code);
1257 }
1258 }
1259
static void pf_mkdir(fuse_req_t req,
                     fuse_ino_t parent,
                     const char* name,
                     mode_t mode) {
1264 ATRACE_CALL();
1265 struct fuse* fuse = get_fuse(req);
1266 node* parent_node = fuse->FromInode(parent);
1267 if (!parent_node) {
1268 fuse_reply_err(req, ENOENT);
1269 return;
1270 }
1271 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1272 const string parent_path = parent_node->BuildPath();
1273 if (!is_app_accessible_path(fuse, parent_path, ctx->uid)) {
1274 fuse_reply_err(req, ENOENT);
1275 return;
1276 }
1277
1278 TRACE_NODE(parent_node, req);
1279
1280 const string child_path = parent_path + "/" + name;
1281
1282 int status = fuse->mp->IsCreatingDirAllowed(child_path, ctx->uid);
1283 if (status) {
1284 fuse_reply_err(req, status);
1285 return;
1286 }
1287
1288 mode = (mode & (~0777)) | 0775;
1289 if (mkdir(child_path.c_str(), mode) < 0) {
1290 fuse_reply_err(req, errno);
1291 return;
1292 }
1293
1294 int error_code = 0;
1295 struct fuse_entry_param e;
1296 if (make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
1297 FuseOp::mkdir)) {
1298 fuse_reply_entry(req, &e);
1299 } else {
1300 CHECK(error_code != 0);
1301 fuse_reply_err(req, error_code);
1302 }
1303 }
1304
static void pf_unlink(fuse_req_t req, fuse_ino_t parent, const char* name) {
1306 ATRACE_CALL();
1307 struct fuse* fuse = get_fuse(req);
1308 node* parent_node = fuse->FromInode(parent);
1309 if (!parent_node) {
1310 fuse_reply_err(req, ENOENT);
1311 return;
1312 }
1313 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1314 const string parent_path = parent_node->BuildPath();
1315 if (!is_app_accessible_path(fuse, parent_path, ctx->uid)) {
1316 fuse_reply_err(req, ENOENT);
1317 return;
1318 }
1319
1320 TRACE_NODE(parent_node, req);
1321
1322 const string child_path = parent_path + "/" + name;
1323
1324 int status = fuse->mp->DeleteFile(child_path, ctx->uid);
1325 if (status) {
1326 fuse_reply_err(req, status);
1327 return;
1328 }
1329
1330 // TODO(b/169306422): Log each deleted node
1331 parent_node->SetDeletedForChild(name);
1332 fuse_reply_err(req, 0);
1333 }
1334
static void pf_rmdir(fuse_req_t req, fuse_ino_t parent, const char* name) {
1336 ATRACE_CALL();
1337 struct fuse* fuse = get_fuse(req);
1338 node* parent_node = fuse->FromInode(parent);
1339 if (!parent_node) {
1340 fuse_reply_err(req, ENOENT);
1341 return;
1342 }
1343 const string parent_path = parent_node->BuildPath();
1344 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
1345 fuse_reply_err(req, ENOENT);
1346 return;
1347 }
1348
1349 if (is_hidden_dir_path(parent_path, fuse)) {
        // .transforms and .picker_transcoded are special daemon-controlled dirs, so apps shouldn't
        // be able to see them via readdir, and any dir operations attempted on them should fail
1352 fuse_reply_err(req, ENOENT);
1353 return;
1354 }
1355
1356 TRACE_NODE(parent_node, req);
1357
1358 const string child_path = parent_path + "/" + name;
1359
1360 int status = fuse->mp->IsDeletingDirAllowed(child_path, req->ctx.uid);
1361 if (status) {
1362 fuse_reply_err(req, status);
1363 return;
1364 }
1365
1366 if (rmdir(child_path.c_str()) < 0) {
1367 fuse_reply_err(req, errno);
1368 return;
1369 }
1370
1371 node* child_node = parent_node->LookupChildByName(name, false /* acquire */);
1372 TRACE_NODE(child_node, req);
1373 if (child_node) {
1374 child_node->SetDeleted();
1375 }
1376
1377 fuse_reply_err(req, 0);
1378 }
1379 /*
1380 static void pf_symlink(fuse_req_t req, const char* link, fuse_ino_t parent,
1381 const char* name)
1382 {
1383 cout << "TODO:" << __func__;
1384 }
1385 */
static int do_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
                     const char* new_name, unsigned int flags) {
1388 ATRACE_CALL();
1389 struct fuse* fuse = get_fuse(req);
1390
    // The VFS handles a request with RENAME_NOREPLACE by ensuring that the new file does not exist
    // before redirecting the call to FuseDaemon.
1393 if (flags & ~RENAME_NOREPLACE) {
1394 return EINVAL;
1395 }
1396
1397 node* old_parent_node = fuse->FromInode(parent);
1398 if (!old_parent_node) return ENOENT;
1399 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1400 const string old_parent_path = old_parent_node->BuildPath();
1401 if (!is_app_accessible_path(fuse, old_parent_path, ctx->uid)) {
1402 return ENOENT;
1403 }
1404
1405 if (is_hidden_dir_path(old_parent_path, fuse)) {
        // .transforms and .picker_transcoded are special daemon-controlled dirs, so apps shouldn't
        // be able to see them via readdir, and any dir operations attempted on them should fail
1408 return ENOENT;
1409 }
1410
1411 node* new_parent_node;
1412 if (fuse->bpf) {
1413 new_parent_node = fuse->FromInodeNoThrow(new_parent);
1414 if (!new_parent_node) return EXDEV;
1415 } else {
1416 new_parent_node = fuse->FromInode(new_parent);
1417 if (!new_parent_node) return ENOENT;
1418 }
1419 const string new_parent_path = new_parent_node->BuildPath();
1420 if (fuse->bpf && is_bpf_backing_path(new_parent_path)) {
1421 return EXDEV;
1422 }
1423 if (!is_app_accessible_path(fuse, new_parent_path, ctx->uid)) {
1424 return ENOENT;
1425 }
1426
1427 if (!old_parent_node || !new_parent_node) {
1428 return ENOENT;
1429 } else if (parent == new_parent && name == new_name) {
1430 // No rename required.
1431 return 0;
1432 }
1433
1434 TRACE_NODE(old_parent_node, req);
1435 TRACE_NODE(new_parent_node, req);
1436
1437 const string old_child_path = old_parent_path + "/" + name;
1438 const string new_child_path = new_parent_path + "/" + new_name;
1439
1440 if (android::base::EqualsIgnoreCase(fuse->GetEffectiveRootPath() + "/android", old_child_path)) {
1441 // Prevent renaming Android/ dir since it contains bind-mounts on the primary volume
1442 return EACCES;
1443 }
1444
1445 // TODO(b/147408834): Check ENOTEMPTY & EEXIST error conditions before JNI call.
1446 const int res = fuse->mp->Rename(old_child_path, new_child_path, req->ctx.uid);
1447 // TODO(b/145663158): Lookups can go out of sync if file/directory is actually moved but
1448 // EFAULT/EIO is reported due to JNI exception.
1449 if (res == 0) {
1450 // Mark any existing destination nodes as deleted. This fixes the following edge case:
1451 // 1. New destination node is forgotten
1452 // 2. Old destination node is not forgotten because there's still an open fd ref to it
1453 // 3. Lookup for |new_name| returns old destination node with stale metadata
1454 new_parent_node->SetDeletedForChild(new_name);
1455 // TODO(b/169306422): Log each renamed node
1456 old_parent_node->RenameChild(name, new_name, new_parent_node);
1457 }
1458 return res;
1459 }
1460
static void pf_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
                      const char* new_name, unsigned int flags) {
1463 int res = do_rename(req, parent, name, new_parent, new_name, flags);
1464 fuse_reply_err(req, res);
1465 }
1466
1467 /*
1468 static void pf_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t new_parent,
1469 const char* new_name)
1470 {
1471 cout << "TODO:" << __func__;
1472 }
1473 */
1474
static handle* create_handle_for_node(struct fuse* fuse, const string& path, int fd, uid_t uid,
                                      uid_t transforms_uid, node* node, const RedactionInfo* ri,
                                      const bool allow_passthrough, const bool open_info_direct_io,
                                      int* keep_cache) {
1479 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
1480
1481 bool redaction_needed = ri->isRedactionNeeded();
1482 handle* handle = nullptr;
1483 int transforms = node->GetTransforms();
1484 bool transforms_complete = node->IsTransformsComplete();
1485 if (transforms_uid > 0) {
1486 CHECK(transforms);
1487 }
1488
1489 if (fuse->passthrough && allow_passthrough) {
1490 *keep_cache = transforms_complete;
        // We only enable passthrough iff these 2 conditions hold:
        // 1. Redaction is not needed
        // 2. Node transforms are completed, e.g. transcoding.
        // (2) is important because we transcode lazily (on the first read) and with passthrough,
        // we will never get a read into the FUSE daemon, so passthrough would have returned
        // arbitrary bytes the first time around. However, if we ensure that transforms are
        // completed, then it's safe to use passthrough. Additionally, transcoded nodes never
        // require redaction so (2) implies (1)
1499 handle = new struct handle(fd, ri, !open_info_direct_io /* cached */,
1500 !redaction_needed && transforms_complete /* passthrough */, uid,
1501 transforms_uid);
1502 } else {
1503 // Without fuse->passthrough, we don't want to use the FUSE VFS cache in two cases:
1504 // 1. When redaction is needed because app A with EXIF access might access
1505 // a region that should have been redacted for app B without EXIF access, but app B on
1506 // a subsequent read, will be able to see the EXIF data because the read request for
1507 // that region will be served from cache and not get to the FUSE daemon
1508 // 2. When the file has a read or write lock on it. This means that the MediaProvider
1509 // has given an fd to the lower file system to an app. There are two cases where using
1510 // the cache in this case can be a problem:
1511 // a. Writing to a FUSE fd with caching enabled will use the write-back cache and a
1512 // subsequent read from the lower fs fd will not see the write.
1513 // b. Reading from a FUSE fd with caching enabled may not see the latest writes using
1514 // the lower fs fd because those writes did not go through the FUSE layer and reads from
1515 // FUSE after that write may be served from cache
1516 bool has_redacted = node->HasRedactedCache();
1517 bool is_redaction_change =
1518 (redaction_needed && !has_redacted) || (!redaction_needed && has_redacted);
1519 bool is_cached_file_open = node->HasCachedHandle();
1520 bool direct_io = open_info_direct_io || (is_cached_file_open && is_redaction_change) ||
1521 is_file_locked(fd, path) || fuse->ShouldNotCache(path);
1522
1523 if (!is_cached_file_open && is_redaction_change) {
1524 node->SetRedactedCache(redaction_needed);
1525 // Purges stale page cache before open
1526 *keep_cache = 0;
1527 } else {
1528 *keep_cache = transforms_complete;
1529 }
1530 handle = new struct handle(fd, ri, !direct_io /* cached */, false /* passthrough */, uid,
1531 transforms_uid);
1532 }
1533
1534 node->AddHandle(handle);
1535 return handle;
1536 }
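// Illustrative summary of the caching decision made in create_handle_for_node() above
// (derived from the branches in that function, not an exhaustive truth table):
//   - fuse->passthrough && allow_passthrough && !redaction_needed && transforms_complete
//       -> passthrough handle; kernel page cache kept when transforms are complete
//   - redaction state changed and no cached handle is currently open
//       -> cached handle, but keep_cache = 0 so stale (un)redacted pages are purged
//   - O_DIRECT requested, file locked on the lower fs, or path marked as uncached
//       -> direct_io handle; the FUSE VFS cache is bypassed entirely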
1537
1538 static bool do_passthrough_enable(fuse_req_t req, struct fuse_file_info* fi, unsigned int fd,
1539 node* node) {
1540 struct fuse* fuse = get_fuse(req);
1541
1542 if (fuse->upstream_passthrough) {
1543 int backing_id = node->GetBackingId();
1544 if (!backing_id) {
1545 backing_id = fuse_passthrough_open(req, fd);
1546 if (!backing_id) return false;
1547 // We only ever want one backing id per backed file
1548 if (!node->SetBackingId(backing_id)) {
1549 fuse_passthrough_close(req, backing_id);
1550 backing_id = node->GetBackingId();
1551 if (!backing_id) return false;
1552 }
1553 }
1554
1555 fi->backing_id = backing_id;
1556 } else {
1557 int passthrough_fh = fuse_passthrough_enable(req, fd);
1558
1559 if (passthrough_fh <= 0) {
1560 return false;
1561 }
1562
1563 fi->passthrough_fh = passthrough_fh;
1564 }
1565 return true;
1566 }
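// Note on the two passthrough mechanisms handled above: with upstream (kernel) passthrough we
// register a single backing_id per backed file and reuse it for every open, falling back to the
// id registered by a concurrent open if SetBackingId() loses the race; with the legacy
// passthrough API each open gets its own passthrough_fh instead.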
1567
1568 static OpenInfo parse_open_flags(const string& path, const int in_flags) {
1569 const bool for_write = in_flags & (O_WRONLY | O_RDWR);
1570 int out_flags = in_flags;
1571 bool direct_io = false;
1572
1573 if (in_flags & O_DIRECT) {
1574 // Set direct IO on the FUSE fs file
1575 direct_io = true;
1576
1577 if (android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
1578 // Remove O_DIRECT because there are strict alignment requirements for direct IO and
1579 // there were some historical bugs affecting encrypted block devices.
1580 // Hence, this is only supported on public volumes.
1581 out_flags &= ~O_DIRECT;
1582 }
1583 }
1584 if (in_flags & O_WRONLY) {
1585 // Replace O_WRONLY with O_RDWR because even if the FUSE fd is opened write-only, the FUSE
1586         // driver might issue reads on the lower fs with the writeback cache enabled
1587 out_flags &= ~O_WRONLY;
1588 out_flags |= O_RDWR;
1589 }
1590 if (in_flags & O_APPEND) {
1591         // Remove O_APPEND because passing it to the lower fs can lead to file corruption when
1592         // multiple FUSE threads race with each other. With the writeback cache enabled, the FUSE
1593         // driver already handles O_APPEND
1594 out_flags &= ~O_APPEND;
1595 }
1596
1597 return {.flags = out_flags, .for_write = for_write, .direct_io = direct_io};
1598 }
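// Illustrative examples of the flag rewriting above (primary volume path assumed):
//   O_RDONLY              -> O_RDONLY                     (unchanged)
//   O_WRONLY              -> O_RDWR                       (writeback cache may trigger reads)
//   O_WRONLY | O_APPEND   -> O_RDWR                       (O_APPEND handled by the FUSE driver)
//   O_RDWR | O_DIRECT     -> O_RDWR, direct_io = true     (O_DIRECT dropped on primary volumes)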
1599
1600 static void fill_fuse_file_info(const handle* handle, const OpenInfo* open_info,
1601 const int keep_cache, struct fuse_file_info* fi) {
1602 fi->fh = ptr_to_id(handle);
1603 fi->keep_cache = keep_cache;
1604 fi->direct_io = !handle->cached;
1605 }
1606
1607 static void pf_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi) {
1608 ATRACE_CALL();
1609 struct fuse* fuse = get_fuse(req);
1610 node* node = fuse->FromInode(ino);
1611 if (!node) {
1612 fuse_reply_err(req, ENOENT);
1613 return;
1614 }
1615 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1616 const string& io_path = get_path(node);
1617 const string& build_path = node->BuildPath();
1618 if (!is_app_accessible_path(fuse, io_path, ctx->uid)) {
1619 fuse_reply_err(req, ENOENT);
1620 return;
1621 }
1622
1623 const OpenInfo open_info = parse_open_flags(io_path, fi->flags);
1624
1625 if (open_info.for_write && node->GetTransforms()) {
1626 TRACE_NODE(node, req) << "write with transforms";
1627 } else {
1628 TRACE_NODE(node, req) << (open_info.for_write ? "write" : "read");
1629 }
1630
1631 // Force permission check with the build path because the MediaProvider database might not be
1632 // aware of the io_path
1633 // We don't redact if the caller was granted write permission for this file
1634 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
1635 build_path, io_path, ctx->uid, ctx->pid, node->GetTransformsReason(),
1636 open_info.for_write, !open_info.for_write /* redact */,
1637 true /* log_transforms_metrics */);
1638 if (!result) {
1639 fuse_reply_err(req, EFAULT);
1640 return;
1641 }
1642
1643 if (result->status) {
1644 fuse_reply_err(req, result->status);
1645 return;
1646 }
1647
1648 int fd = -1;
1649 const bool is_fd_from_java = result->fd >= 0;
1650 if (is_fd_from_java) {
1651 fd = result->fd;
1652 TRACE_NODE(node, req) << "opened in Java";
1653 } else {
1654 fd = open(io_path.c_str(), open_info.flags);
1655 if (fd < 0) {
1656 fuse_reply_err(req, errno);
1657 return;
1658 }
1659 }
1660
1661 int keep_cache = 1;
1662 // If is_fd_from_java==true, we disallow passthrough because the fd can be pointing to the
1663 // FUSE fs if gotten from another process
1664 const handle* h = create_handle_for_node(fuse, io_path, fd, result->uid, result->transforms_uid,
1665 node, result->redaction_info.release(),
1666 /* allow_passthrough */ !is_fd_from_java,
1667 open_info.direct_io, &keep_cache);
1668 fill_fuse_file_info(h, &open_info, keep_cache, fi);
1669
1670     // TODO(b/173190192) ensuring that h->cached must be enabled in order to
1671     // use FUSE passthrough is a conservative rule and might be dropped as
1672     // soon as its correctness is demonstrated.
1673 if (h->passthrough && !do_passthrough_enable(req, fi, fd, node)) {
1674 // TODO: Should we crash here so we can find errors easily?
1675 PLOG(ERROR) << "Passthrough OPEN failed for " << io_path;
1676 fuse_reply_err(req, EFAULT);
1677 return;
1678 }
1679
1680 fuse_reply_open(req, fi);
1681 }
1682
1683 static void do_read(fuse_req_t req, size_t size, off_t off, struct fuse_file_info* fi,
1684 bool direct_io) {
1685 handle* h = reinterpret_cast<handle*>(fi->fh);
1686 struct fuse_bufvec buf = FUSE_BUFVEC_INIT(size);
1687
1688 buf.buf[0].fd = h->fd;
1689 buf.buf[0].pos = off;
1690 buf.buf[0].flags =
1691 (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1692 if (direct_io) {
1693 // sdcardfs does not register splice_read_file_operations and some requests fail with EFAULT
1694 // Specifically, FUSE splice is only enabled for 8KB+ buffers, hence such reads fail
1695 fuse_reply_data(req, &buf, (enum fuse_buf_copy_flags)FUSE_BUF_NO_SPLICE);
1696 } else {
1697 fuse_reply_data(req, &buf, (enum fuse_buf_copy_flags)0);
1698 }
1699 }
1700
1701 /**
1702 * Sets the parameters for a fuse_buf that reads from memory, including flags.
1703 * Makes buf->mem point to an already mapped region of zeroized memory.
1704 * This memory is read only.
1705 */
1706 static void create_mem_fuse_buf(size_t size, fuse_buf* buf, struct fuse* fuse) {
1707 buf->size = size;
1708 buf->mem = fuse->zero_addr;
1709 buf->flags = static_cast<fuse_buf_flags>(0 /*read from fuse_buf.mem*/);
1710 buf->pos = -1;
1711 buf->fd = -1;
1712 }
1713
1714 /**
1715 * Sets the parameters for a fuse_buf that reads from file, including flags.
1716 */
1717 static void create_file_fuse_buf(size_t size, off_t pos, int fd, fuse_buf* buf) {
1718 buf->size = size;
1719 buf->fd = fd;
1720 buf->pos = pos;
1721 buf->flags = static_cast<fuse_buf_flags>(FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1722 buf->mem = nullptr;
1723 }
1724
1725 static void do_read_with_redaction(fuse_req_t req, size_t size, off_t off, fuse_file_info* fi,
1726 bool direct_io) {
1727 handle* h = reinterpret_cast<handle*>(fi->fh);
1728
1729 std::vector<ReadRange> ranges;
1730 h->ri->getReadRanges(off, size, &ranges);
1731
1732 // As an optimization, return early if there are no ranges to redact.
1733 if (ranges.size() == 0) {
1734 do_read(req, size, off, fi, direct_io);
1735 return;
1736 }
1737
1738 const size_t num_bufs = ranges.size();
1739 auto bufvec_ptr = std::unique_ptr<fuse_bufvec, decltype(free)*>{
1740 reinterpret_cast<fuse_bufvec*>(
1741 malloc(sizeof(fuse_bufvec) + (num_bufs - 1) * sizeof(fuse_buf))),
1742 free};
1743 fuse_bufvec& bufvec = *bufvec_ptr;
1744
1745 // initialize bufvec
1746 bufvec.count = num_bufs;
1747 bufvec.idx = 0;
1748 bufvec.off = 0;
1749
1750 for (int i = 0; i < num_bufs; ++i) {
1751 const ReadRange& range = ranges[i];
1752 if (range.is_redaction) {
1753 create_mem_fuse_buf(range.size, &(bufvec.buf[i]), get_fuse(req));
1754 } else {
1755 create_file_fuse_buf(range.size, range.start, h->fd, &(bufvec.buf[i]));
1756 }
1757 }
1758
1759 fuse_reply_data(req, &bufvec, static_cast<fuse_buf_copy_flags>(0));
1760 }
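// Illustrative example of the bufvec built above, assuming getReadRanges() reports one
// redacted range [40, 60) inside a read(off=0, size=100):
//   buf[0]: file-backed,   fd = h->fd,            pos = 0,  size = 40
//   buf[1]: memory-backed, mem = fuse->zero_addr,           size = 20   (zeros instead of EXIF)
//   buf[2]: file-backed,   fd = h->fd,            pos = 60, size = 40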
1761
1762 static void pf_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1763 struct fuse_file_info* fi) {
1764 ATRACE_CALL();
1765 handle* h = reinterpret_cast<handle*>(fi->fh);
1766 if (h == nullptr) {
1767 return;
1768 }
1769 const bool direct_io = !h->cached;
1770 struct fuse* fuse = get_fuse(req);
1771
1772 node* node = fuse->FromInode(ino);
1773
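    // Transforms (e.g. transcoding) are applied lazily here, on the first read of the node;
    // see create_handle_for_node() for why passthrough is only allowed once they are complete.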
1774 if (!node->IsTransformsComplete()) {
1775 if (!fuse->mp->Transform(node->BuildPath(), node->GetIoPath(), node->GetTransforms(),
1776 node->GetTransformsReason(), req->ctx.uid, h->uid,
1777 h->transforms_uid)) {
1778 fuse_reply_err(req, EFAULT);
1779 return;
1780 }
1781 node->SetTransformsComplete(true);
1782 }
1783
1784 fuse->fadviser.Record(h->fd, size);
1785
1786 if (h->ri->isRedactionNeeded()) {
1787 do_read_with_redaction(req, size, off, fi, direct_io);
1788 } else {
1789 do_read(req, size, off, fi, direct_io);
1790 }
1791 }
1792
1793 /*
1794 static void pf_write(fuse_req_t req, fuse_ino_t ino, const char* buf,
1795 size_t size, off_t off, struct fuse_file_info* fi)
1796 {
1797 cout << "TODO:" << __func__;
1798 }
1799 */
1800
1801 static void pf_write_buf(fuse_req_t req,
1802 fuse_ino_t ino,
1803 struct fuse_bufvec* bufv,
1804 off_t off,
1805 struct fuse_file_info* fi) {
1806 ATRACE_CALL();
1807 handle* h = reinterpret_cast<handle*>(fi->fh);
1808 struct fuse_bufvec buf = FUSE_BUFVEC_INIT(fuse_buf_size(bufv));
1809 ssize_t size;
1810 struct fuse* fuse = get_fuse(req);
1811
1812 buf.buf[0].fd = h->fd;
1813 buf.buf[0].pos = off;
1814 buf.buf[0].flags =
1815 (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1816 size = fuse_buf_copy(&buf, bufv, (enum fuse_buf_copy_flags) 0);
1817
1818 if (size < 0)
1819 fuse_reply_err(req, -size);
1820 else {
1821 // Execute Record *before* fuse_reply_write to avoid the following ordering:
1822 // fuse_reply_write -> pf_release (destroy handle) -> Record (use handle after free)
1823 fuse->fadviser.Record(h->fd, size);
1824 fuse_reply_write(req, size);
1825 }
1826 }
1827 // Haven't tested this one. Not sure what calls it.
1828 #if 0
1829 static void pf_copy_file_range(fuse_req_t req, fuse_ino_t ino_in,
1830 off_t off_in, struct fuse_file_info* fi_in,
1831 fuse_ino_t ino_out, off_t off_out,
1832 struct fuse_file_info* fi_out, size_t len,
1833 int flags)
1834 {
1835 handle* h_in = reinterpret_cast<handle *>(fi_in->fh);
1836 handle* h_out = reinterpret_cast<handle *>(fi_out->fh);
1837 struct fuse_bufvec buf_in = FUSE_BUFVEC_INIT(len);
1838 struct fuse_bufvec buf_out = FUSE_BUFVEC_INIT(len);
1839 ssize_t size;
1840
1841 buf_in.buf[0].fd = h_in->fd;
1842 buf_in.buf[0].pos = off_in;
1843 buf_in.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
1844
1845 buf_out.buf[0].fd = h_out->fd;
1846 buf_out.buf[0].pos = off_out;
1847 buf_out.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
1848 size = fuse_buf_copy(&buf_out, &buf_in, (enum fuse_buf_copy_flags) 0);
1849
1850 if (size < 0) {
1851 fuse_reply_err(req, -size);
1852 }
1853
1854 fuse_reply_write(req, size);
1855 }
1856 #endif
1857
1858 /*
1859  * This function does nothing except serve as a placeholder that keeps the FUSE
1860  * driver issuing flushes on close(2).
1861  * In fact, kernels prior to 5.8 stop attempting to flush the cache on close(2)
1862 * if the .flush operation is not implemented by the FUSE daemon.
1863 * This has been fixed in the kernel by commit 614c026e8a46 ("fuse: always
1864 * flush dirty data on close(2)"), merged in Linux 5.8, but until then
1865 * userspace must mitigate this behavior by not leaving the .flush function
1866 * pointer empty.
1867 */
1868 static void pf_flush(fuse_req_t req,
1869 fuse_ino_t ino,
1870 struct fuse_file_info* fi) {
1871 ATRACE_CALL();
1872 struct fuse* fuse = get_fuse(req);
1873 TRACE_NODE(nullptr, req) << "noop";
1874 fuse_reply_err(req, 0);
1875 }
1876
1877 static void pf_release(fuse_req_t req,
1878 fuse_ino_t ino,
1879 struct fuse_file_info* fi) {
1880 ATRACE_CALL();
1881 struct fuse* fuse = get_fuse(req);
1882
1883 node* node = fuse->FromInode(ino);
1884 handle* h = reinterpret_cast<handle*>(fi->fh);
1885 TRACE_NODE(node, req);
1886
1887 fuse->fadviser.Close(h->fd);
1888 if (node) {
1889 node->DestroyHandle(h);
1890 }
1891
1892 fuse_reply_err(req, 0);
1893 }
1894
1895 static int do_sync_common(int fd, bool datasync) {
1896 int res = datasync ? fdatasync(fd) : fsync(fd);
1897
1898 if (res == -1) return errno;
1899 return 0;
1900 }
1901
1902 static void pf_fsync(fuse_req_t req,
1903 fuse_ino_t ino,
1904 int datasync,
1905 struct fuse_file_info* fi) {
1906 ATRACE_CALL();
1907 handle* h = reinterpret_cast<handle*>(fi->fh);
1908 int err = do_sync_common(h->fd, datasync);
1909
1910 fuse_reply_err(req, err);
1911 }
1912
1913 static void pf_fsyncdir(fuse_req_t req,
1914 fuse_ino_t ino,
1915 int datasync,
1916 struct fuse_file_info* fi) {
1917 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1918 int err = do_sync_common(dirfd(h->d), datasync);
1919
1920 fuse_reply_err(req, err);
1921 }
1922
1923 static void pf_opendir(fuse_req_t req,
1924 fuse_ino_t ino,
1925 struct fuse_file_info* fi) {
1926 ATRACE_CALL();
1927 struct fuse* fuse = get_fuse(req);
1928 node* node = fuse->FromInode(ino);
1929 if (!node) {
1930 fuse_reply_err(req, ENOENT);
1931 return;
1932 }
1933 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1934 const string path = node->BuildPath();
1935 if (!is_app_accessible_path(fuse, path, ctx->uid)) {
1936 fuse_reply_err(req, ENOENT);
1937 return;
1938 }
1939
1940 TRACE_NODE(node, req);
1941
1942 int status = fuse->mp->IsOpendirAllowed(path, ctx->uid, /* forWrite */ false);
1943 if (status) {
1944 fuse_reply_err(req, status);
1945 return;
1946 }
1947
1948 DIR* dir = opendir(path.c_str());
1949 if (!dir) {
1950 fuse_reply_err(req, errno);
1951 return;
1952 }
1953
1954 dirhandle* h = new dirhandle(dir);
1955 node->AddDirHandle(h);
1956
1957 fi->fh = ptr_to_id(h);
1958 fuse_reply_open(req, fi);
1959 }
1960
1961 #define READDIR_BUF 32768LU
1962
1963 static void do_readdir_common(fuse_req_t req,
1964 fuse_ino_t ino,
1965 size_t size,
1966 off_t off,
1967 struct fuse_file_info* fi,
1968 bool plus) {
1969 struct fuse* fuse = get_fuse(req);
1970 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1971 size_t len = std::min<size_t>(size, READDIR_BUF);
1972 char buf[READDIR_BUF];
1973 size_t used = 0;
1974 std::shared_ptr<DirectoryEntry> de;
1975
1976 struct fuse_entry_param e;
1977 size_t entry_size = 0;
1978
1979 node* node = fuse->FromInode(ino);
1980 if (!node) {
1981 fuse_reply_err(req, ENOENT);
1982 return;
1983 }
1984 const string path = node->BuildPath();
1985 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1986 fuse_reply_err(req, ENOENT);
1987 return;
1988 }
1989
1990 TRACE_NODE(node, req);
1991
1992 // We don't return EACCES for compatibility with the previous implementation.
1993 // It just ignored entries causing EACCES.
1994 if (!is_user_accessible_path(req, fuse, path)) {
1995 fuse_reply_buf(req, buf, used);
1996 return;
1997 }
1998
1999     // Get all directory entries from MediaProvider on the first readdir() call for this
2000     // directory handle. h->next_off == 0 indicates that the current readdir() call is the
2001     // first one for the handle; fetching everything up front avoids multiple JNI calls
2002     // for a single directory handle.
2003 if (h->next_off == 0) {
2004 h->de = fuse->mp->GetDirectoryEntries(req->ctx.uid, path, h->d);
2005 }
2006 // If the last entry in the previous readdir() call was rejected due to
2007 // buffer capacity constraints, update directory offset to start from
2008 // previously rejected entry. Directory offset can also change if there was
2009 // a seekdir() on the given directory handle.
2010 if (off != h->next_off) {
2011 h->next_off = off;
2012 }
2013 const int num_directory_entries = h->de.size();
2014     // Check for errors. Any error/exception that occurred while obtaining directory
2015     // entries is indicated by marking the first directory entry's name as an empty
2016     // string. In that erroneous case the corresponding d_type holds the error number.
2017 if (num_directory_entries && h->de[0]->d_name.empty()) {
2018 fuse_reply_err(req, h->de[0]->d_type);
2019 return;
2020 }
2021
2022 while (h->next_off < num_directory_entries) {
2023 de = h->de[h->next_off];
2024 entry_size = 0;
2025 h->next_off++;
2026 if (plus) {
2027 int error_code = 0;
2028 // Skip validating user and app access as they are already performed on parent node
2029 if (do_lookup(req, ino, de->d_name.c_str(), &e, &error_code, FuseOp::readdir, false)) {
2030 entry_size = fuse_add_direntry_plus(req, buf + used, len - used, de->d_name.c_str(),
2031 &e, h->next_off);
2032 } else {
2033 // Ignore lookup errors on
2034 // 1. non-existing files returned from MediaProvider database.
2035 // 2. path that doesn't match FuseDaemon UID and calling uid.
2036 // 3. EIO / EINVAL may be returned on filesystem errors; try to
2037 // keep going to show other files in the directory.
2038
2039 if (error_code == ENOENT || error_code == EPERM || error_code == EACCES
2040 || error_code == EIO || error_code == EINVAL) continue;
2041 fuse_reply_err(req, error_code);
2042 return;
2043 }
2044 } else {
2045             // This should never happen because readdir_plus is enabled without adaptive
2046             // readdir_plus (FUSE_CAP_READDIRPLUS_AUTO)
2047 LOG(WARNING) << "Handling plain readdir for " << de->d_name << ". Invalid d_ino";
2048 e.attr.st_ino = FUSE_UNKNOWN_INO;
2049 e.attr.st_mode = de->d_type << 12;
2050 entry_size = fuse_add_direntry(req, buf + used, len - used, de->d_name.c_str(), &e.attr,
2051 h->next_off);
2052 }
2053         // If the buffer passed to fuse_add_direntry[_plus] is not large enough, the entry is
2054         // not added to the buffer but the size of the entry is still returned. Check that the
2055         // used buffer size plus the returned entry size does not exceed the actual buffer size
2056         // to confirm the entry was added.
2057 if (used + entry_size > len) {
2058 // When an entry is rejected, lookup called by readdir_plus will not be tracked by
2059 // kernel. Call forget on the rejected node to decrement the reference count.
2060 if (plus && e.ino > 0) {
2061 do_forget(req, fuse, e.ino, 1);
2062 }
2063 break;
2064 }
2065 used += entry_size;
2066 }
2067 fuse_reply_buf(req, buf, used);
2068 }
2069
2070 static void pf_readdir(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
2071 struct fuse_file_info* fi) {
2072 ATRACE_CALL();
2073 do_readdir_common(req, ino, size, off, fi, false);
2074 }
2075
2076 static off_t round_up(off_t o, size_t s) {
2077 return (o + s - 1) / s * s;
2078 }
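// e.g. round_up(13, 8) == 16 and round_up(16, 8) == 16; used below to honour the 8-byte
// alignment of fuse_dirent records when walking and rebuilding the readdir buffer.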
2079
2080 static void pf_readdir_postfilter(fuse_req_t req, fuse_ino_t ino, uint32_t error_in, off_t off_in,
2081 off_t off_out, size_t size_out, const void* dirents_in,
2082 struct fuse_file_info* fi) {
2083 struct fuse* fuse = get_fuse(req);
2084 char buf[READDIR_BUF];
2085 struct fuse_read_out* fro = (struct fuse_read_out*)(buf);
2086 size_t used = 0;
2087 bool redacted = false;
2088 char* dirents_out = (char*)(fro + 1);
2089
2090 ATRACE_CALL();
2091 node* node = fuse->FromInode(ino);
2092 if (!node) {
2093 fuse_reply_err(req, ENOENT);
2094 return;
2095 }
2096
2097 TRACE_NODE(node, req);
2098 const string path = node->BuildPath();
2099
2100 *fro = (struct fuse_read_out){
2101 .offset = (uint64_t)off_out,
2102 };
2103
2104 for (off_t in = 0; in < size_out;) {
2105 struct fuse_dirent* dirent_in = (struct fuse_dirent*)((char*)dirents_in + in);
2106 struct fuse_dirent* dirent_out = (struct fuse_dirent*)((char*)dirents_out + used);
2107 struct stat stats;
2108 int err;
2109
2110 std::string child_name(dirent_in->name, dirent_in->namelen);
2111 std::string child_path = path + "/" + child_name;
2112
2113 in += sizeof(*dirent_in) + round_up(dirent_in->namelen, sizeof(uint64_t));
2114 err = stat(child_path.c_str(), &stats);
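        // Keep this child if any of the following holds (octal permission bits on the child):
        //   0001 world-executable, 0010 group-executable with a matching gid, 0100
        //   owner-executable with a matching uid, MediaProvider allows this uid to access the
        //   data/obb path, or the entry is a ".nomedia" marker.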
2115 if (err == 0 &&
2116 ((stats.st_mode & 0001) || ((stats.st_mode & 0010) && req->ctx.gid == stats.st_gid) ||
2117 ((stats.st_mode & 0100) && req->ctx.uid == stats.st_uid) ||
2118 fuse->mp->isUidAllowedAccessToDataOrObbPath(req->ctx.uid, child_path) ||
2119 child_name == ".nomedia")) {
2120 *dirent_out = *dirent_in;
2121 strcpy(dirent_out->name, child_name.c_str());
2122 used += sizeof(*dirent_out) + round_up(dirent_out->namelen, sizeof(uint64_t));
2123 } else {
2124 redacted = true;
2125 }
2126 }
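    // If everything in this chunk was filtered out, fro->again asks the kernel to issue another
    // readdir for the next chunk rather than treating the empty reply as end-of-directory
    // (best-effort reading of the Android-specific postfilter protocol).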
2127 if (redacted && used == 0) fro->again = 1;
2128 fuse_reply_buf(req, buf, sizeof(*fro) + used);
2129 }
2130
2131 static void pf_readdirplus(fuse_req_t req,
2132 fuse_ino_t ino,
2133 size_t size,
2134 off_t off,
2135 struct fuse_file_info* fi) {
2136 ATRACE_CALL();
2137 do_readdir_common(req, ino, size, off, fi, true);
2138 }
2139
2140 static void pf_releasedir(fuse_req_t req,
2141 fuse_ino_t ino,
2142 struct fuse_file_info* fi) {
2143 ATRACE_CALL();
2144 struct fuse* fuse = get_fuse(req);
2145
2146 node* node = fuse->FromInode(ino);
2147
2148 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
2149 TRACE_NODE(node, req);
2150 if (node) {
2151 node->DestroyDirHandle(h);
2152 }
2153
2154 fuse_reply_err(req, 0);
2155 }
2156
2157 static void pf_statfs(fuse_req_t req, fuse_ino_t ino) {
2158 ATRACE_CALL();
2159 struct statvfs st;
2160 struct fuse* fuse = get_fuse(req);
2161
2162 if (statvfs(fuse->root->GetName().c_str(), &st))
2163 fuse_reply_err(req, errno);
2164 else
2165 fuse_reply_statfs(req, &st);
2166 }
2167 /*
2168 static void pf_setxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
2169 const char* value, size_t size, int flags)
2170 {
2171 cout << "TODO:" << __func__;
2172 }
2173
2174 static void pf_getxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
2175 size_t size)
2176 {
2177 cout << "TODO:" << __func__;
2178 }
2179
2180 static void pf_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
2181 {
2182 cout << "TODO:" << __func__;
2183 }
2184
2185 static void pf_removexattr(fuse_req_t req, fuse_ino_t ino, const char* name)
2186 {
2187 cout << "TODO:" << __func__;
2188 }*/
2189
2190 static void pf_access(fuse_req_t req, fuse_ino_t ino, int mask) {
2191 ATRACE_CALL();
2192 struct fuse* fuse = get_fuse(req);
2193
2194 node* node = fuse->FromInode(ino);
2195 if (!node) {
2196 fuse_reply_err(req, ENOENT);
2197 return;
2198 }
2199 const string path = node->BuildPath();
2200 if (path != PRIMARY_VOLUME_PREFIX && !is_app_accessible_path(fuse, path, req->ctx.uid)) {
2201 fuse_reply_err(req, ENOENT);
2202 return;
2203 }
2204 TRACE_NODE(node, req);
2205
2206 // exists() checks are always allowed.
2207 if (mask == F_OK) {
2208 int res = access(path.c_str(), F_OK);
2209 fuse_reply_err(req, res ? errno : 0);
2210 return;
2211 }
2212 struct stat stat;
2213 if (lstat(path.c_str(), &stat)) {
2214 // File doesn't exist
2215 fuse_reply_err(req, ENOENT);
2216 return;
2217 }
2218
2219 // For read and write permission checks we go to MediaProvider.
2220 int status = 0;
2221 bool for_write = mask & W_OK;
2222 bool is_directory = S_ISDIR(stat.st_mode);
2223 if (is_directory) {
2224 if (path == PRIMARY_VOLUME_PREFIX && mask == X_OK) {
2225 // Special case for this path: apps should be allowed to enter it,
2226 // but not list directory contents (which would be user numbers).
2227 int res = access(path.c_str(), X_OK);
2228 fuse_reply_err(req, res ? errno : 0);
2229 return;
2230 }
2231 status = fuse->mp->IsOpendirAllowed(path, req->ctx.uid, for_write);
2232 } else {
2233 if (mask & X_OK) {
2234 // Fuse is mounted with MS_NOEXEC.
2235 fuse_reply_err(req, EACCES);
2236 return;
2237 }
2238
2239 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
2240 path, path, req->ctx.uid, req->ctx.pid, node->GetTransformsReason(), for_write,
2241 false /* redact */, false /* log_transforms_metrics */);
2242 if (!result) {
2243 status = EFAULT;
2244 } else if (result->status) {
2245 status = EACCES;
2246 }
2247 }
2248
2249 fuse_reply_err(req, status);
2250 }
2251
2252 static void pf_create(fuse_req_t req,
2253 fuse_ino_t parent,
2254 const char* name,
2255 mode_t mode,
2256 struct fuse_file_info* fi) {
2257 ATRACE_CALL();
2258 struct fuse* fuse = get_fuse(req);
2259 node* parent_node = fuse->FromInode(parent);
2260 if (!parent_node) {
2261 fuse_reply_err(req, ENOENT);
2262 return;
2263 }
2264 const string parent_path = parent_node->BuildPath();
2265 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
2266 fuse_reply_err(req, ENOENT);
2267 return;
2268 }
2269
2270 TRACE_NODE(parent_node, req);
2271
2272 const string child_path = parent_path + "/" + name;
2273
2274 const OpenInfo open_info = parse_open_flags(child_path, fi->flags);
2275
2276 int mp_return_code = fuse->mp->InsertFile(child_path.c_str(), req->ctx.uid);
2277 if (mp_return_code) {
2278 fuse_reply_err(req, mp_return_code);
2279 return;
2280 }
2281
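    // Force the permission bits to 0664 (rw-rw-r--) regardless of the mode the app requested,
    // while preserving any non-permission bits passed in.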
2282 mode = (mode & (~0777)) | 0664;
2283 int fd = open(child_path.c_str(), open_info.flags, mode);
2284 if (fd < 0) {
2285 int error_code = errno;
2286 // We've already inserted the file into the MP database before the
2287 // failed open(), so that needs to be rolled back here.
2288 fuse->mp->DeleteFile(child_path.c_str(), req->ctx.uid);
2289 fuse_reply_err(req, error_code);
2290 return;
2291 }
2292
2293 int error_code = 0;
2294 struct fuse_entry_param e;
2295 node* node = make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
2296 FuseOp::create);
2297 TRACE_NODE(node, req);
2298 if (!node) {
2299 CHECK(error_code != 0);
2300 fuse_reply_err(req, error_code);
2301 return;
2302 }
2303
2304 // Let MediaProvider know we've created a new file
2305 fuse->mp->OnFileCreated(child_path);
2306
2307 // TODO(b/147274248): Assume there will be no EXIF to redact.
2308 // This prevents crashing during reads but can be a security hole if a malicious app opens an fd
2309 // to the file before all the EXIF content is written. We could special case reads before the
2310 // first close after a file has just been created.
2311 int keep_cache = 1;
2312 const handle* h = create_handle_for_node(
2313 fuse, child_path, fd, req->ctx.uid, 0 /* transforms_uid */, node, new RedactionInfo(),
2314 /* allow_passthrough */ true, open_info.direct_io, &keep_cache);
2315 fill_fuse_file_info(h, &open_info, keep_cache, fi);
2316
2317     // TODO(b/173190192) ensuring that h->cached must be enabled in order to
2318     // use FUSE passthrough is a conservative rule and might be dropped as
2319     // soon as its correctness is demonstrated.
2320 if (h->passthrough && !do_passthrough_enable(req, fi, fd, node)) {
2321 PLOG(ERROR) << "Passthrough CREATE failed for " << child_path;
2322 fuse_reply_err(req, EFAULT);
2323 return;
2324 }
2325
2326 fuse_reply_create(req, &e, fi);
2327 }
2328 /*
2329 static void pf_getlk(fuse_req_t req, fuse_ino_t ino,
2330 struct fuse_file_info* fi, struct flock* lock)
2331 {
2332 cout << "TODO:" << __func__;
2333 }
2334
2335 static void pf_setlk(fuse_req_t req, fuse_ino_t ino,
2336 struct fuse_file_info* fi,
2337 struct flock* lock, int sleep)
2338 {
2339 cout << "TODO:" << __func__;
2340 }
2341
2342 static void pf_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
2343 uint64_t idx)
2344 {
2345 cout << "TODO:" << __func__;
2346 }
2347
2348 static void pf_ioctl(fuse_req_t req, fuse_ino_t ino, unsigned int cmd,
2349 void* arg, struct fuse_file_info* fi, unsigned flags,
2350 const void* in_buf, size_t in_bufsz, size_t out_bufsz)
2351 {
2352 cout << "TODO:" << __func__;
2353 }
2354
2355 static void pf_poll(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi,
2356 struct fuse_pollhandle* ph)
2357 {
2358 cout << "TODO:" << __func__;
2359 }
2360
2361 static void pf_retrieve_reply(fuse_req_t req, void* cookie, fuse_ino_t ino,
2362 off_t offset, struct fuse_bufvec* bufv)
2363 {
2364 cout << "TODO:" << __func__;
2365 }
2366
2367 static void pf_flock(fuse_req_t req, fuse_ino_t ino,
2368 struct fuse_file_info* fi, int op)
2369 {
2370 cout << "TODO:" << __func__;
2371 }
2372
2373 static void pf_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
2374 off_t offset, off_t length, struct fuse_file_info* fi)
2375 {
2376 cout << "TODO:" << __func__;
2377 }
2378 */
2379
2380 static struct fuse_lowlevel_ops ops{
2381 .init = pf_init, .destroy = pf_destroy, .lookup = pf_lookup,
2382 .lookup_postfilter = pf_lookup_postfilter, .forget = pf_forget, .getattr = pf_getattr,
2383 .setattr = pf_setattr, .canonical_path = pf_canonical_path, .mknod = pf_mknod,
2384 .mkdir = pf_mkdir, .unlink = pf_unlink, .rmdir = pf_rmdir,
2385 /*.symlink = pf_symlink,*/
2386 .rename = pf_rename,
2387 /*.link = pf_link,*/
2388 .open = pf_open, .read = pf_read,
2389 /*.write = pf_write,*/
2390 .flush = pf_flush, .release = pf_release, .fsync = pf_fsync, .opendir = pf_opendir,
2391 .readdir = pf_readdir, .readdirpostfilter = pf_readdir_postfilter, .releasedir = pf_releasedir,
2392 .fsyncdir = pf_fsyncdir, .statfs = pf_statfs,
2393 /*.setxattr = pf_setxattr,
2394 .getxattr = pf_getxattr,
2395 .listxattr = pf_listxattr,
2396 .removexattr = pf_removexattr,*/
2397 .access = pf_access, .create = pf_create,
2398 /*.getlk = pf_getlk,
2399 .setlk = pf_setlk,
2400 .bmap = pf_bmap,
2401 .ioctl = pf_ioctl,
2402 .poll = pf_poll,*/
2403 .write_buf = pf_write_buf,
2404 /*.retrieve_reply = pf_retrieve_reply,*/
2405 .forget_multi = pf_forget_multi,
2406 /*.flock = pf_flock,*/
2407 .fallocate = pf_fallocate, .readdirplus = pf_readdirplus,
2408 /*.copy_file_range = pf_copy_file_range,*/
2409 };
2410
2411 static struct fuse_loop_config config = {
2412 .clone_fd = 1,
2413 .max_idle_threads = 10,
2414 };
2415
2416 static std::unordered_map<enum fuse_log_level, enum android_LogPriority> fuse_to_android_loglevel({
2417 {FUSE_LOG_EMERG, ANDROID_LOG_FATAL},
2418 {FUSE_LOG_ALERT, ANDROID_LOG_ERROR},
2419 {FUSE_LOG_CRIT, ANDROID_LOG_ERROR},
2420 {FUSE_LOG_ERR, ANDROID_LOG_ERROR},
2421 {FUSE_LOG_WARNING, ANDROID_LOG_WARN},
2422 {FUSE_LOG_NOTICE, ANDROID_LOG_INFO},
2423 {FUSE_LOG_INFO, ANDROID_LOG_DEBUG},
2424 {FUSE_LOG_DEBUG, ANDROID_LOG_VERBOSE},
2425 });
2426
2427 static void fuse_logger(enum fuse_log_level level, const char* fmt, va_list ap) {
2428 __android_log_vprint(fuse_to_android_loglevel.at(level), LIBFUSE_LOG_TAG, fmt, ap);
2429 }
2430
2431 bool FuseDaemon::ShouldOpenWithFuse(int fd, bool for_read, const std::string& path) {
2432 if (fuse->passthrough) {
2433 // Always open with FUSE if passthrough is enabled. This avoids the delicate file lock
2434 // acquisition below to ensure VFS cache consistency and doesn't impact filesystem
2435 // performance since read(2)/write(2) happen in the kernel
2436 return true;
2437 }
2438
2439 bool use_fuse = false;
2440
2441 if (active.load(std::memory_order_acquire)) {
2442 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
2443 const node* node = node::LookupAbsolutePath(fuse->root, path);
2444 if (node && node->HasCachedHandle()) {
2445 use_fuse = true;
2446 } else {
2447 // If we are unable to set a lock, we should use fuse since we can't track
2448 // when all fd references (including dups) are closed. This can happen when
2449 // we try to set a write lock twice on the same file
2450 use_fuse = set_file_lock(fd, for_read, path);
2451 }
2452 } else {
2453 LOG(WARNING) << "FUSE daemon is inactive. Cannot open file with FUSE";
2454 }
2455
2456 return use_fuse;
2457 }
2458
2459 bool FuseDaemon::UsesFusePassthrough() const {
2460 return fuse->passthrough;
2461 }
2462
2463 void FuseDaemon::InvalidateFuseDentryCache(const std::string& path) {
2464 LOG(VERBOSE) << "Invalidating FUSE dentry cache";
2465 if (active.load(std::memory_order_acquire)) {
2466 string name;
2467 fuse_ino_t parent;
2468 fuse_ino_t child;
2469 {
2470 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
2471 const node* node = node::LookupAbsolutePath(fuse->root, path);
2472 if (node) {
2473 name = node->GetName();
2474 child = fuse->ToInode(const_cast<class node*>(node));
2475 parent = fuse->ToInode(node->GetParent());
2476 }
2477 }
2478
2479 if (!name.empty()) {
2480 std::thread t([=]() { fuse_inval(fuse->se, parent, child, name, path); });
2481 t.detach();
2482 }
2483 } else {
2484 LOG(WARNING) << "FUSE daemon is inactive. Cannot invalidate dentry";
2485 }
2486 }
2487
2488 FuseDaemon::FuseDaemon(JNIEnv* env, jobject mediaProvider) : mp(env, mediaProvider),
2489 active(false), fuse(nullptr) {}
2490
2491 bool FuseDaemon::IsStarted() const {
2492 return active.load(std::memory_order_acquire);
2493 }
2494
2495 static bool IsPropertySet(const char* name, bool& value) {
2496 if (android::base::GetProperty(name, "") == "") return false;
2497
2498 value = android::base::GetBoolProperty(name, false);
2499 LOG(INFO) << "fuse-bpf is " << (value ? "enabled" : "disabled") << " because of property "
2500 << name;
2501 return true;
2502 }
2503
2504 bool IsFuseBpfEnabled() {
2505 // ro.fuse.bpf.is_running may not be set when first reading this property, so we have to
2506 // reproduce the vold/Utils.cpp:isFuseBpfEnabled() logic here
2507
2508 bool is_enabled;
2509 if (IsPropertySet("ro.fuse.bpf.is_running", is_enabled)) return is_enabled;
2510 if (IsPropertySet("persist.sys.fuse.bpf.override", is_enabled)) return is_enabled;
2511 if (IsPropertySet("ro.fuse.bpf.enabled", is_enabled)) return is_enabled;
2512
2513 // If the kernel has fuse-bpf, /sys/fs/fuse/features/fuse_bpf will exist and have the contents
2514 // 'supported\n' - see fs/fuse/inode.c in the kernel source
2515 string contents;
2516 const char* filename = "/sys/fs/fuse/features/fuse_bpf";
2517 if (!android::base::ReadFileToString(filename, &contents)) {
2518 LOG(INFO) << "fuse-bpf is disabled because " << filename << " cannot be read";
2519 return false;
2520 }
2521
2522 if (contents == "supported\n") {
2523 LOG(INFO) << "fuse-bpf is enabled because " << filename << " reads 'supported'";
2524 return true;
2525 } else {
2526 LOG(INFO) << "fuse-bpf is disabled because " << filename << " does not read 'supported'";
2527 return false;
2528 }
2529 }
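// Precedence of the checks above: ro.fuse.bpf.is_running, then persist.sys.fuse.bpf.override,
// then ro.fuse.bpf.enabled, and finally the kernel feature file
// /sys/fs/fuse/features/fuse_bpf ("supported\n" means fuse-bpf is available).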
2530
2531 void FuseDaemon::Start(android::base::unique_fd fd, const std::string& path,
2532 const bool uncached_mode,
2533 const std::vector<std::string>& supported_transcoding_relative_paths,
2534 const std::vector<std::string>& supported_uncached_relative_paths) {
2535 android::base::SetDefaultTag(LOG_TAG);
2536
2537 struct fuse_args args;
2538 struct fuse_cmdline_opts opts;
2539
2540 struct stat stat;
2541
2542 if (lstat(path.c_str(), &stat)) {
2543 PLOG(ERROR) << "ERROR: failed to stat source " << path;
2544 return;
2545 }
2546
2547 if (!S_ISDIR(stat.st_mode)) {
2548 PLOG(ERROR) << "ERROR: source is not a directory";
2549 return;
2550 }
2551
2552 args = FUSE_ARGS_INIT(0, nullptr);
2553 if (fuse_opt_add_arg(&args, path.c_str()) || fuse_opt_add_arg(&args, "-odebug") ||
2554 fuse_opt_add_arg(&args, ("-omax_read=" + std::to_string(MAX_READ_SIZE)).c_str())) {
2555 LOG(ERROR) << "ERROR: failed to set options";
2556 return;
2557 }
2558
2559 bool bpf_enabled = IsFuseBpfEnabled();
2560 android::base::unique_fd bpf_fd(-1);
2561 if (bpf_enabled) {
2562 bpf_fd.reset(android::bpf::retrieveProgram(FUSE_BPF_PROG_PATH));
2563 if (!bpf_fd.ok()) {
2564 int error = errno;
2565 PLOG(ERROR) << "Failed to fetch BPF prog fd: " << error;
2566 bpf_enabled = false;
2567 } else {
2568 LOG(INFO) << "Using FUSE BPF, BPF prog fd fetched";
2569 }
2570 }
2571
2572 if (!bpf_enabled) {
2573 LOG(INFO) << "Not using FUSE BPF";
2574 }
2575
2576 struct fuse fuse_default(path, stat.st_ino, uncached_mode, bpf_enabled, std::move(bpf_fd),
2577 supported_transcoding_relative_paths,
2578 supported_uncached_relative_paths);
2579     fuse_default.mp = &mp;
2580 // fuse_default is stack allocated, but it's safe to save it as an instance variable because
2581 // this method blocks and FuseDaemon#active tells if we are currently blocking
2582 fuse = &fuse_default;
2583
2584 // Used by pf_read: redacted ranges are represented by zeroized ranges of bytes,
2585 // so we mmap the maximum length of redacted ranges in the beginning and save memory allocations
2586 // on each read.
2587 fuse_default.zero_addr = static_cast<char*>(mmap(
2588 NULL, MAX_READ_SIZE, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, /*fd*/ -1, /*off*/ 0));
2589 if (fuse_default.zero_addr == MAP_FAILED) {
2590 LOG(FATAL) << "mmap failed - could not start fuse! errno = " << errno;
2591 }
2592
2593 // Custom logging for libfuse
2594 if (android::base::GetBoolProperty("persist.sys.fuse.log", false)) {
2595 fuse_set_log_func(fuse_logger);
2596 }
2597
2598 if (MY_USER_ID != 0 && mp.IsAppCloneUser(MY_USER_ID)) {
2599 // Disable dentry caching for the app clone user
2600 fuse->disable_dentry_cache = true;
2601 }
2602
2603 fuse->passthrough = android::base::GetBoolProperty("persist.sys.fuse.passthrough.enable", false);
2604 if (fuse->passthrough) {
2605 LOG(INFO) << "Using FUSE passthrough";
2606 }
2607
2608 struct fuse_session
2609 * se = fuse_session_new(&args, &ops, sizeof(ops), &fuse_default);
2610 if (!se) {
2611 PLOG(ERROR) << "Failed to create session ";
2612 return;
2613 }
2614 fuse_default.se = se;
2615 fuse_default.active = &active;
2616 se->fd = fd.release(); // libfuse owns the FD now
2617 se->mountpoint = strdup(path.c_str());
2618
2619 // Single thread. Useful for debugging
2620 // fuse_session_loop(se);
2621 // Multi-threaded
2622 LOG(INFO) << "Starting fuse...";
2623 fuse_session_loop_mt(se, &config);
2624 fuse->active->store(false, std::memory_order_release);
2625 LOG(INFO) << "Ending fuse...";
2626
2627 if (munmap(fuse_default.zero_addr, MAX_READ_SIZE)) {
2628 PLOG(ERROR) << "munmap failed!";
2629 }
2630
2631 fuse_opt_free_args(&args);
2632 fuse_session_destroy(se);
2633 LOG(INFO) << "Ended fuse";
2634 return;
2635 }
2636
2637 std::unique_ptr<FdAccessResult> FuseDaemon::CheckFdAccess(int fd, uid_t uid) const {
2638 struct stat s;
2639 memset(&s, 0, sizeof(s));
2640 if (fstat(fd, &s) < 0) {
2641 PLOG(DEBUG) << "CheckFdAccess fstat failed.";
2642 return std::make_unique<FdAccessResult>(string(), false);
2643 }
2644
2645 ino_t ino = s.st_ino;
2646 dev_t dev = s.st_dev;
2647
2648 dev_t fuse_dev = fuse->dev.load(std::memory_order_acquire);
2649 if (dev != fuse_dev) {
2650 PLOG(DEBUG) << "CheckFdAccess FUSE device id does not match.";
2651 return std::make_unique<FdAccessResult>(string(), false);
2652 }
2653
2654 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
2655 const node* node = node::LookupInode(fuse->root, ino);
2656 if (!node) {
2657 PLOG(DEBUG) << "CheckFdAccess no node found with given ino";
2658 return std::make_unique<FdAccessResult>(string(), false);
2659 }
2660
2661 return node->CheckHandleForUid(uid);
2662 }
2663
2664 void FuseDaemon::InitializeDeviceId(const std::string& path) {
2665 struct stat stat;
2666
2667 if (lstat(path.c_str(), &stat)) {
2668 PLOG(ERROR) << "InitializeDeviceId failed to stat given path " << path;
2669 return;
2670 }
2671
2672 fuse->dev.store(stat.st_dev, std::memory_order_release);
2673 }
2674
2675 void FuseDaemon::SetupLevelDbConnection(const std::string& instance_name) {
2676 if (CheckLevelDbConnection(instance_name)) {
2677 LOG(DEBUG) << "Leveldb connection already exists for :" << instance_name;
2678 return;
2679 }
2680
2681 std::string leveldbPath =
2682 "/data/media/" + MY_USER_ID_STRING + "/.transforms/recovery/leveldb-" + instance_name;
2683 leveldb::Options options;
2684 options.create_if_missing = true;
2685 leveldb::DB* leveldb;
2686 leveldb::Status status = leveldb::DB::Open(options, leveldbPath, &leveldb);
2687 if (status.ok()) {
2688 fuse->level_db_connection_map.insert(
2689 std::pair<std::string, leveldb::DB*>(instance_name, leveldb));
2690 LOG(INFO) << "Leveldb connection established for :" << instance_name;
2691 } else {
2692 LOG(ERROR) << "Leveldb connection failed for :" << instance_name
2693 << " with error:" << status.ToString();
2694 }
2695 }
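// For instance, SetupLevelDbConnection("external_primary") on user 0 would open (or create)
// /data/media/0/.transforms/recovery/leveldb-external_primary (instance name chosen purely
// for illustration).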
2696
2697 void FuseDaemon::SetupLevelDbInstances() {
2698 if (android::base::StartsWith(fuse->root->GetIoPath(), PRIMARY_VOLUME_PREFIX)) {
2699 // Setup leveldb instance for both external primary and internal volume.
2700 fuse->level_db_mutex.lock();
2701 // Create level db instance for internal volume
2702 SetupLevelDbConnection(mediaprovider::fuse::VOLUME_INTERNAL);
2703 // Create level db instance for external primary volume
2704 SetupLevelDbConnection(VOLUME_EXTERNAL_PRIMARY);
2705 // Create level db instance to store owner id to owner package name and vice versa relation
2706 SetupLevelDbConnection(OWNERSHIP_RELATION);
2707 fuse->level_db_mutex.unlock();
2708 }
2709 }
2710
2711 void FuseDaemon::SetupPublicVolumeLevelDbInstance(const std::string& volume_name) {
2712 // Setup leveldb instance for both external primary and internal volume.
2713 fuse->level_db_mutex.lock();
2714 // Create level db instance for public volume
2715 SetupLevelDbConnection(volume_name);
2716 fuse->level_db_mutex.unlock();
2717 }
2718
2719 std::string deriveVolumeName(const std::string& path) {
2720 std::string volume_name = mediaprovider::fuse::getVolumeNameFromPath(path);
2721 if (volume_name.empty()) {
2722         LOG(ERROR) << "Invalid input URI for extracting volume name: " << path;
2723 } else {
2724         LOG(DEBUG) << "Volume name from input path: " << path << ", volName: " << volume_name;
2725 }
2726 return volume_name;
2727 }
2728
2729 void FuseDaemon::DeleteFromLevelDb(const std::string& key) {
2730 fuse->level_db_mutex.lock();
2731 std::string volume_name = deriveVolumeName(key);
2732 if (!CheckLevelDbConnection(volume_name)) {
2733 fuse->level_db_mutex.unlock();
2734 LOG(ERROR) << "DeleteFromLevelDb: Missing leveldb connection.";
2735 return;
2736 }
2737
2738 leveldb::Status status;
2739 status = fuse->level_db_connection_map[volume_name]->Delete(leveldb::WriteOptions(), key);
2740 if (!status.ok()) {
2741 LOG(ERROR) << "Failure in leveldb delete for key: " << key
2742 << " from volume:" << volume_name;
2743 }
2744 fuse->level_db_mutex.unlock();
2745 }
2746
2747 void FuseDaemon::InsertInLevelDb(const std::string& volume_name, const std::string& key,
2748 const std::string& value) {
2749 fuse->level_db_mutex.lock();
2750 if (!CheckLevelDbConnection(volume_name)) {
2751 fuse->level_db_mutex.unlock();
2752 LOG(ERROR) << "InsertInLevelDb: Missing leveldb connection.";
2753 return;
2754 }
2755
2756 leveldb::Status status;
2757 status = fuse->level_db_connection_map[volume_name]->Put(leveldb::WriteOptions(), key,
2758 value);
2759 fuse->level_db_mutex.unlock();
2760 if (!status.ok()) {
2761 LOG(ERROR) << "Failure in leveldb insert for key: " << key
2762 << " in volume:" << volume_name;
2763 LOG(ERROR) << status.ToString();
2764 }
2765 }
2766
2767 std::vector<std::string> FuseDaemon::ReadFilePathsFromLevelDb(const std::string& volume_name,
2768 const std::string& last_read_value,
2769 int limit) {
2770 fuse->level_db_mutex.lock();
2771 int counter = 0;
2772 std::vector<std::string> file_paths;
2773
2774 if (!CheckLevelDbConnection(volume_name)) {
2775 fuse->level_db_mutex.unlock();
2776 LOG(ERROR) << "ReadFilePathsFromLevelDb: Missing leveldb connection";
2777 return file_paths;
2778 }
2779
2780 leveldb::Iterator* it =
2781 fuse->level_db_connection_map[volume_name]->NewIterator(leveldb::ReadOptions());
2782 if (android::base::EqualsIgnoreCase(last_read_value, "")) {
2783 it->SeekToFirst();
2784 } else {
2785 // Start after last read value
2786 leveldb::Slice slice = last_read_value;
2787 it->Seek(slice);
2788 it->Next();
2789 }
2790 for (; it->Valid() && counter < limit; it->Next()) {
2791 file_paths.push_back(it->key().ToString());
2792 counter++;
2793 }
2794 fuse->level_db_mutex.unlock();
2795 return file_paths;
2796 }
2797
2798 std::string FuseDaemon::ReadBackedUpDataFromLevelDb(const std::string& filePath) {
2799 fuse->level_db_mutex.lock();
2800 std::string data = "";
2801 std::string volume_name = deriveVolumeName(filePath);
2802 if (!CheckLevelDbConnection(volume_name)) {
2803 fuse->level_db_mutex.unlock();
2804 LOG(ERROR) << "ReadBackedUpDataFromLevelDb: Missing leveldb connection.";
2805 return data;
2806 }
2807
2808 leveldb::Status status = fuse->level_db_connection_map[volume_name]->Get(
2809 leveldb::ReadOptions(), filePath, &data);
2810 fuse->level_db_mutex.unlock();
2811
2812 if (status.IsNotFound()) {
2813 LOG(VERBOSE) << "Key is not found in leveldb: " << filePath << " " << status.ToString();
2814 } else if (!status.ok()) {
2815 LOG(WARNING) << "Failure in leveldb read for key: " << filePath << " "
2816 << status.ToString();
2817 }
2818 return data;
2819 }
2820
2821 std::string FuseDaemon::ReadOwnership(const std::string& key) {
2822 fuse->level_db_mutex.lock();
2823 // Return empty string if key not found
2824 std::string data = "";
2825 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2826 fuse->level_db_mutex.unlock();
2827 LOG(ERROR) << "ReadOwnership: Missing leveldb connection.";
2828 return data;
2829 }
2830
2831 leveldb::Status status = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Get(
2832 leveldb::ReadOptions(), key, &data);
2833 fuse->level_db_mutex.unlock();
2834
2835 if (status.IsNotFound()) {
2836 LOG(VERBOSE) << "Key is not found in leveldb: " << key << " " << status.ToString();
2837 } else if (!status.ok()) {
2838 LOG(WARNING) << "Failure in leveldb read for key: " << key << " " << status.ToString();
2839 }
2840
2841 return data;
2842 }
2843
2844 void FuseDaemon::CreateOwnerIdRelation(const std::string& ownerId,
2845 const std::string& ownerPackageIdentifier) {
2846 fuse->level_db_mutex.lock();
2847 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2848 fuse->level_db_mutex.unlock();
2849 LOG(ERROR) << "CreateOwnerIdRelation: Missing leveldb connection.";
2850 return;
2851 }
2852
2853 leveldb::Status status1, status2;
2854 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2855 leveldb::WriteOptions(), ownerId, ownerPackageIdentifier);
2856 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2857 leveldb::WriteOptions(), ownerPackageIdentifier, ownerId);
2858 if (!status1.ok() || !status2.ok()) {
2859 // If both inserts did not go through, remove both.
2860 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2861 ownerId);
2862 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2863 ownerPackageIdentifier);
2864 LOG(ERROR) << "Failure in leveldb insert for owner_id: " << ownerId
2865 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2866 }
2867 fuse->level_db_mutex.unlock();
2868 }
2869
2870 void FuseDaemon::RemoveOwnerIdRelation(const std::string& ownerId,
2871 const std::string& ownerPackageIdentifier) {
2872 fuse->level_db_mutex.lock();
2873 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2874 fuse->level_db_mutex.unlock();
2875 LOG(ERROR) << "RemoveOwnerIdRelation: Missing leveldb connection.";
2876 return;
2877 }
2878
2879 leveldb::Status status1, status2;
2880 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2881 ownerId);
2882 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2883 ownerPackageIdentifier);
2884 if (status1.ok() && status2.ok()) {
2885 LOG(INFO) << "Successfully deleted rows in leveldb for owner_id: " << ownerId
2886 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2887 } else {
2888 // If both deletes did not go through, revert both.
2889 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2890 leveldb::WriteOptions(), ownerId, ownerPackageIdentifier);
2891 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2892 leveldb::WriteOptions(), ownerPackageIdentifier, ownerId);
2893 LOG(ERROR) << "Failure in leveldb delete for owner_id: " << ownerId
2894 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2895 }
2896 fuse->level_db_mutex.unlock();
2897 }
2898
2899 std::map<std::string, std::string> FuseDaemon::GetOwnerRelationship() {
2900 fuse->level_db_mutex.lock();
2901 std::map<std::string, std::string> resultMap;
2902 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2903 fuse->level_db_mutex.unlock();
2904 LOG(ERROR) << "GetOwnerRelationship: Missing leveldb connection.";
2905 return resultMap;
2906 }
2907
2908 leveldb::Status status;
2909 // Get the key-value pairs from the database.
2910 leveldb::Iterator* it =
2911 fuse->level_db_connection_map[OWNERSHIP_RELATION]->NewIterator(leveldb::ReadOptions());
2912 for (it->SeekToFirst(); it->Valid(); it->Next()) {
2913 std::string key = it->key().ToString();
2914 std::string value = it->value().ToString();
2915 resultMap.insert(std::pair<std::string, std::string>(key, value));
2916 }
2917
2918 fuse->level_db_mutex.unlock();
2919 return resultMap;
2920 }
2921
2922 bool FuseDaemon::CheckLevelDbConnection(const std::string& instance_name) {
2923 if (fuse->level_db_connection_map.find(instance_name) == fuse->level_db_connection_map.end()) {
2924 LOG(ERROR) << "Leveldb setup is missing for: " << instance_name;
2925 return false;
2926 }
2927 return true;
2928 }
2929
2930 } //namespace fuse
2931 } // namespace mediaprovider
2932