1 /*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "sdcard"
18
19 #include <ctype.h>
20 #include <dirent.h>
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <inttypes.h>
24 #include <limits.h>
25 #include <linux/fuse.h>
26 #include <pthread.h>
27 #include <stdbool.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <sys/inotify.h>
32 #include <sys/mount.h>
33 #include <sys/param.h>
34 #include <sys/resource.h>
35 #include <sys/stat.h>
36 #include <sys/statfs.h>
37 #include <sys/time.h>
38 #include <sys/types.h>
39 #include <sys/uio.h>
40 #include <unistd.h>
41
42 #include <cutils/fs.h>
43 #include <cutils/hashmap.h>
44 #include <cutils/log.h>
45 #include <cutils/multiuser.h>
46 #include <cutils/properties.h>
47 #include <packagelistparser/packagelistparser.h>
48
49 #include <private/android_filesystem_config.h>
50
51 /* FUSE_CANONICAL_PATH is not currently upstreamed */
52 #define FUSE_CANONICAL_PATH 2016
53
54 /* README
55 *
56 * What is this?
57 *
 * sdcard is a program that uses FUSE to emulate FAT-on-sdcard style
 * directory permissions (all files are given fixed owner, group, and
 * permissions at creation; owner, group, and permissions are not
 * changeable; symlinks and hardlinks are not creatable; etc.).
62 *
63 * See usage() for command line options.
64 *
65 * It must be run as root, but will drop to requested UID/GID as soon as it
66 * mounts a filesystem. It will refuse to run if requested UID/GID are zero.
67 *
68 * Things I believe to be true:
69 *
70 * - ops that return a fuse_entry (LOOKUP, MKNOD, MKDIR, LINK, SYMLINK,
71 * CREAT) must bump that node's refcount
72 * - don't forget that FORGET can forget multiple references (req->nlookup)
73 * - if an op that returns a fuse_entry fails writing the reply to the
74 * kernel, you must rollback the refcount to reflect the reference the
75 * kernel did not actually acquire
76 *
77 * This daemon can also derive custom filesystem permissions based on directory
78 * structure when requested. These custom permissions support several features:
79 *
80 * - Apps can access their own files in /Android/data/com.example/ without
81 * requiring any additional GIDs.
82 * - Separate permissions for protecting directories like Pictures and Music.
83 * - Multi-user separation on the same physical device.
84 */
85
86 #define FUSE_TRACE 0
87
88 #if FUSE_TRACE
89 #define TRACE(x...) ALOGD(x)
90 #else
91 #define TRACE(x...) do {} while (0)
92 #endif
93
94 #define ERROR(x...) ALOGE(x)
95
96 #define PROP_SDCARDFS_DEVICE "ro.sys.sdcardfs"
97 #define PROP_SDCARDFS_USER "persist.sys.sdcardfs"
98
99 #define FUSE_UNKNOWN_INO 0xffffffff
100
101 /* Maximum number of bytes to write in one request. */
102 #define MAX_WRITE (256 * 1024)
103
104 /* Maximum number of bytes to read in one request. */
105 #define MAX_READ (128 * 1024)
106
107 /* Largest possible request.
108 * The request size is bounded by the maximum size of a FUSE_WRITE request because it has
109 * the largest possible data payload. */
110 #define MAX_REQUEST_SIZE (sizeof(struct fuse_in_header) + sizeof(struct fuse_write_in) + MAX_WRITE)
111
112 /* Pseudo-error constant used to indicate that no fuse status is needed
113 * or that a reply has already been written. */
114 #define NO_STATUS 1
115
116 /* Supplementary groups to execute with */
117 static const gid_t kGroups[1] = { AID_PACKAGE_INFO };
118
119 /* Permission mode for a specific node. Controls how file permissions
120 * are derived for children nodes. */
121 typedef enum {
122 /* Nothing special; this node should just inherit from its parent. */
123 PERM_INHERIT,
124 /* This node is one level above a normal root; used for legacy layouts
125 * which use the first level to represent user_id. */
126 PERM_PRE_ROOT,
127 /* This node is "/" */
128 PERM_ROOT,
129 /* This node is "/Android" */
130 PERM_ANDROID,
131 /* This node is "/Android/data" */
132 PERM_ANDROID_DATA,
133 /* This node is "/Android/obb" */
134 PERM_ANDROID_OBB,
135 /* This node is "/Android/media" */
136 PERM_ANDROID_MEDIA,
137 } perm_t;
138
/* Per-open-file state; wraps the backing file descriptor.
 * NOTE(review): presumably allocated on OPEN/CREATE and handed back to the
 * kernel as fh — confirm against the handlers later in this file. */
struct handle {
    int fd;
};
142
/* Per-open-directory state; wraps the backing DIR stream.
 * NOTE(review): presumably allocated on OPENDIR and handed back to the
 * kernel as fh — confirm against the handlers later in this file. */
struct dirhandle {
    DIR *d;
};
146
/* One entry in the in-memory mirror of the filesystem tree. Nodes are
 * reference-counted: LOOKUP-style replies bump the count, FORGET drops it
 * (possibly multiple times), and the node is destroyed at zero. */
struct node {
    /* Number of kernel references (plus parent links) keeping this node
     * alive; see acquire_node_locked()/release_node_locked(). */
    __u32 refcount;
    /* Node id reported to the kernel; this is just the node's address
     * (see ptr_to_id()). */
    __u64 nid;
    /* Generation number paired with nid in fuse_entry_out replies. */
    __u64 gen;
    /*
     * The inode number for this FUSE node. Note that this isn't stable across
     * multiple invocations of the FUSE daemon.
     */
    __u32 ino;

    /* State derived based on current position in hierarchy. */
    perm_t perm;
    userid_t userid;
    uid_t uid;
    bool under_android;

    struct node *next;          /* per-dir sibling list */
    struct node *child;         /* first contained file by this dir */
    struct node *parent;        /* containing directory */

    size_t namelen;
    char *name;
    /* If non-null, this is the real name of the file in the underlying storage.
     * This may differ from the field "name" only by case.
     * strlen(actual_name) will always equal strlen(name), so it is safe to use
     * namelen for both fields.
     */
    char *actual_name;

    /* If non-null, an exact underlying path that should be grafted into this
     * position. Used to support things like OBB. */
    char* graft_path;
    size_t graft_pathlen;

    /* Set when the underlying entry has been removed; deleted nodes are
     * skipped by lookup_child_by_name_locked(). */
    bool deleted;
};
183
/** Hashmap callback: hash a NUL-terminated string key. */
static int str_hash(void *key) {
    const char *s = key;
    return hashmapHash(key, strlen(s));
}
187
/** Hashmap callback: compare two string keys, ignoring case. */
static bool str_icase_equals(void *keyA, void *keyB) {
    const char *a = keyA;
    const char *b = keyB;
    return strcasecmp(a, b) == 0;
}
192
/* Global data for all FUSE mounts */
struct fuse_global {
    /* Guards the shared node tree and the counters below. */
    pthread_mutex_t lock;

    /* Identity the daemon runs as after mounting (see README: it refuses
     * to keep running as root). */
    uid_t uid;
    gid_t gid;
    /* NOTE(review): presumably selects the legacy multi-user layout where
     * the first path level is a user id (PERM_PRE_ROOT) — confirm where
     * the root node is initialized. */
    bool multi_user;

    /* Underlying storage path the views are built from. */
    char source_path[PATH_MAX];
    /* Shared OBB directory grafted in under Android/obb
     * (see derive_permissions_locked()). */
    char obb_path[PATH_MAX];

    /* Maps package name -> appid; used to derive per-app owner UIDs. */
    Hashmap* package_to_appid;

    /* Monotonic generation counter handed out with each new node. */
    __u64 next_generation;
    /* Root of the node tree shared by all views. */
    struct node root;

    /* Used to allocate unique inode numbers for fuse nodes. We use
     * a simple counter based scheme where inode numbers from deleted
     * nodes aren't reused. Note that inode allocations are not stable
     * across multiple invocation of the sdcard daemon, but that shouldn't
     * be a huge problem in practice.
     *
     * Note that we restrict inodes to 32 bit unsigned integers to prevent
     * truncation on 32 bit processes when unsigned long long stat.st_ino is
     * assigned to an unsigned long ino_t type in an LP32 process.
     *
     * Also note that fuse_attr and fuse_dirent inode values are 64 bits wide
     * on both LP32 and LP64, but the fuse kernel code doesn't squash 64 bit
     * inode numbers into 32 bit values on 64 bit kernels (see fuse_squash_ino
     * in fs/fuse/inode.c).
     *
     * Accesses must be guarded by |lock|.
     */
    __u32 inode_ctr;

    /* The per-view mounts sharing this global state; attr_from_stat()
     * derives different visible permissions per view via fuse->gid/mask. */
    struct fuse* fuse_default;
    struct fuse* fuse_read;
    struct fuse* fuse_write;
};
232
/* Single FUSE mount */
struct fuse {
    /* State shared by all mounts of this daemon. */
    struct fuse_global* global;

    /* Mount point this instance serves. */
    char dest_path[PATH_MAX];

    /* Device fd the daemon reads requests from and writes replies to
     * (see fuse_reply()/fuse_status()). */
    int fd;

    /* View-specific group and umask used when deriving the permissions
     * presented to callers (see attr_from_stat()). */
    gid_t gid;
    mode_t mask;
};
244
/* Private data used by a single FUSE handler */
struct fuse_handler {
    /* The mount this handler services. */
    struct fuse* fuse;
    /* Small identifier included in TRACE log lines to tell handlers apart. */
    int token;

    /* To save memory, we never use the contents of the request buffer and the read
     * buffer at the same time. This allows us to share the underlying storage. */
    union {
        __u8 request_buffer[MAX_REQUEST_SIZE];
        __u8 read_buffer[MAX_READ + PAGE_SIZE];
    };
};
257
/* Node ids handed to the kernel are just node addresses; convert back. */
static inline void *id_to_ptr(__u64 nid)
{
    uintptr_t addr = (uintptr_t) nid;
    return (void *) addr;
}
262
/* Encode a node address as the 64-bit node id reported to the kernel. */
static inline __u64 ptr_to_id(void *ptr)
{
    uintptr_t addr = (uintptr_t) ptr;
    return (__u64) addr;
}
267
acquire_node_locked(struct node * node)268 static void acquire_node_locked(struct node* node)
269 {
270 node->refcount++;
271 TRACE("ACQUIRE %p (%s) rc=%d\n", node, node->name, node->refcount);
272 }
273
274 static void remove_node_from_parent_locked(struct node* node);
275
release_node_locked(struct node * node)276 static void release_node_locked(struct node* node)
277 {
278 TRACE("RELEASE %p (%s) rc=%d\n", node, node->name, node->refcount);
279 if (node->refcount > 0) {
280 node->refcount--;
281 if (!node->refcount) {
282 TRACE("DESTROY %p (%s)\n", node, node->name);
283 remove_node_from_parent_locked(node);
284
285 /* TODO: remove debugging - poison memory */
286 memset(node->name, 0xef, node->namelen);
287 free(node->name);
288 free(node->actual_name);
289 memset(node, 0xfc, sizeof(*node));
290 free(node);
291 }
292 } else {
293 ERROR("Zero refcnt %p\n", node);
294 }
295 }
296
/* Link |node| at the head of |parent|'s child list and take a reference on
 * the parent so it outlives its children. Caller must hold the global lock. */
static void add_node_to_parent_locked(struct node *node, struct node *parent) {
    node->parent = parent;
    /* Push onto the singly-linked sibling list; order matters: capture the
     * old head before replacing it. */
    node->next = parent->child;
    parent->child = node;
    acquire_node_locked(parent);
}
303
remove_node_from_parent_locked(struct node * node)304 static void remove_node_from_parent_locked(struct node* node)
305 {
306 if (node->parent) {
307 if (node->parent->child == node) {
308 node->parent->child = node->parent->child->next;
309 } else {
310 struct node *node2;
311 node2 = node->parent->child;
312 while (node2->next != node)
313 node2 = node2->next;
314 node2->next = node->next;
315 }
316 release_node_locked(node->parent);
317 node->parent = NULL;
318 node->next = NULL;
319 }
320 }
321
/* Gets the absolute path to a node into the provided buffer.
 *
 * The path is built recursively from the root; a graft_path (if set)
 * replaces everything above the node, and actual_name is preferred over
 * name so the on-disk case is used.
 *
 * Populates 'buf' with the path and returns the length of the path on success,
 * or returns -1 if the path is too long for the provided buffer.
 */
static ssize_t get_node_path_locked(struct node* node, char* buf, size_t bufsize) {
    const char* name;
    size_t namelen;
    if (node->graft_path) {
        /* Grafted nodes are rooted at an absolute underlying path. */
        name = node->graft_path;
        namelen = node->graft_pathlen;
    } else if (node->actual_name) {
        name = node->actual_name;
        namelen = node->namelen;
    } else {
        name = node->name;
        namelen = node->namelen;
    }

    /* Must fit this component plus its terminating NUL. */
    if (bufsize < namelen + 1) {
        return -1;
    }

    ssize_t pathlen = 0;
    if (node->parent && node->graft_path == NULL) {
        /* Recurse with the space left after reserving this component and
         * the '/' separator. */
        pathlen = get_node_path_locked(node->parent, buf, bufsize - namelen - 1);
        if (pathlen < 0) {
            return -1;
        }
        buf[pathlen++] = '/';
    }

    memcpy(buf + pathlen, name, namelen + 1); /* include trailing \0 */
    return pathlen + namelen;
}
357
/* Finds the absolute path of a file within a given directory.
 * Performs a case-insensitive search for the file and sets the buffer to the path
 * of the first matching file. If 'search' is zero or if no match is found, sets
 * the buffer to the path that the file would have, assuming the name were case-sensitive.
 *
 * Populates 'buf' with the path and returns the actual name (within 'buf') on success,
 * or returns NULL if the path is too long for the provided buffer.
 */
static char* find_file_within(const char* path, const char* name,
        char* buf, size_t bufsize, int search)
{
    size_t dirlen = strlen(path);
    size_t leaflen = strlen(name);

    /* Need room for "<path>/<name>" plus the terminating NUL. */
    if (bufsize <= dirlen + leaflen + 1) {
        return NULL;
    }

    memcpy(buf, path, dirlen);
    buf[dirlen] = '/';
    char* leaf = buf + dirlen + 1;
    memcpy(leaf, name, leaflen + 1);

    /* If the exact-case path exists (or we were asked not to search),
     * we're done. */
    if (!search || access(buf, F_OK) == 0) {
        return leaf;
    }

    /* Otherwise scan the directory for a case-insensitive match. */
    DIR* dir = opendir(path);
    if (!dir) {
        ERROR("opendir %s failed: %s\n", path, strerror(errno));
        return leaf;
    }
    struct dirent* entry;
    while ((entry = readdir(dir)) != NULL) {
        if (strcasecmp(entry->d_name, name) == 0) {
            /* A match differs from |name| only by case, so it has the same
             * length and the trailing NUL already in place stays valid. */
            memcpy(leaf, entry->d_name, leaflen);
            break;
        }
    }
    closedir(dir);
    return leaf;
}
401
/* Fills |attr| from the underlying stat |s|, overriding ownership and
 * permission bits with values derived from |node| and this view's
 * gid/mask. This is where the emulated FAT-style permissions are made. */
static void attr_from_stat(struct fuse* fuse, struct fuse_attr *attr,
        const struct stat *s, const struct node* node) {
    attr->ino = node->ino;
    attr->size = s->st_size;
    attr->blocks = s->st_blocks;
    attr->atime = s->st_atim.tv_sec;
    attr->mtime = s->st_mtim.tv_sec;
    attr->ctime = s->st_ctim.tv_sec;
    attr->atimensec = s->st_atim.tv_nsec;
    attr->mtimensec = s->st_mtim.tv_nsec;
    attr->ctimensec = s->st_ctim.tv_nsec;
    attr->mode = s->st_mode;
    attr->nlink = s->st_nlink;

    /* Owner comes from the node (derive_permissions_locked()), not the
     * underlying file. */
    attr->uid = node->uid;

    if (fuse->gid == AID_SDCARD_RW) {
        /* As an optimization, certain trusted system components only run
         * as owner but operate across all users. Since we're now handing
         * out the sdcard_rw GID only to trusted apps, we're okay relaxing
         * the user boundary enforcement for the default view. The UIDs
         * assigned to app directories are still multiuser aware. */
        attr->gid = AID_SDCARD_RW;
    } else {
        attr->gid = multiuser_get_uid(node->userid, fuse->gid);
    }

    int visible_mode = 0775 & ~fuse->mask;
    if (node->perm == PERM_PRE_ROOT) {
        /* Top of multi-user view should always be visible to ensure
         * secondary users can traverse inside. */
        visible_mode = 0711;
    } else if (node->under_android) {
        /* Block "other" access to Android directories, since only apps
         * belonging to a specific user should be in there; we still
         * leave +x open for the default view. */
        if (fuse->gid == AID_SDCARD_RW) {
            visible_mode = visible_mode & ~0006;
        } else {
            visible_mode = visible_mode & ~0007;
        }
    }
    /* Mirror the owner's permission bits into group/other, then clip by
     * visible_mode — so nothing is shown as more accessible than the
     * owner bits of the underlying file allow. */
    int owner_mode = s->st_mode & 0700;
    int filtered_mode = visible_mode & (owner_mode | (owner_mode >> 3) | (owner_mode >> 6));
    /* Keep the file-type bits, replace all permission bits. */
    attr->mode = (attr->mode & S_IFMT) | filtered_mode;
}
448
touch(char * path,mode_t mode)449 static int touch(char* path, mode_t mode) {
450 int fd = open(path, O_RDWR | O_CREAT | O_EXCL | O_NOFOLLOW, mode);
451 if (fd == -1) {
452 if (errno == EEXIST) {
453 return 0;
454 } else {
455 ERROR("Failed to open(%s): %s\n", path, strerror(errno));
456 return -1;
457 }
458 }
459 close(fd);
460 return 0;
461 }
462
/* Computes |node|'s permission state (perm, userid, uid, under_android)
 * from its parent and its own name. Caller must hold the global lock. */
static void derive_permissions_locked(struct fuse* fuse, struct node *parent,
        struct node *node) {
    appid_t appid;

    /* By default, each node inherits from its parent */
    node->perm = PERM_INHERIT;
    node->userid = parent->userid;
    node->uid = parent->uid;
    node->under_android = parent->under_android;

    /* Derive custom permissions based on parent and current node */
    switch (parent->perm) {
        case PERM_INHERIT:
            /* Already inherited above */
            break;
        case PERM_PRE_ROOT:
            /* Legacy internal layout places users at top level */
            node->perm = PERM_ROOT;
            /* Directory name is the numeric user id; a non-numeric name
             * silently parses as user 0 (strtoul reports no error here). */
            node->userid = strtoul(node->name, NULL, 10);
            break;
        case PERM_ROOT:
            /* Assume masked off by default. */
            if (!strcasecmp(node->name, "Android")) {
                /* App-specific directories inside; let anyone traverse */
                node->perm = PERM_ANDROID;
                node->under_android = true;
            }
            break;
        case PERM_ANDROID:
            if (!strcasecmp(node->name, "data")) {
                /* App-specific directories inside; let anyone traverse */
                node->perm = PERM_ANDROID_DATA;
            } else if (!strcasecmp(node->name, "obb")) {
                /* App-specific directories inside; let anyone traverse */
                node->perm = PERM_ANDROID_OBB;
                /* Single OBB directory is always shared */
                node->graft_path = fuse->global->obb_path;
                node->graft_pathlen = strlen(fuse->global->obb_path);
            } else if (!strcasecmp(node->name, "media")) {
                /* App-specific directories inside; let anyone traverse */
                node->perm = PERM_ANDROID_MEDIA;
            }
            break;
        case PERM_ANDROID_DATA:
        case PERM_ANDROID_OBB:
        case PERM_ANDROID_MEDIA:
            /* Children here are package directories: map the package name
             * to its appid and give that app ownership. Unknown packages
             * keep the inherited uid. */
            appid = (appid_t) (uintptr_t) hashmapGet(fuse->global->package_to_appid, node->name);
            if (appid != 0) {
                node->uid = multiuser_get_uid(parent->userid, appid);
            }
            break;
    }
}
516
derive_permissions_recursive_locked(struct fuse * fuse,struct node * parent)517 static void derive_permissions_recursive_locked(struct fuse* fuse, struct node *parent) {
518 struct node *node;
519 for (node = parent->child; node; node = node->next) {
520 derive_permissions_locked(fuse, parent, node);
521 if (node->child) {
522 derive_permissions_recursive_locked(fuse, node);
523 }
524 }
525 }
526
527 /* Kernel has already enforced everything we returned through
528 * derive_permissions_locked(), so this is used to lock down access
529 * even further, such as enforcing that apps hold sdcard_rw. */
check_caller_access_to_name(struct fuse * fuse,const struct fuse_in_header * hdr,const struct node * parent_node,const char * name,int mode)530 static bool check_caller_access_to_name(struct fuse* fuse,
531 const struct fuse_in_header *hdr, const struct node* parent_node,
532 const char* name, int mode) {
533 /* Always block security-sensitive files at root */
534 if (parent_node && parent_node->perm == PERM_ROOT) {
535 if (!strcasecmp(name, "autorun.inf")
536 || !strcasecmp(name, ".android_secure")
537 || !strcasecmp(name, "android_secure")) {
538 return false;
539 }
540 }
541
542 /* Root always has access; access for any other UIDs should always
543 * be controlled through packages.list. */
544 if (hdr->uid == 0) {
545 return true;
546 }
547
548 /* No extra permissions to enforce */
549 return true;
550 }
551
/* Convenience wrapper: applies check_caller_access_to_name() to an existing
 * node via its parent and name. */
static bool check_caller_access_to_node(struct fuse* fuse,
        const struct fuse_in_header *hdr, const struct node* node, int mode) {
    return check_caller_access_to_name(fuse, hdr, node->parent, node->name, mode);
}
556
create_node_locked(struct fuse * fuse,struct node * parent,const char * name,const char * actual_name)557 struct node *create_node_locked(struct fuse* fuse,
558 struct node *parent, const char *name, const char* actual_name)
559 {
560 struct node *node;
561 size_t namelen = strlen(name);
562
563 // Detect overflows in the inode counter. "4 billion nodes should be enough
564 // for everybody".
565 if (fuse->global->inode_ctr == 0) {
566 ERROR("No more inode numbers available");
567 return NULL;
568 }
569
570 node = calloc(1, sizeof(struct node));
571 if (!node) {
572 return NULL;
573 }
574 node->name = malloc(namelen + 1);
575 if (!node->name) {
576 free(node);
577 return NULL;
578 }
579 memcpy(node->name, name, namelen + 1);
580 if (strcmp(name, actual_name)) {
581 node->actual_name = malloc(namelen + 1);
582 if (!node->actual_name) {
583 free(node->name);
584 free(node);
585 return NULL;
586 }
587 memcpy(node->actual_name, actual_name, namelen + 1);
588 }
589 node->namelen = namelen;
590 node->nid = ptr_to_id(node);
591 node->ino = fuse->global->inode_ctr++;
592 node->gen = fuse->global->next_generation++;
593
594 node->deleted = false;
595
596 derive_permissions_locked(fuse, parent, node);
597 acquire_node_locked(node);
598 add_node_to_parent_locked(node, parent);
599 return node;
600 }
601
/* Renames |node| in place to |name| (keeping |actual_name| only when the
 * on-disk case differs). Returns 0 or -ENOMEM; on failure the node keeps
 * its old name, because storage is grown before any bytes are overwritten.
 * Caller must hold the global lock. */
static int rename_node_locked(struct node *node, const char *name,
        const char* actual_name)
{
    size_t namelen = strlen(name);
    /* Non-zero when name and actual_name differ (by case). */
    int need_actual_name = strcmp(name, actual_name);

    /* make the storage bigger without actually changing the name
     * in case an error occurs part way */
    if (namelen > node->namelen) {
        char* new_name = realloc(node->name, namelen + 1);
        if (!new_name) {
            return -ENOMEM;
        }
        node->name = new_name;
        if (need_actual_name && node->actual_name) {
            char* new_actual_name = realloc(node->actual_name, namelen + 1);
            if (!new_actual_name) {
                return -ENOMEM;
            }
            node->actual_name = new_actual_name;
        }
    }

    /* update the name, taking care to allocate storage before overwriting the old name */
    if (need_actual_name) {
        if (!node->actual_name) {
            node->actual_name = malloc(namelen + 1);
            if (!node->actual_name) {
                return -ENOMEM;
            }
        }
        memcpy(node->actual_name, actual_name, namelen + 1);
    } else {
        /* Names now match exactly; drop the redundant actual_name. */
        free(node->actual_name);
        node->actual_name = NULL;
    }
    memcpy(node->name, name, namelen + 1);
    node->namelen = namelen;
    return 0;
}
642
/* Resolve a kernel node id to a node pointer. The root is addressed by
 * the well-known FUSE_ROOT_ID; every other id is the node's own address. */
static struct node *lookup_node_by_id_locked(struct fuse *fuse, __u64 nid)
{
    return (nid == FUSE_ROOT_ID) ? &fuse->global->root : id_to_ptr(nid);
}
651
/* Resolve a node id and build its absolute path into |buf|. A path that
 * does not fit is treated the same as a missing node: returns NULL. */
static struct node* lookup_node_and_path_by_id_locked(struct fuse* fuse, __u64 nid,
        char* buf, size_t bufsize)
{
    struct node* node = lookup_node_by_id_locked(fuse, nid);
    if (!node) {
        return NULL;
    }
    if (get_node_path_locked(node, buf, bufsize) < 0) {
        return NULL;
    }
    return node;
}
661
lookup_child_by_name_locked(struct node * node,const char * name)662 static struct node *lookup_child_by_name_locked(struct node *node, const char *name)
663 {
664 for (node = node->child; node; node = node->next) {
665 /* use exact string comparison, nodes that differ by case
666 * must be considered distinct even if they refer to the same
667 * underlying file as otherwise operations such as "mv x x"
668 * will not work because the source and target nodes are the same. */
669 if (!strcmp(name, node->name) && !node->deleted) {
670 return node;
671 }
672 }
673 return 0;
674 }
675
/* Return the child of |parent| named |name| with one extra reference,
 * creating it first if it does not exist. create_node_locked() already
 * returns its node acquired, so only the lookup path needs an explicit
 * acquire. Caller must hold the global lock. */
static struct node* acquire_or_create_child_locked(
        struct fuse* fuse, struct node* parent,
        const char* name, const char* actual_name)
{
    struct node* child = lookup_child_by_name_locked(parent, name);
    if (!child) {
        return create_node_locked(fuse, parent, name, actual_name);
    }
    acquire_node_locked(child);
    return child;
}
688
/* Sends a header-only reply (status/error code only) for request |unique|.
 * |err| is 0 for success or a negative errno.
 *
 * Fix: the write() result was previously ignored; a failed reply is now
 * logged, matching fuse_reply()'s behavior. */
static void fuse_status(struct fuse *fuse, __u64 unique, int err)
{
    struct fuse_out_header hdr;
    hdr.len = sizeof(hdr);
    hdr.error = err;
    hdr.unique = unique;
    if (write(fuse->fd, &hdr, sizeof(hdr)) < 0) {
        ERROR("*** STATUS FAILED *** %d\n", errno);
    }
}
697
/* Sends a successful reply for request |unique| with |len| bytes of payload,
 * gathering header and payload into a single writev(). Failures are logged
 * but not otherwise reported to the caller. */
static void fuse_reply(struct fuse *fuse, __u64 unique, void *data, int len)
{
    struct fuse_out_header hdr = {
        .len = len + sizeof(hdr),
        .error = 0,
        .unique = unique,
    };
    struct iovec vec[2] = {
        { .iov_base = &hdr, .iov_len = sizeof(hdr) },
        { .iov_base = data, .iov_len = len },
    };

    if (writev(fuse->fd, vec, 2) < 0) {
        ERROR("*** REPLY FAILED *** %d\n", errno);
    }
}
718
/* Stats the backing file at |path|, looks up (or creates) the child node
 * for |name| under |parent|, and replies with a fuse_entry_out.
 *
 * On success the node's refcount has been bumped by
 * acquire_or_create_child_locked(), matching the reference the kernel now
 * holds. NOTE(review): fuse_reply() cannot report failure, so the refcount
 * rollback the README calls for on a failed reply never happens here —
 * verify whether that is acceptable.
 *
 * Returns NO_STATUS after replying, or a negative errno for the caller to
 * turn into an error reply. */
static int fuse_reply_entry(struct fuse* fuse, __u64 unique,
        struct node* parent, const char* name, const char* actual_name,
        const char* path)
{
    struct node* node;
    struct fuse_entry_out out;
    struct stat s;

    if (lstat(path, &s) < 0) {
        return -errno;
    }

    pthread_mutex_lock(&fuse->global->lock);
    node = acquire_or_create_child_locked(fuse, parent, name, actual_name);
    if (!node) {
        pthread_mutex_unlock(&fuse->global->lock);
        return -ENOMEM;
    }
    memset(&out, 0, sizeof(out));
    attr_from_stat(fuse, &out.attr, &s, node);
    /* Let the kernel cache this entry and its attributes for 10 seconds. */
    out.attr_valid = 10;
    out.entry_valid = 10;
    out.nodeid = node->nid;
    out.generation = node->gen;
    pthread_mutex_unlock(&fuse->global->lock);
    fuse_reply(fuse, unique, &out, sizeof(out));
    return NO_STATUS;
}
747
/* Stats the backing file at |path| and replies with a fuse_attr_out for
 * |node|. Returns NO_STATUS after replying, or -errno on stat failure.
 * The struct is memset rather than brace-initialized so padding bytes sent
 * to the kernel are zeroed too. */
static int fuse_reply_attr(struct fuse* fuse, __u64 unique, const struct node* node,
        const char* path)
{
    struct stat s;
    struct fuse_attr_out out;

    if (lstat(path, &s) == -1) {
        return -errno;
    }

    memset(&out, 0, sizeof(out));
    attr_from_stat(fuse, &out.attr, &s, node);
    out.attr_valid = 10; /* seconds the kernel may cache these attributes */
    fuse_reply(fuse, unique, &out, sizeof(out));
    return NO_STATUS;
}
763
/* Sends an unsolicited FUSE_NOTIFY_DELETE message so the kernel drops any
 * cached entry for |name| (inode |child|) under directory |parent|. Used to
 * keep the other mounted views coherent after a delete.
 *
 * For notifications the protocol reuses fuse_out_header: |error| carries
 * the notification code and |unique| must be 0. */
static void fuse_notify_delete(struct fuse* fuse, const __u64 parent,
        const __u64 child, const char* name) {
    struct fuse_out_header hdr;
    struct fuse_notify_delete_out data;
    struct iovec vec[3];
    size_t namelen = strlen(name);
    int res;

    /* Total length covers header, payload struct, and the NUL-terminated name. */
    hdr.len = sizeof(hdr) + sizeof(data) + namelen + 1;
    hdr.error = FUSE_NOTIFY_DELETE;
    hdr.unique = 0;

    data.parent = parent;
    data.child = child;
    data.namelen = namelen;
    data.padding = 0;

    vec[0].iov_base = &hdr;
    vec[0].iov_len = sizeof(hdr);
    vec[1].iov_base = &data;
    vec[1].iov_len = sizeof(data);
    vec[2].iov_base = (void*) name;
    vec[2].iov_len = namelen + 1;

    res = writev(fuse->fd, vec, 3);
    /* Ignore ENOENT, since other views may not have seen the entry */
    if (res < 0 && errno != ENOENT) {
        ERROR("*** NOTIFY FAILED *** %d\n", errno);
    }
}
794
handle_lookup(struct fuse * fuse,struct fuse_handler * handler,const struct fuse_in_header * hdr,const char * name)795 static int handle_lookup(struct fuse* fuse, struct fuse_handler* handler,
796 const struct fuse_in_header *hdr, const char* name)
797 {
798 struct node* parent_node;
799 char parent_path[PATH_MAX];
800 char child_path[PATH_MAX];
801 const char* actual_name;
802
803 pthread_mutex_lock(&fuse->global->lock);
804 parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
805 parent_path, sizeof(parent_path));
806 TRACE("[%d] LOOKUP %s @ %"PRIx64" (%s)\n", handler->token, name, hdr->nodeid,
807 parent_node ? parent_node->name : "?");
808 pthread_mutex_unlock(&fuse->global->lock);
809
810 if (!parent_node || !(actual_name = find_file_within(parent_path, name,
811 child_path, sizeof(child_path), 1))) {
812 return -ENOENT;
813 }
814 if (!check_caller_access_to_name(fuse, hdr, parent_node, name, R_OK)) {
815 return -EACCES;
816 }
817
818 return fuse_reply_entry(fuse, hdr->unique, parent_node, name, actual_name, child_path);
819 }
820
handle_forget(struct fuse * fuse,struct fuse_handler * handler,const struct fuse_in_header * hdr,const struct fuse_forget_in * req)821 static int handle_forget(struct fuse* fuse, struct fuse_handler* handler,
822 const struct fuse_in_header *hdr, const struct fuse_forget_in *req)
823 {
824 struct node* node;
825
826 pthread_mutex_lock(&fuse->global->lock);
827 node = lookup_node_by_id_locked(fuse, hdr->nodeid);
828 TRACE("[%d] FORGET #%"PRIu64" @ %"PRIx64" (%s)\n", handler->token, req->nlookup,
829 hdr->nodeid, node ? node->name : "?");
830 if (node) {
831 __u64 n = req->nlookup;
832 while (n--) {
833 release_node_locked(node);
834 }
835 }
836 pthread_mutex_unlock(&fuse->global->lock);
837 return NO_STATUS; /* no reply */
838 }
839
/* FUSE_GETATTR: stat the node's backing path and reply with its derived
 * attributes. */
static int handle_getattr(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header *hdr, const struct fuse_getattr_in *req)
{
    struct node* node;
    char path[PATH_MAX];

    pthread_mutex_lock(&fuse->global->lock);
    node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid, path, sizeof(path));
    TRACE("[%d] GETATTR flags=%x fh=%"PRIx64" @ %"PRIx64" (%s)\n", handler->token,
        req->getattr_flags, req->fh, hdr->nodeid, node ? node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!node) {
        return -ENOENT;
    }
    if (!check_caller_access_to_node(fuse, hdr, node, R_OK)) {
        return -EACCES;
    }

    return fuse_reply_attr(fuse, hdr->unique, node, path);
}
861
/* FUSE_SETATTR: apply size and timestamp changes to the backing file.
 * Ownership/mode changes are deliberately not implemented (see below). */
static int handle_setattr(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header *hdr, const struct fuse_setattr_in *req)
{
    struct node* node;
    char path[PATH_MAX];
    struct timespec times[2];

    pthread_mutex_lock(&fuse->global->lock);
    node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid, path, sizeof(path));
    TRACE("[%d] SETATTR fh=%"PRIx64" valid=%x @ %"PRIx64" (%s)\n", handler->token,
        req->fh, req->valid, hdr->nodeid, node ? node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!node) {
        return -ENOENT;
    }

    /* Skip the name-based access check when the request targets an open
     * file handle (FATTR_FH) — presumably access was validated at open
     * time; confirm against the OPEN handler. */
    if (!(req->valid & FATTR_FH) &&
            !check_caller_access_to_node(fuse, hdr, node, W_OK)) {
        return -EACCES;
    }

    /* XXX: incomplete implementation on purpose.
     * chmod/chown should NEVER be implemented.*/

    if ((req->valid & FATTR_SIZE) && truncate64(path, req->size) < 0) {
        return -errno;
    }

    /* Handle changing atime and mtime. If FATTR_ATIME and FATTR_ATIME_NOW
     * are both set, then set it to the current time. Else, set it to the
     * time specified in the request. Same goes for mtime. Use utimensat(2)
     * as it allows ATIME and MTIME to be changed independently, and has
     * nanosecond resolution which fuse also has.
     */
    if (req->valid & (FATTR_ATIME | FATTR_MTIME)) {
        /* UTIME_OMIT leaves the corresponding timestamp untouched. */
        times[0].tv_nsec = UTIME_OMIT;
        times[1].tv_nsec = UTIME_OMIT;
        if (req->valid & FATTR_ATIME) {
            if (req->valid & FATTR_ATIME_NOW) {
                times[0].tv_nsec = UTIME_NOW;
            } else {
                times[0].tv_sec = req->atime;
                times[0].tv_nsec = req->atimensec;
            }
        }
        if (req->valid & FATTR_MTIME) {
            if (req->valid & FATTR_MTIME_NOW) {
                times[1].tv_nsec = UTIME_NOW;
            } else {
                times[1].tv_sec = req->mtime;
                times[1].tv_nsec = req->mtimensec;
            }
        }
        TRACE("[%d] Calling utimensat on %s with atime %ld, mtime=%ld\n",
                handler->token, path, times[0].tv_sec, times[1].tv_sec);
        /* |path| is absolute, so the dirfd argument (-1) is ignored. */
        if (utimensat(-1, path, times, 0) < 0) {
            return -errno;
        }
    }
    return fuse_reply_attr(fuse, hdr->unique, node, path);
}
924
/* FUSE_MKNOD: create a node named |name| inside the directory identified by
 * hdr->nodeid.  Per the emulated-FAT permission model, the caller's
 * permission bits are discarded and every new file is created 0664. */
static int handle_mknod(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_mknod_in* req, const char* name)
{
    struct node* parent_node;
    char parent_path[PATH_MAX];
    char child_path[PATH_MAX];
    const char* actual_name;

    /* Lock only for the node/path lookup; path work below runs unlocked. */
    pthread_mutex_lock(&fuse->global->lock);
    parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            parent_path, sizeof(parent_path));
    TRACE("[%d] MKNOD %s 0%o @ %"PRIx64" (%s)\n", handler->token,
            name, req->mode, hdr->nodeid, parent_node ? parent_node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!parent_node || !(actual_name = find_file_within(parent_path, name,
            child_path, sizeof(child_path), 1))) {
        return -ENOENT;
    }
    if (!check_caller_access_to_name(fuse, hdr, parent_node, name, W_OK)) {
        return -EACCES;
    }
    /* Keep only the file-type bits from the request; force rw-rw-r--. */
    __u32 mode = (req->mode & (~0777)) | 0664;
    if (mknod(child_path, mode, req->rdev) < 0) {
        return -errno;
    }
    return fuse_reply_entry(fuse, hdr->unique, parent_node, name, actual_name, child_path);
}
953
/* FUSE_MKDIR: create directory |name| under hdr->nodeid with forced mode
 * 0775.  When the new directory is Android/data or Android/obb, also drop
 * a .nomedia marker so media scanners skip app-private storage. */
static int handle_mkdir(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_mkdir_in* req, const char* name)
{
    struct node* parent_node;
    char parent_path[PATH_MAX];
    char child_path[PATH_MAX];
    const char* actual_name;

    pthread_mutex_lock(&fuse->global->lock);
    parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            parent_path, sizeof(parent_path));
    TRACE("[%d] MKDIR %s 0%o @ %"PRIx64" (%s)\n", handler->token,
            name, req->mode, hdr->nodeid, parent_node ? parent_node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!parent_node || !(actual_name = find_file_within(parent_path, name,
            child_path, sizeof(child_path), 1))) {
        return -ENOENT;
    }
    if (!check_caller_access_to_name(fuse, hdr, parent_node, name, W_OK)) {
        return -EACCES;
    }
    /* Keep only the file-type bits from the request; force rwxrwxr-x. */
    __u32 mode = (req->mode & (~0777)) | 0775;
    if (mkdir(child_path, mode) < 0) {
        return -errno;
    }

    /* When creating /Android/data and /Android/obb, mark them as .nomedia */
    if (parent_node->perm == PERM_ANDROID && !strcasecmp(name, "data")) {
        char nomedia[PATH_MAX];
        snprintf(nomedia, PATH_MAX, "%s/.nomedia", child_path);
        /* NOTE(review): on touch() failure the directory itself is left
         * behind while we report -ENOENT — confirm this is intended. */
        if (touch(nomedia, 0664) != 0) {
            ERROR("Failed to touch(%s): %s\n", nomedia, strerror(errno));
            return -ENOENT;
        }
    }
    if (parent_node->perm == PERM_ANDROID && !strcasecmp(name, "obb")) {
        char nomedia[PATH_MAX];
        /* obb uses the global obb path, not the just-created child path. */
        snprintf(nomedia, PATH_MAX, "%s/.nomedia", fuse->global->obb_path);
        if (touch(nomedia, 0664) != 0) {
            ERROR("Failed to touch(%s): %s\n", nomedia, strerror(errno));
            return -ENOENT;
        }
    }

    return fuse_reply_entry(fuse, hdr->unique, parent_node, name, actual_name, child_path);
}
1001
/* FUSE_UNLINK: remove file |name| from directory hdr->nodeid.  On success,
 * the cached node (if any) is flagged deleted and the other two mount
 * views (default/read/write) are notified so they invalidate it too. */
static int handle_unlink(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const char* name)
{
    struct node* parent_node;
    struct node* child_node;
    char parent_path[PATH_MAX];
    char child_path[PATH_MAX];

    pthread_mutex_lock(&fuse->global->lock);
    parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            parent_path, sizeof(parent_path));
    TRACE("[%d] UNLINK %s @ %"PRIx64" (%s)\n", handler->token,
            name, hdr->nodeid, parent_node ? parent_node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!parent_node || !find_file_within(parent_path, name,
            child_path, sizeof(child_path), 1)) {
        return -ENOENT;
    }
    if (!check_caller_access_to_name(fuse, hdr, parent_node, name, W_OK)) {
        return -EACCES;
    }
    if (unlink(child_path) < 0) {
        return -errno;
    }
    /* Re-acquire the lock to flag the cached node as deleted. */
    pthread_mutex_lock(&fuse->global->lock);
    child_node = lookup_child_by_name_locked(parent_node, name);
    if (child_node) {
        child_node->deleted = true;
    }
    pthread_mutex_unlock(&fuse->global->lock);
    if (parent_node && child_node) {
        /* Tell all other views that node is gone */
        TRACE("[%d] fuse_notify_delete parent=%"PRIx64", child=%"PRIx64", name=%s\n",
                handler->token, (uint64_t) parent_node->nid, (uint64_t) child_node->nid, name);
        if (fuse != fuse->global->fuse_default) {
            fuse_notify_delete(fuse->global->fuse_default, parent_node->nid, child_node->nid, name);
        }
        if (fuse != fuse->global->fuse_read) {
            fuse_notify_delete(fuse->global->fuse_read, parent_node->nid, child_node->nid, name);
        }
        if (fuse != fuse->global->fuse_write) {
            fuse_notify_delete(fuse->global->fuse_write, parent_node->nid, child_node->nid, name);
        }
    }
    return 0;
}
1049
/* FUSE_RMDIR: remove directory |name| from hdr->nodeid.  Mirrors
 * handle_unlink: flag the cached node deleted and fan the deletion out
 * to the other two mount views. */
static int handle_rmdir(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const char* name)
{
    struct node* child_node;
    struct node* parent_node;
    char parent_path[PATH_MAX];
    char child_path[PATH_MAX];

    pthread_mutex_lock(&fuse->global->lock);
    parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            parent_path, sizeof(parent_path));
    TRACE("[%d] RMDIR %s @ %"PRIx64" (%s)\n", handler->token,
            name, hdr->nodeid, parent_node ? parent_node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!parent_node || !find_file_within(parent_path, name,
            child_path, sizeof(child_path), 1)) {
        return -ENOENT;
    }
    if (!check_caller_access_to_name(fuse, hdr, parent_node, name, W_OK)) {
        return -EACCES;
    }
    if (rmdir(child_path) < 0) {
        return -errno;
    }
    /* Re-acquire the lock to flag the cached node as deleted. */
    pthread_mutex_lock(&fuse->global->lock);
    child_node = lookup_child_by_name_locked(parent_node, name);
    if (child_node) {
        child_node->deleted = true;
    }
    pthread_mutex_unlock(&fuse->global->lock);
    if (parent_node && child_node) {
        /* Tell all other views that node is gone */
        TRACE("[%d] fuse_notify_delete parent=%"PRIx64", child=%"PRIx64", name=%s\n",
                handler->token, (uint64_t) parent_node->nid, (uint64_t) child_node->nid, name);
        if (fuse != fuse->global->fuse_default) {
            fuse_notify_delete(fuse->global->fuse_default, parent_node->nid, child_node->nid, name);
        }
        if (fuse != fuse->global->fuse_read) {
            fuse_notify_delete(fuse->global->fuse_read, parent_node->nid, child_node->nid, name);
        }
        if (fuse != fuse->global->fuse_write) {
            fuse_notify_delete(fuse->global->fuse_write, parent_node->nid, child_node->nid, name);
        }
    }
    return 0;
}
1097
/* FUSE_RENAME: move |old_name| (under hdr->nodeid) to |new_name| (under
 * req->newdir).  Error paths use labeled cleanup: lookup_error fails while
 * still holding the lock, io_error fails after the lock was dropped and
 * must re-acquire it before releasing the pinned child node. */
static int handle_rename(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_rename_in* req,
        const char* old_name, const char* new_name)
{
    struct node* old_parent_node;
    struct node* new_parent_node;
    struct node* child_node;
    char old_parent_path[PATH_MAX];
    char new_parent_path[PATH_MAX];
    char old_child_path[PATH_MAX];
    char new_child_path[PATH_MAX];
    const char* new_actual_name;
    int res;

    pthread_mutex_lock(&fuse->global->lock);
    old_parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            old_parent_path, sizeof(old_parent_path));
    new_parent_node = lookup_node_and_path_by_id_locked(fuse, req->newdir,
            new_parent_path, sizeof(new_parent_path));
    TRACE("[%d] RENAME %s->%s @ %"PRIx64" (%s) -> %"PRIx64" (%s)\n", handler->token,
            old_name, new_name,
            hdr->nodeid, old_parent_node ? old_parent_node->name : "?",
            req->newdir, new_parent_node ? new_parent_node->name : "?");
    if (!old_parent_node || !new_parent_node) {
        res = -ENOENT;
        goto lookup_error;
    }
    if (!check_caller_access_to_name(fuse, hdr, old_parent_node, old_name, W_OK)) {
        res = -EACCES;
        goto lookup_error;
    }
    if (!check_caller_access_to_name(fuse, hdr, new_parent_node, new_name, W_OK)) {
        res = -EACCES;
        goto lookup_error;
    }
    child_node = lookup_child_by_name_locked(old_parent_node, old_name);
    if (!child_node || get_node_path_locked(child_node,
            old_child_path, sizeof(old_child_path)) < 0) {
        res = -ENOENT;
        goto lookup_error;
    }
    /* Pin the child so it survives while we do filesystem I/O unlocked. */
    acquire_node_locked(child_node);
    pthread_mutex_unlock(&fuse->global->lock);

    /* Special case for renaming a file where destination is same path
     * differing only by case. In this case we don't want to look for a case
     * insensitive match. This allows commands like "mv foo FOO" to work as expected.
     */
    int search = old_parent_node != new_parent_node
            || strcasecmp(old_name, new_name);
    if (!(new_actual_name = find_file_within(new_parent_path, new_name,
            new_child_path, sizeof(new_child_path), search))) {
        res = -ENOENT;
        goto io_error;
    }

    TRACE("[%d] RENAME %s->%s\n", handler->token, old_child_path, new_child_path);
    res = rename(old_child_path, new_child_path);
    if (res < 0) {
        res = -errno;
        goto io_error;
    }

    pthread_mutex_lock(&fuse->global->lock);
    res = rename_node_locked(child_node, new_name, new_actual_name);
    if (!res) {
        /* Re-home the node and recompute permissions, which depend on the
         * new location in the tree. */
        remove_node_from_parent_locked(child_node);
        derive_permissions_locked(fuse, new_parent_node, child_node);
        derive_permissions_recursive_locked(fuse, child_node);
        add_node_to_parent_locked(child_node, new_parent_node);
    }
    goto done;

io_error:
    pthread_mutex_lock(&fuse->global->lock);
done:
    release_node_locked(child_node);
lookup_error:
    pthread_mutex_unlock(&fuse->global->lock);
    return res;
}
1179
/* Translate the access portion of open(2) flags into the access(2) mode
 * bits needed to authorize the operation. */
static int open_flags_to_access_mode(int open_flags) {
    switch (open_flags & O_ACCMODE) {
    case O_RDONLY:
        return R_OK;
    case O_WRONLY:
        return W_OK;
    default:
        /* Probably O_RDWR, but treat as default to be safe */
        return R_OK | W_OK;
    }
}
1190
/* FUSE_OPEN: open the file behind hdr->nodeid with the kernel-supplied
 * flags.  On success the heap-allocated handle's address doubles as the
 * FUSE file handle (ptr_to_id); it is freed in handle_release. */
static int handle_open(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_open_in* req)
{
    struct node* node;
    char path[PATH_MAX];
    struct fuse_open_out out;
    struct handle *h;

    pthread_mutex_lock(&fuse->global->lock);
    node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid, path, sizeof(path));
    TRACE("[%d] OPEN 0%o @ %"PRIx64" (%s)\n", handler->token,
            req->flags, hdr->nodeid, node ? node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!node) {
        return -ENOENT;
    }
    /* Demand only the access implied by the open flags (R, W, or both). */
    if (!check_caller_access_to_node(fuse, hdr, node,
            open_flags_to_access_mode(req->flags))) {
        return -EACCES;
    }
    h = malloc(sizeof(*h));
    if (!h) {
        return -ENOMEM;
    }
    TRACE("[%d] OPEN %s\n", handler->token, path);
    h->fd = open(path, req->flags);
    if (h->fd < 0) {
        free(h);
        return -errno;
    }
    out.fh = ptr_to_id(h);
    out.open_flags = 0;
    out.padding = 0;
    fuse_reply(fuse, hdr->unique, &out, sizeof(out));
    /* Reply already sent; NO_STATUS tells the caller not to send another. */
    return NO_STATUS;
}
1228
/* FUSE_READ: serve a read from the open handle in req->fh.  The reply is
 * staged in a page-aligned region carved out of the handler's own request
 * buffer, so all needed request fields are copied to locals first. */
static int handle_read(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_read_in* req)
{
    struct handle *h = id_to_ptr(req->fh);
    __u64 unique = hdr->unique;
    __u32 size = req->size;
    __u64 offset = req->offset;
    int res;
    /* Round up to the next PAGE_SIZE boundary inside read_buffer;
     * presumably page alignment is required for O_DIRECT fds — confirm. */
    __u8 *read_buffer = (__u8 *) ((uintptr_t)(handler->read_buffer + PAGE_SIZE) & ~((uintptr_t)PAGE_SIZE-1));

    /* Don't access any other fields of hdr or req beyond this point, the read buffer
     * overlaps the request buffer and will clobber data in the request. This
     * saves us 128KB per request handler thread at the cost of this scary comment. */

    TRACE("[%d] READ %p(%d) %u@%"PRIu64"\n", handler->token,
            h, h->fd, size, (uint64_t) offset);
    if (size > MAX_READ) {
        return -EINVAL;
    }
    res = pread64(h->fd, read_buffer, size, offset);
    if (res < 0) {
        return -errno;
    }
    fuse_reply(fuse, unique, read_buffer, res);
    return NO_STATUS;
}
1255
/* FUSE_WRITE: write req->size bytes from |buffer| at req->offset through
 * the open handle in req->fh. */
static int handle_write(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_write_in* req,
        const void* buffer)
{
    struct fuse_write_out out;
    struct handle *h = id_to_ptr(req->fh);
    int res;
    /* NOTE(review): VLA sized by the (kernel-supplied) req->size lives on
     * this thread's stack — assumes req->size is bounded by max_write;
     * confirm against the INIT negotiation. */
    __u8 aligned_buffer[req->size] __attribute__((__aligned__(PAGE_SIZE)));

    if (req->flags & O_DIRECT) {
        /* O_DIRECT requires an aligned source; copy into the aligned VLA. */
        memcpy(aligned_buffer, buffer, req->size);
        buffer = (const __u8*) aligned_buffer;
    }

    TRACE("[%d] WRITE %p(%d) %u@%"PRIu64"\n", handler->token,
            h, h->fd, req->size, req->offset);
    res = pwrite64(h->fd, buffer, req->size, req->offset);
    if (res < 0) {
        return -errno;
    }
    out.size = res;
    out.padding = 0;
    fuse_reply(fuse, hdr->unique, &out, sizeof(out));
    return NO_STATUS;
}
1281
/* FUSE_STATFS: report filesystem statistics for the backing storage. */
static int handle_statfs(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr)
{
    char path[PATH_MAX];
    struct statfs stat;
    struct fuse_statfs_out out;
    int res;

    pthread_mutex_lock(&fuse->global->lock);
    TRACE("[%d] STATFS\n", handler->token);
    res = get_node_path_locked(&fuse->global->root, path, sizeof(path));
    pthread_mutex_unlock(&fuse->global->lock);
    if (res < 0) {
        return -ENOENT;
    }
    /* NOTE(review): |path| is computed above but statfs() is issued on
     * root.name instead — verify whether these are always identical for
     * the root node or whether |path| should be used here. */
    if (statfs(fuse->global->root.name, &stat) < 0) {
        return -errno;
    }
    memset(&out, 0, sizeof(out));
    out.st.blocks = stat.f_blocks;
    out.st.bfree = stat.f_bfree;
    out.st.bavail = stat.f_bavail;
    out.st.files = stat.f_files;
    out.st.ffree = stat.f_ffree;
    out.st.bsize = stat.f_bsize;
    out.st.namelen = stat.f_namelen;
    out.st.frsize = stat.f_frsize;
    fuse_reply(fuse, hdr->unique, &out, sizeof(out));
    return NO_STATUS;
}
1312
handle_release(struct fuse * fuse,struct fuse_handler * handler,const struct fuse_in_header * hdr,const struct fuse_release_in * req)1313 static int handle_release(struct fuse* fuse, struct fuse_handler* handler,
1314 const struct fuse_in_header* hdr, const struct fuse_release_in* req)
1315 {
1316 struct handle *h = id_to_ptr(req->fh);
1317
1318 TRACE("[%d] RELEASE %p(%d)\n", handler->token, h, h->fd);
1319 close(h->fd);
1320 free(h);
1321 return 0;
1322 }
1323
handle_fsync(struct fuse * fuse,struct fuse_handler * handler,const struct fuse_in_header * hdr,const struct fuse_fsync_in * req)1324 static int handle_fsync(struct fuse* fuse, struct fuse_handler* handler,
1325 const struct fuse_in_header* hdr, const struct fuse_fsync_in* req)
1326 {
1327 bool is_dir = (hdr->opcode == FUSE_FSYNCDIR);
1328 bool is_data_sync = req->fsync_flags & 1;
1329
1330 int fd = -1;
1331 if (is_dir) {
1332 struct dirhandle *dh = id_to_ptr(req->fh);
1333 fd = dirfd(dh->d);
1334 } else {
1335 struct handle *h = id_to_ptr(req->fh);
1336 fd = h->fd;
1337 }
1338
1339 TRACE("[%d] %s %p(%d) is_data_sync=%d\n", handler->token,
1340 is_dir ? "FSYNCDIR" : "FSYNC",
1341 id_to_ptr(req->fh), fd, is_data_sync);
1342 int res = is_data_sync ? fdatasync(fd) : fsync(fd);
1343 if (res == -1) {
1344 return -errno;
1345 }
1346 return 0;
1347 }
1348
handle_flush(struct fuse * fuse,struct fuse_handler * handler,const struct fuse_in_header * hdr)1349 static int handle_flush(struct fuse* fuse, struct fuse_handler* handler,
1350 const struct fuse_in_header* hdr)
1351 {
1352 TRACE("[%d] FLUSH\n", handler->token);
1353 return 0;
1354 }
1355
/* FUSE_OPENDIR: open the directory behind hdr->nodeid.  A heap-allocated
 * dirhandle wraps the DIR* and its address is returned as the FUSE file
 * handle; it is freed in handle_releasedir. */
static int handle_opendir(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_open_in* req)
{
    struct node* node;
    char path[PATH_MAX];
    struct fuse_open_out out;
    struct dirhandle *h;

    pthread_mutex_lock(&fuse->global->lock);
    node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid, path, sizeof(path));
    TRACE("[%d] OPENDIR @ %"PRIx64" (%s)\n", handler->token,
            hdr->nodeid, node ? node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!node) {
        return -ENOENT;
    }
    /* Listing a directory only requires read access. */
    if (!check_caller_access_to_node(fuse, hdr, node, R_OK)) {
        return -EACCES;
    }
    h = malloc(sizeof(*h));
    if (!h) {
        return -ENOMEM;
    }
    TRACE("[%d] OPENDIR %s\n", handler->token, path);
    h->d = opendir(path);
    if (!h->d) {
        free(h);
        return -errno;
    }
    out.fh = ptr_to_id(h);
    out.open_flags = 0;
    out.padding = 0;
    fuse_reply(fuse, hdr->unique, &out, sizeof(out));
    return NO_STATUS;
}
1392
/* FUSE_READDIR: return directory entries one at a time.  Each reply
 * carries a single fuse_dirent; an empty reply signals end-of-directory.
 * Offsets are synthesized (previous offset + 1) solely so that a reset to
 * offset 0 can be recognized as a rewind request. */
static int handle_readdir(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_read_in* req)
{
    char buffer[8192];
    struct fuse_dirent *fde = (struct fuse_dirent*) buffer;
    struct dirent *de;
    struct dirhandle *h = id_to_ptr(req->fh);

    TRACE("[%d] READDIR %p\n", handler->token, h);
    if (req->offset == 0) {
        /* rewinddir() might have been called above us, so rewind here too */
        TRACE("[%d] calling rewinddir()\n", handler->token);
        rewinddir(h->d);
    }
    de = readdir(h->d);
    if (!de) {
        /* End of directory: a zero-length reply via the generic status path. */
        return 0;
    }
    /* Real inode numbers aren't exposed; the kernel accepts UNKNOWN_INO. */
    fde->ino = FUSE_UNKNOWN_INO;
    /* increment the offset so we can detect when rewinddir() seeks back to the beginning */
    fde->off = req->offset + 1;
    fde->type = de->d_type;
    fde->namelen = strlen(de->d_name);
    memcpy(fde->name, de->d_name, fde->namelen + 1);
    fuse_reply(fuse, hdr->unique, fde,
            FUSE_DIRENT_ALIGN(sizeof(struct fuse_dirent) + fde->namelen));
    return NO_STATUS;
}
1421
handle_releasedir(struct fuse * fuse,struct fuse_handler * handler,const struct fuse_in_header * hdr,const struct fuse_release_in * req)1422 static int handle_releasedir(struct fuse* fuse, struct fuse_handler* handler,
1423 const struct fuse_in_header* hdr, const struct fuse_release_in* req)
1424 {
1425 struct dirhandle *h = id_to_ptr(req->fh);
1426
1427 TRACE("[%d] RELEASEDIR %p\n", handler->token, h);
1428 closedir(h->d);
1429 free(h);
1430 return 0;
1431 }
1432
/* FUSE_INIT: negotiate the protocol version and connection parameters
 * with the kernel.  Requires kernel protocol 7.6+, caps the negotiated
 * minor at 15 (no BATCH_FORGET support), and replies with the 7.22-sized
 * init_out structure when the kernel is older than 7.23. */
static int handle_init(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_init_in* req)
{
    struct fuse_init_out out;
    size_t fuse_struct_size;

    TRACE("[%d] INIT ver=%d.%d maxread=%d flags=%x\n",
            handler->token, req->major, req->minor, req->max_readahead, req->flags);

    /* Kernel 2.6.16 is the first stable kernel with struct fuse_init_out
     * defined (fuse version 7.6). The structure is the same from 7.6 through
     * 7.22. Beginning with 7.23, the structure increased in size and added
     * new parameters.
     */
    if (req->major != FUSE_KERNEL_VERSION || req->minor < 6) {
        ERROR("Fuse kernel version mismatch: Kernel version %d.%d, Expected at least %d.6",
                req->major, req->minor, FUSE_KERNEL_VERSION);
        return -1;
    }

    /* We limit ourselves to 15 because we don't handle BATCH_FORGET yet */
    out.minor = MIN(req->minor, 15);
    fuse_struct_size = sizeof(out);
#if defined(FUSE_COMPAT_22_INIT_OUT_SIZE)
    /* FUSE_KERNEL_VERSION >= 23. */

    /* If the kernel only works on minor revs older than or equal to 22,
     * then use the older structure size since this code only uses the 7.22
     * version of the structure. */
    if (req->minor <= 22) {
        fuse_struct_size = FUSE_COMPAT_22_INIT_OUT_SIZE;
    }
#endif

    out.major = FUSE_KERNEL_VERSION;
    out.max_readahead = req->max_readahead;
    out.flags = FUSE_ATOMIC_O_TRUNC | FUSE_BIG_WRITES;
    out.max_background = 32;
    out.congestion_threshold = 32;
    out.max_write = MAX_WRITE;
    fuse_reply(fuse, hdr->unique, &out, fuse_struct_size);
    return NO_STATUS;
}
1476
/* FUSE_CANONICAL_PATH (Android-specific, not upstreamed): reply with the
 * backing-filesystem path for hdr->nodeid as a NUL-terminated string. */
static int handle_canonical_path(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header *hdr)
{
    struct node* node;
    char path[PATH_MAX];
    int len;

    pthread_mutex_lock(&fuse->global->lock);
    node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            path, sizeof(path));
    TRACE("[%d] CANONICAL_PATH @ %" PRIx64 " (%s)\n", handler->token, hdr->nodeid,
            node ? node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!node) {
        return -ENOENT;
    }
    if (!check_caller_access_to_node(fuse, hdr, node, R_OK)) {
        return -EACCES;
    }
    /* Defensive clamp: ensure the reply fits in PATH_MAX and stays
     * NUL-terminated even if the path filled the buffer exactly. */
    len = strlen(path);
    if (len + 1 > PATH_MAX)
        len = PATH_MAX - 1;
    path[PATH_MAX - 1] = 0;
    fuse_reply(fuse, hdr->unique, path, len + 1);
    return NO_STATUS;
}
1504
1505
/* Dispatch one kernel FUSE request to its opcode handler.  |data| points
 * just past the fuse_in_header and is interpreted per opcode.  Handlers
 * return NO_STATUS when they have already sent a reply, 0 for a plain
 * success status, or a negative errno for an error status. */
static int handle_fuse_request(struct fuse *fuse, struct fuse_handler* handler,
        const struct fuse_in_header *hdr, const void *data, size_t data_len)
{
    switch (hdr->opcode) {
    case FUSE_LOOKUP: { /* bytez[] -> entry_out */
        const char* name = data;
        return handle_lookup(fuse, handler, hdr, name);
    }

    case FUSE_FORGET: {
        const struct fuse_forget_in *req = data;
        return handle_forget(fuse, handler, hdr, req);
    }

    case FUSE_GETATTR: { /* getattr_in -> attr_out */
        const struct fuse_getattr_in *req = data;
        return handle_getattr(fuse, handler, hdr, req);
    }

    case FUSE_SETATTR: { /* setattr_in -> attr_out */
        const struct fuse_setattr_in *req = data;
        return handle_setattr(fuse, handler, hdr, req);
    }

/* Symlinks and hard links are deliberately unsupported (see README above). */
//    case FUSE_READLINK:
//    case FUSE_SYMLINK:
    case FUSE_MKNOD: { /* mknod_in, bytez[] -> entry_out */
        const struct fuse_mknod_in *req = data;
        const char *name = ((const char*) data) + sizeof(*req);
        return handle_mknod(fuse, handler, hdr, req, name);
    }

    case FUSE_MKDIR: { /* mkdir_in, bytez[] -> entry_out */
        const struct fuse_mkdir_in *req = data;
        const char *name = ((const char*) data) + sizeof(*req);
        return handle_mkdir(fuse, handler, hdr, req, name);
    }

    case FUSE_UNLINK: { /* bytez[] -> */
        const char* name = data;
        return handle_unlink(fuse, handler, hdr, name);
    }

    case FUSE_RMDIR: { /* bytez[] -> */
        const char* name = data;
        return handle_rmdir(fuse, handler, hdr, name);
    }

    case FUSE_RENAME: { /* rename_in, oldname, newname ->  */
        const struct fuse_rename_in *req = data;
        const char *old_name = ((const char*) data) + sizeof(*req);
        const char *new_name = old_name + strlen(old_name) + 1;
        return handle_rename(fuse, handler, hdr, req, old_name, new_name);
    }

//    case FUSE_LINK:
    case FUSE_OPEN: { /* open_in -> open_out */
        const struct fuse_open_in *req = data;
        return handle_open(fuse, handler, hdr, req);
    }

    case FUSE_READ: { /* read_in -> byte[] */
        const struct fuse_read_in *req = data;
        return handle_read(fuse, handler, hdr, req);
    }

    case FUSE_WRITE: { /* write_in, byte[write_in.size] -> write_out */
        const struct fuse_write_in *req = data;
        const void* buffer = (const __u8*)data + sizeof(*req);
        return handle_write(fuse, handler, hdr, req, buffer);
    }

    case FUSE_STATFS: { /* getattr_in -> attr_out */
        return handle_statfs(fuse, handler, hdr);
    }

    case FUSE_RELEASE: { /* release_in -> */
        const struct fuse_release_in *req = data;
        return handle_release(fuse, handler, hdr, req);
    }

    case FUSE_FSYNC:
    case FUSE_FSYNCDIR: {
        const struct fuse_fsync_in *req = data;
        return handle_fsync(fuse, handler, hdr, req);
    }

//    case FUSE_SETXATTR:
//    case FUSE_GETXATTR:
//    case FUSE_LISTXATTR:
//    case FUSE_REMOVEXATTR:
    case FUSE_FLUSH: {
        return handle_flush(fuse, handler, hdr);
    }

    case FUSE_OPENDIR: { /* open_in -> open_out */
        const struct fuse_open_in *req = data;
        return handle_opendir(fuse, handler, hdr, req);
    }

    case FUSE_READDIR: {
        const struct fuse_read_in *req = data;
        return handle_readdir(fuse, handler, hdr, req);
    }

    case FUSE_RELEASEDIR: { /* release_in -> */
        const struct fuse_release_in *req = data;
        return handle_releasedir(fuse, handler, hdr, req);
    }

    case FUSE_INIT: { /* init_in -> init_out */
        const struct fuse_init_in *req = data;
        return handle_init(fuse, handler, hdr, req);
    }

    case FUSE_CANONICAL_PATH: { /* nodeid -> bytez[] */
        return handle_canonical_path(fuse, handler, hdr);
    }

    default: {
        TRACE("[%d] NOTIMPL op=%d uniq=%"PRIx64" nid=%"PRIx64"\n",
                handler->token, hdr->opcode, hdr->unique, hdr->nodeid);
        return -ENOSYS;
    }
    }
}
1632
/* Per-thread request loop: read requests from the FUSE device, validate
 * framing, dispatch, and send a status reply unless the handler already
 * replied (NO_STATUS).  Never returns; exits the process if the FUSE
 * device disappears (ENODEV — filesystem was unmounted). */
static void handle_fuse_requests(struct fuse_handler* handler)
{
    struct fuse* fuse = handler->fuse;
    for (;;) {
        ssize_t len = TEMP_FAILURE_RETRY(read(fuse->fd,
                handler->request_buffer, sizeof(handler->request_buffer)));
        if (len < 0) {
            if (errno == ENODEV) {
                ERROR("[%d] someone stole our marbles!\n", handler->token);
                exit(2);
            }
            ERROR("[%d] handle_fuse_requests: errno=%d\n", handler->token, errno);
            continue;
        }

        /* Framing checks: must at least hold a header... */
        if ((size_t)len < sizeof(struct fuse_in_header)) {
            ERROR("[%d] request too short: len=%zu\n", handler->token, (size_t)len);
            continue;
        }

        /* ...and the header's own length must match what we read. */
        const struct fuse_in_header *hdr = (void*)handler->request_buffer;
        if (hdr->len != (size_t)len) {
            ERROR("[%d] malformed header: len=%zu, hdr->len=%u\n",
                    handler->token, (size_t)len, hdr->len);
            continue;
        }

        const void *data = handler->request_buffer + sizeof(struct fuse_in_header);
        size_t data_len = len - sizeof(struct fuse_in_header);
        /* Save |unique| before dispatch: see the buffer-reuse note below. */
        __u64 unique = hdr->unique;
        int res = handle_fuse_request(fuse, handler, hdr, data, data_len);

        /* We do not access the request again after this point because the underlying
         * buffer storage may have been reused while processing the request. */

        if (res != NO_STATUS) {
            if (res) {
                TRACE("[%d] ERROR %d\n", handler->token, res);
            }
            fuse_status(fuse, unique, res);
        }
    }
}
1676
/* pthread entry point: run one handler's FUSE request loop forever. */
static void* start_handler(void* data)
{
    handle_fuse_requests((struct fuse_handler*) data);
    return NULL;
}
1683
remove_str_to_int(void * key,void * value,void * context)1684 static bool remove_str_to_int(void *key, void *value, void *context) {
1685 Hashmap* map = context;
1686 hashmapRemove(map, key);
1687 free(key);
1688 return true;
1689 }
1690
/* packagelist_parse callback: record |info->name| -> appid in the global
 * package map.  The duplicated key string is owned by the map and freed
 * later by remove_str_to_int.  Always returns true to keep parsing. */
static bool package_parse_callback(pkg_info *info, void *userdata) {
    struct fuse_global *global = (struct fuse_global *)userdata;

    char* name = strdup(info->name);
    /* Fix: don't hand a NULL key to hashmapPut if strdup() fails under
     * memory pressure; skip the entry instead. */
    if (name) {
        hashmapPut(global->package_to_appid, name, (void*) (uintptr_t) info->uid);
    }
    packagelist_free(info);
    return true;
}
1699
/* Reload the package->appid map from the on-disk package list, then
 * recompute ownership over the whole node tree under the global lock.
 * Returns the result of packagelist_parse. */
static bool read_package_list(struct fuse_global* global) {
    pthread_mutex_lock(&global->lock);

    /* Drop all stale entries (and their strdup'd keys) before re-parsing. */
    hashmapForEach(global->package_to_appid, remove_str_to_int, global->package_to_appid);

    bool rc = packagelist_parse(package_parse_callback, global);
    TRACE("read_package_list: found %zu packages\n",
            hashmapSize(global->package_to_appid));

    /* Regenerate ownership details using newly loaded mapping */
    derive_permissions_recursive_locked(global->fuse_default, &global->root);

    pthread_mutex_unlock(&global->lock);

    return rc;
}
1716
/* Block forever watching PACKAGES_LIST_FILE via inotify.  Each time the
 * file is replaced (IN_DELETE_SELF / IN_IGNORED), re-arm the watch and
 * reload the package map.  Returns only on unrecoverable errors. */
static void watch_package_list(struct fuse_global* global) {
    struct inotify_event *event;
    char event_buf[512];

    int nfd = inotify_init();
    if (nfd < 0) {
        ERROR("inotify_init failed: %s\n", strerror(errno));
        return;
    }

    bool active = false;
    while (1) {
        if (!active) {
            int res = inotify_add_watch(nfd, PACKAGES_LIST_FILE, IN_DELETE_SELF);
            if (res == -1) {
                if (errno == ENOENT || errno == EACCES) {
                    /* Framework may not have created yet, sleep and retry */
                    ERROR("missing \"%s\"; retrying\n", PACKAGES_LIST_FILE);
                    sleep(3);
                    continue;
                } else {
                    ERROR("inotify_add_watch failed: %s\n", strerror(errno));
                    return;
                }
            }

            /* Watch above will tell us about any future changes, so
             * read the current state. */
            if (read_package_list(global) == false) {
                ERROR("read_package_list failed\n");
                return;
            }
            active = true;
        }

        int event_pos = 0;
        /* NOTE(review): a short-but-successful read (res >= 0 but smaller
         * than an event) reaches the errno check with stale errno — the
         * EINTR branch assumes res == -1.  Confirm whether that matters. */
        int res = read(nfd, event_buf, sizeof(event_buf));
        if (res < (int) sizeof(*event)) {
            if (errno == EINTR)
                continue;
            ERROR("failed to read inotify event: %s\n", strerror(errno));
            return;
        }

        /* Walk every event packed into this read. */
        while (res >= (int) sizeof(*event)) {
            int event_size;
            event = (struct inotify_event *) (event_buf + event_pos);

            TRACE("inotify event: %08x\n", event->mask);
            if ((event->mask & IN_IGNORED) == IN_IGNORED) {
                /* Previously watched file was deleted, probably due to move
                 * that swapped in new data; re-arm the watch and read. */
                active = false;
            }

            event_size = sizeof(*event) + event->len;
            res -= event_size;
            event_pos += event_size;
        }
    }
}
1778
/* Print command-line usage to the log and return the exit status (1)
 * that main should propagate. */
static int usage() {
    ERROR("usage: sdcard [OPTIONS] <source_path> <label>\n"
            "    -u: specify UID to run as\n"
            "    -g: specify GID to run as\n"
            "    -U: specify user ID that owns device\n"
            "    -m: source_path is multi-user\n"
            "    -w: runtime write mount has full write access\n"
            "\n");
    return 1;
}
1789
fuse_setup(struct fuse * fuse,gid_t gid,mode_t mask)1790 static int fuse_setup(struct fuse* fuse, gid_t gid, mode_t mask) {
1791 char opts[256];
1792
1793 fuse->fd = open("/dev/fuse", O_RDWR);
1794 if (fuse->fd == -1) {
1795 ERROR("failed to open fuse device: %s\n", strerror(errno));
1796 return -1;
1797 }
1798
1799 umount2(fuse->dest_path, MNT_DETACH);
1800
1801 snprintf(opts, sizeof(opts),
1802 "fd=%i,rootmode=40000,default_permissions,allow_other,user_id=%d,group_id=%d",
1803 fuse->fd, fuse->global->uid, fuse->global->gid);
1804 if (mount("/dev/fuse", fuse->dest_path, "fuse", MS_NOSUID | MS_NODEV | MS_NOEXEC |
1805 MS_NOATIME, opts) != 0) {
1806 ERROR("failed to mount fuse filesystem: %s\n", strerror(errno));
1807 return -1;
1808 }
1809
1810 fuse->gid = gid;
1811 fuse->mask = mask;
1812
1813 return 0;
1814 }
1815
/*
 * Sets up the three FUSE views of the emulated storage at source_path
 * (default / read / write, mounted under /mnt/runtime/<view>/<label>),
 * drops root privileges to uid/gid, starts one request-handler thread
 * per view, and then blocks in watch_package_list() keeping the
 * package -> appid map current.
 *
 * userid is the Android user that owns the device; multi_user selects
 * per-user isolated layout (PERM_PRE_ROOT with an <root>/obb dir) vs.
 * the shared physical layout (PERM_ROOT, <root>/Android/obb).
 * full_write relaxes the group/other masks on the read/write views.
 *
 * Never returns: any setup failure exit(1)s, and watch_package_list()
 * is not expected to terminate.
 */
static void run(const char* source_path, const char* label, uid_t uid,
        gid_t gid, userid_t userid, bool multi_user, bool full_write) {
    struct fuse_global global;
    struct fuse fuse_default;
    struct fuse fuse_read;
    struct fuse fuse_write;
    struct fuse_handler handler_default;
    struct fuse_handler handler_read;
    struct fuse_handler handler_write;
    pthread_t thread_default;
    pthread_t thread_read;
    pthread_t thread_write;

    memset(&global, 0, sizeof(global));
    memset(&fuse_default, 0, sizeof(fuse_default));
    memset(&fuse_read, 0, sizeof(fuse_read));
    memset(&fuse_write, 0, sizeof(fuse_write));
    memset(&handler_default, 0, sizeof(handler_default));
    memset(&handler_read, 0, sizeof(handler_read));
    memset(&handler_write, 0, sizeof(handler_write));

    pthread_mutex_init(&global.lock, NULL);
    global.package_to_appid = hashmapCreate(256, str_hash, str_icase_equals);
    global.uid = uid;
    global.gid = gid;
    global.multi_user = multi_user;
    global.next_generation = 0;
    global.inode_ctr = 1;

    /* The root node is pinned for the lifetime of the mount. */
    memset(&global.root, 0, sizeof(global.root));
    global.root.nid = FUSE_ROOT_ID; /* 1 */
    global.root.refcount = 2;
    global.root.namelen = strlen(source_path);
    global.root.name = strdup(source_path);
    global.root.userid = userid;
    global.root.uid = AID_ROOT;
    global.root.under_android = false;

    /* Bounded copy: the previous strcpy() could overflow
     * global.source_path on an over-long path. Matches the bounded
     * snprintf() used for obb_path below. */
    snprintf(global.source_path, sizeof(global.source_path), "%s", source_path);

    if (multi_user) {
        global.root.perm = PERM_PRE_ROOT;
        snprintf(global.obb_path, sizeof(global.obb_path), "%s/obb", source_path);
    } else {
        global.root.perm = PERM_ROOT;
        snprintf(global.obb_path, sizeof(global.obb_path), "%s/Android/obb", source_path);
    }

    fuse_default.global = &global;
    fuse_read.global = &global;
    fuse_write.global = &global;

    global.fuse_default = &fuse_default;
    global.fuse_read = &fuse_read;
    global.fuse_write = &fuse_write;

    snprintf(fuse_default.dest_path, PATH_MAX, "/mnt/runtime/default/%s", label);
    snprintf(fuse_read.dest_path, PATH_MAX, "/mnt/runtime/read/%s", label);
    snprintf(fuse_write.dest_path, PATH_MAX, "/mnt/runtime/write/%s", label);

    handler_default.fuse = &fuse_default;
    handler_read.fuse = &fuse_read;
    handler_write.fuse = &fuse_write;

    handler_default.token = 0;
    handler_read.token = 1;
    handler_write.token = 2;

    umask(0);

    if (multi_user) {
        /* Multi-user storage is fully isolated per user, so "other"
         * permissions are completely masked off. */
        if (fuse_setup(&fuse_default, AID_SDCARD_RW, 0006)
                || fuse_setup(&fuse_read, AID_EVERYBODY, 0027)
                || fuse_setup(&fuse_write, AID_EVERYBODY, full_write ? 0007 : 0027)) {
            ERROR("failed to fuse_setup\n");
            exit(1);
        }
    } else {
        /* Physical storage is readable by all users on device, but
         * the Android directories are masked off to a single user
         * deep inside attr_from_stat(). */
        if (fuse_setup(&fuse_default, AID_SDCARD_RW, 0006)
                || fuse_setup(&fuse_read, AID_EVERYBODY, full_write ? 0027 : 0022)
                || fuse_setup(&fuse_write, AID_EVERYBODY, full_write ? 0007 : 0022)) {
            ERROR("failed to fuse_setup\n");
            exit(1);
        }
    }

    /* Drop privs: supplementary groups first, then gid, then uid —
     * this order is required or the later calls lack permission. */
    if (setgroups(sizeof(kGroups) / sizeof(kGroups[0]), kGroups) < 0) {
        ERROR("cannot setgroups: %s\n", strerror(errno));
        exit(1);
    }
    if (setgid(gid) < 0) {
        ERROR("cannot setgid: %s\n", strerror(errno));
        exit(1);
    }
    if (setuid(uid) < 0) {
        ERROR("cannot setuid: %s\n", strerror(errno));
        exit(1);
    }

    if (multi_user) {
        fs_prepare_dir(global.obb_path, 0775, uid, gid);
    }

    if (pthread_create(&thread_default, NULL, start_handler, &handler_default)
            || pthread_create(&thread_read, NULL, start_handler, &handler_read)
            || pthread_create(&thread_write, NULL, start_handler, &handler_write)) {
        ERROR("failed to pthread_create\n");
        exit(1);
    }

    watch_package_list(&global);
    ERROR("terminated prematurely\n");
    exit(1);
}
1936
/* Mounts an in-kernel sdcardfs view of source_path at dest_path with
 * the given ownership (fsuid/fsgid), owner user, default group and
 * permission mask. Returns 0 on success, -1 on failure (logged). */
static int sdcardfs_setup(const char *source_path, const char *dest_path, uid_t fsuid,
        gid_t fsgid, bool multi_user, userid_t userid, gid_t gid, mode_t mask) {
    const unsigned long kMountFlags = MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_NOATIME;
    char mount_opts[256];

    snprintf(mount_opts, sizeof(mount_opts),
            "fsuid=%d,fsgid=%d,%smask=%d,userid=%d,gid=%d",
            fsuid, fsgid, multi_user ? "multiuser," : "", mask, userid, gid);

    if (mount(source_path, dest_path, "sdcardfs", kMountFlags, mount_opts) == 0) {
        return 0;
    }
    ERROR("failed to mount sdcardfs filesystem: %s\n", strerror(errno));
    return -1;
}
1953
/*
 * Mounts the three sdcardfs views (default / read / write under
 * /mnt/runtime/<view>/<label>) of source_path, drops root privileges
 * to uid/gid, prepares the per-user obb directory in multi-user mode,
 * and exits. Unlike run(), the kernel handles all traffic afterwards,
 * so this process has nothing left to do and exit(0)s on success.
 *
 * Mask choices mirror run(): multi-user storage masks "other"
 * completely; physical storage is world-readable with Android dirs
 * masked inside the kernel fs. full_write relaxes the read/write
 * view masks.
 */
static void run_sdcardfs(const char* source_path, const char* label, uid_t uid,
        gid_t gid, userid_t userid, bool multi_user, bool full_write) {
    char dest_path_default[PATH_MAX];
    char dest_path_read[PATH_MAX];
    char dest_path_write[PATH_MAX];
    char obb_path[PATH_MAX];
    snprintf(dest_path_default, PATH_MAX, "/mnt/runtime/default/%s", label);
    snprintf(dest_path_read, PATH_MAX, "/mnt/runtime/read/%s", label);
    snprintf(dest_path_write, PATH_MAX, "/mnt/runtime/write/%s", label);

    umask(0);
    if (multi_user) {
        /* Multi-user storage is fully isolated per user, so "other"
         * permissions are completely masked off. */
        if (sdcardfs_setup(source_path, dest_path_default, uid, gid, multi_user, userid,
                                                          AID_SDCARD_RW, 0006)
                || sdcardfs_setup(source_path, dest_path_read, uid, gid, multi_user, userid,
                                                          AID_EVERYBODY, 0027)
                || sdcardfs_setup(source_path, dest_path_write, uid, gid, multi_user, userid,
                                                          AID_EVERYBODY, full_write ? 0007 : 0027)) {
            /* Fixed copy-paste: this path uses sdcardfs_setup, not fuse_setup. */
            ERROR("failed to sdcardfs_setup\n");
            exit(1);
        }
    } else {
        /* Physical storage is readable by all users on device, but
         * the Android directories are masked off to a single user
         * deep inside attr_from_stat(). */
        if (sdcardfs_setup(source_path, dest_path_default, uid, gid, multi_user, userid,
                                                          AID_SDCARD_RW, 0006)
                || sdcardfs_setup(source_path, dest_path_read, uid, gid, multi_user, userid,
                                                          AID_EVERYBODY, full_write ? 0027 : 0022)
                || sdcardfs_setup(source_path, dest_path_write, uid, gid, multi_user, userid,
                                                          AID_EVERYBODY, full_write ? 0007 : 0022)) {
            ERROR("failed to sdcardfs_setup\n");
            exit(1);
        }
    }

    /* Drop privs: groups first, then gid, then uid (required order). */
    if (setgroups(sizeof(kGroups) / sizeof(kGroups[0]), kGroups) < 0) {
        ERROR("cannot setgroups: %s\n", strerror(errno));
        exit(1);
    }
    if (setgid(gid) < 0) {
        ERROR("cannot setgid: %s\n", strerror(errno));
        exit(1);
    }
    if (setuid(uid) < 0) {
        ERROR("cannot setuid: %s\n", strerror(errno));
        exit(1);
    }

    if (multi_user) {
        snprintf(obb_path, sizeof(obb_path), "%s/obb", source_path);
        fs_prepare_dir(&obb_path[0], 0775, uid, gid);
    }

    exit(0);
}
2013
/* Returns true if the running kernel advertises sdcardfs in
 * /proc/filesystems; false otherwise (including when the file
 * cannot be opened, which is logged). */
static bool supports_sdcardfs(void) {
    char *line = NULL;
    size_t line_cap = 0;
    bool found = false;

    FILE *fp = fopen("/proc/filesystems", "r");
    if (fp == NULL) {
        ERROR("Could not read /proc/filesystems, error: %s\n", strerror(errno));
        return false;
    }
    /* Each line is "<nodev\t>name\n"; match the name with its newline
     * so e.g. "sdcardfsx" would not count. */
    while (!found && getline(&line, &line_cap, fp) > 0) {
        found = (strstr(line, "sdcardfs\n") != NULL);
    }
    free(line);
    fclose(fp);
    return found;
}
2035
should_use_sdcardfs(void)2036 static bool should_use_sdcardfs(void) {
2037 char property[PROPERTY_VALUE_MAX];
2038
2039 // Allow user to have a strong opinion about state
2040 property_get(PROP_SDCARDFS_USER, property, "");
2041 if (!strcmp(property, "force_on")) {
2042 ALOGW("User explicitly enabled sdcardfs");
2043 return supports_sdcardfs();
2044 } else if (!strcmp(property, "force_off")) {
2045 ALOGW("User explicitly disabled sdcardfs");
2046 return false;
2047 }
2048
2049 // Fall back to device opinion about state
2050 if (property_get_bool(PROP_SDCARDFS_DEVICE, false)) {
2051 ALOGW("Device explicitly enabled sdcardfs");
2052 return supports_sdcardfs();
2053 } else {
2054 ALOGW("Device explicitly disabled sdcardfs");
2055 return false;
2056 }
2057 }
2058
main(int argc,char ** argv)2059 int main(int argc, char **argv) {
2060 const char *source_path = NULL;
2061 const char *label = NULL;
2062 uid_t uid = 0;
2063 gid_t gid = 0;
2064 userid_t userid = 0;
2065 bool multi_user = false;
2066 bool full_write = false;
2067 int i;
2068 struct rlimit rlim;
2069 int fs_version;
2070
2071 int opt;
2072 while ((opt = getopt(argc, argv, "u:g:U:mw")) != -1) {
2073 switch (opt) {
2074 case 'u':
2075 uid = strtoul(optarg, NULL, 10);
2076 break;
2077 case 'g':
2078 gid = strtoul(optarg, NULL, 10);
2079 break;
2080 case 'U':
2081 userid = strtoul(optarg, NULL, 10);
2082 break;
2083 case 'm':
2084 multi_user = true;
2085 break;
2086 case 'w':
2087 full_write = true;
2088 break;
2089 case '?':
2090 default:
2091 return usage();
2092 }
2093 }
2094
2095 for (i = optind; i < argc; i++) {
2096 char* arg = argv[i];
2097 if (!source_path) {
2098 source_path = arg;
2099 } else if (!label) {
2100 label = arg;
2101 } else {
2102 ERROR("too many arguments\n");
2103 return usage();
2104 }
2105 }
2106
2107 if (!source_path) {
2108 ERROR("no source path specified\n");
2109 return usage();
2110 }
2111 if (!label) {
2112 ERROR("no label specified\n");
2113 return usage();
2114 }
2115 if (!uid || !gid) {
2116 ERROR("uid and gid must be nonzero\n");
2117 return usage();
2118 }
2119
2120 rlim.rlim_cur = 8192;
2121 rlim.rlim_max = 8192;
2122 if (setrlimit(RLIMIT_NOFILE, &rlim)) {
2123 ERROR("Error setting RLIMIT_NOFILE, errno = %d\n", errno);
2124 }
2125
2126 while ((fs_read_atomic_int("/data/.layout_version", &fs_version) == -1) || (fs_version < 3)) {
2127 ERROR("installd fs upgrade not yet complete. Waiting...\n");
2128 sleep(1);
2129 }
2130
2131 if (should_use_sdcardfs()) {
2132 run_sdcardfs(source_path, label, uid, gid, userid, multi_user, full_write);
2133 } else {
2134 run(source_path, label, uid, gid, userid, multi_user, full_write);
2135 }
2136 return 1;
2137 }
2138