/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "sdcard"

#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <linux/fuse.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/inotify.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <unistd.h>

#include <cutils/fs.h>
#include <cutils/hashmap.h>
#include <cutils/log.h>
#include <cutils/multiuser.h>

#include <private/android_filesystem_config.h>

/* README
 *
 * What is this?
 *
 * sdcard is a program that uses FUSE to emulate FAT-on-sdcard style
 * directory permissions (all files are given fixed owner, group, and
 * permissions at creation; owner, group, and permissions are not
 * changeable; symlinks and hardlinks are not creatable; etc.).
 *
 * See usage() for command line options.
 *
 * It must be run as root, but will drop to the requested UID/GID as soon as it
 * mounts a filesystem. It will refuse to run if the requested UID/GID are zero.
 *
 * Things I believe to be true:
 *
 * - ops that return a fuse_entry (LOOKUP, MKNOD, MKDIR, LINK, SYMLINK,
 *   CREAT) must bump that node's refcount
 * - don't forget that FORGET can forget multiple references (req->nlookup)
 * - if an op that returns a fuse_entry fails writing the reply to the
 *   kernel, you must rollback the refcount to reflect the reference the
 *   kernel did not actually acquire
 *
 * This daemon can also derive custom filesystem permissions based on directory
 * structure when requested. These custom permissions support several features:
 *
 * - Apps can access their own files in /Android/data/com.example/ without
 *   requiring any additional GIDs.
 * - Separate permissions for protecting directories like Pictures and Music.
 * - Multi-user separation on the same physical device.
 */
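
/*
 * Example (illustrative sketch of the behaviour described above, not part of
 * the original README): with permission derivation enabled, a path such as
 * /Android/data/com.example/files/foo ends up owned by whatever appId
 * packages.list maps to "com.example", while a path such as /DCIM/photo.jpg
 * simply keeps the fixed owner, group, and mask given at startup.
 */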

#define FUSE_TRACE 0

#if FUSE_TRACE
#define TRACE(x...) ALOGD(x)
#else
#define TRACE(x...) do {} while (0)
#endif

#define ERROR(x...) ALOGE(x)

#define FUSE_UNKNOWN_INO 0xffffffff

/* Maximum number of bytes to write in one request. */
#define MAX_WRITE (256 * 1024)

/* Maximum number of bytes to read in one request. */
#define MAX_READ (128 * 1024)

/* Largest possible request.
 * The request size is bounded by the maximum size of a FUSE_WRITE request
 * because it has the largest possible data payload. */
#define MAX_REQUEST_SIZE (sizeof(struct fuse_in_header) + sizeof(struct fuse_write_in) + MAX_WRITE)

/* Pseudo-error constant used to indicate that no fuse status is needed
 * or that a reply has already been written. */
#define NO_STATUS 1

/* Path to the system-provided mapping of package name to appIds */
static const char* const kPackagesListFile = "/data/system/packages.list";

/* Supplementary groups to execute with */
static const gid_t kGroups[1] = { AID_PACKAGE_INFO };

/* Permission mode for a specific node. Controls how file permissions
 * are derived for child nodes. */
typedef enum {
    /* Nothing special; this node should just inherit from its parent. */
    PERM_INHERIT,
    /* This node is one level above a normal root; used for legacy layouts
     * which use the first level to represent user_id. */
    PERM_PRE_ROOT,
    /* This node is "/" */
    PERM_ROOT,
    /* This node is "/Android" */
    PERM_ANDROID,
    /* This node is "/Android/data" */
    PERM_ANDROID_DATA,
    /* This node is "/Android/obb" */
    PERM_ANDROID_OBB,
    /* This node is "/Android/media" */
    PERM_ANDROID_MEDIA,
} perm_t;
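
/*
 * Example (illustrative): for a multi-user mount the hierarchy is labelled
 * roughly as follows: the mount root is PERM_PRE_ROOT, "/0" (user 0) is
 * PERM_ROOT, "/0/Android" is PERM_ANDROID, "/0/Android/data" is
 * PERM_ANDROID_DATA, and anything else such as "/0/DCIM" stays PERM_INHERIT.
 */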

struct handle {
    int fd;
};

struct dirhandle {
    DIR *d;
};

struct node {
    __u32 refcount;
    __u64 nid;
    __u64 gen;
    /*
     * The inode number for this FUSE node. Note that this isn't stable across
     * multiple invocations of the FUSE daemon.
     */
    __u32 ino;

    /* State derived based on current position in hierarchy. */
    perm_t perm;
    userid_t userid;
    uid_t uid;
    bool under_android;

    struct node *next;          /* per-dir sibling list */
    struct node *child;         /* first contained file by this dir */
    struct node *parent;        /* containing directory */

    size_t namelen;
    char *name;
    /* If non-null, this is the real name of the file in the underlying storage.
     * This may differ from the field "name" only by case.
     * strlen(actual_name) will always equal strlen(name), so it is safe to use
     * namelen for both fields.
     */
    char *actual_name;

    /* If non-null, an exact underlying path that should be grafted into this
     * position. Used to support things like OBB. */
    char* graft_path;
    size_t graft_pathlen;

    bool deleted;
};

static int str_hash(void *key) {
    return hashmapHash(key, strlen(key));
}

/** Test if two string keys are equal ignoring case */
static bool str_icase_equals(void *keyA, void *keyB) {
    return strcasecmp(keyA, keyB) == 0;
}

/* Global data for all FUSE mounts */
struct fuse_global {
    pthread_mutex_t lock;

    uid_t uid;
    gid_t gid;
    bool multi_user;

    char source_path[PATH_MAX];
    char obb_path[PATH_MAX];

    Hashmap* package_to_appid;

    __u64 next_generation;
    struct node root;

    /* Used to allocate unique inode numbers for fuse nodes. We use
     * a simple counter-based scheme where inode numbers from deleted
     * nodes aren't reused. Note that inode allocations are not stable
     * across multiple invocations of the sdcard daemon, but that shouldn't
     * be a huge problem in practice.
     *
     * Note that we restrict inodes to 32 bit unsigned integers to prevent
     * truncation on 32 bit processes when unsigned long long stat.st_ino is
     * assigned to an unsigned long ino_t type in an LP32 process.
     *
     * Also note that fuse_attr and fuse_dirent inode values are 64 bits wide
     * on both LP32 and LP64, but the fuse kernel code doesn't squash 64 bit
     * inode numbers into 32 bit values on 64 bit kernels (see fuse_squash_ino
     * in fs/fuse/inode.c).
     *
     * Accesses must be guarded by |lock|.
     */
    __u32 inode_ctr;

    struct fuse* fuse_default;
    struct fuse* fuse_read;
    struct fuse* fuse_write;
};

/* Single FUSE mount */
struct fuse {
    struct fuse_global* global;

    char dest_path[PATH_MAX];

    int fd;

    gid_t gid;
    mode_t mask;
};

/* Private data used by a single FUSE handler */
struct fuse_handler {
    struct fuse* fuse;
    int token;

    /* To save memory, we never use the contents of the request buffer and the read
     * buffer at the same time. This allows us to share the underlying storage. */
    union {
        __u8 request_buffer[MAX_REQUEST_SIZE];
        __u8 read_buffer[MAX_READ + PAGESIZE];
    };
};
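
/*
 * Rough size math for the union above (illustrative, assuming a 4 KiB
 * PAGESIZE): request_buffer is MAX_WRITE (256 KiB) plus two small headers,
 * read_buffer is MAX_READ (128 KiB) plus one page, so sharing the storage
 * costs each handler thread roughly 256 KiB instead of roughly 388 KiB.
 */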

static inline void *id_to_ptr(__u64 nid)
{
    return (void *) (uintptr_t) nid;
}

static inline __u64 ptr_to_id(void *ptr)
{
    return (__u64) (uintptr_t) ptr;
}

static void acquire_node_locked(struct node* node)
{
    node->refcount++;
    TRACE("ACQUIRE %p (%s) rc=%d\n", node, node->name, node->refcount);
}

static void remove_node_from_parent_locked(struct node* node);

static void release_node_locked(struct node* node)
{
    TRACE("RELEASE %p (%s) rc=%d\n", node, node->name, node->refcount);
    if (node->refcount > 0) {
        node->refcount--;
        if (!node->refcount) {
            TRACE("DESTROY %p (%s)\n", node, node->name);
            remove_node_from_parent_locked(node);

            /* TODO: remove debugging - poison memory */
            memset(node->name, 0xef, node->namelen);
            free(node->name);
            free(node->actual_name);
            memset(node, 0xfc, sizeof(*node));
            free(node);
        }
    } else {
        ERROR("Zero refcnt %p\n", node);
    }
}

static void add_node_to_parent_locked(struct node *node, struct node *parent) {
    node->parent = parent;
    node->next = parent->child;
    parent->child = node;
    acquire_node_locked(parent);
}

static void remove_node_from_parent_locked(struct node* node)
{
    if (node->parent) {
        if (node->parent->child == node) {
            node->parent->child = node->parent->child->next;
        } else {
            struct node *node2;
            node2 = node->parent->child;
            while (node2->next != node)
                node2 = node2->next;
            node2->next = node->next;
        }
        release_node_locked(node->parent);
        node->parent = NULL;
        node->next = NULL;
    }
}

/* Gets the absolute path to a node into the provided buffer.
 *
 * Populates 'buf' with the path and returns the length of the path on success,
 * or returns -1 if the path is too long for the provided buffer.
 */
static ssize_t get_node_path_locked(struct node* node, char* buf, size_t bufsize) {
    const char* name;
    size_t namelen;
    if (node->graft_path) {
        name = node->graft_path;
        namelen = node->graft_pathlen;
    } else if (node->actual_name) {
        name = node->actual_name;
        namelen = node->namelen;
    } else {
        name = node->name;
        namelen = node->namelen;
    }

    if (bufsize < namelen + 1) {
        return -1;
    }

    ssize_t pathlen = 0;
    if (node->parent && node->graft_path == NULL) {
        pathlen = get_node_path_locked(node->parent, buf, bufsize - namelen - 1);
        if (pathlen < 0) {
            return -1;
        }
        buf[pathlen++] = '/';
    }

    memcpy(buf + pathlen, name, namelen + 1); /* include trailing \0 */
    return pathlen + namelen;
}
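
/*
 * Example (illustrative): for a node "c" whose parent is "b" under a root
 * named "/data/media/0", the recursion above produces "/data/media/0/b/c".
 * If "c" instead had a graft_path (such as the shared OBB directory), that
 * grafted path is copied verbatim and the parents are ignored.
 */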

/* Finds the absolute path of a file within a given directory.
 * Performs a case-insensitive search for the file and sets the buffer to the path
 * of the first matching file. If 'search' is zero or if no match is found, sets
 * the buffer to the path that the file would have, assuming the name were case-sensitive.
 *
 * Populates 'buf' with the path and returns the actual name (within 'buf') on success,
 * or returns NULL if the path is too long for the provided buffer.
 */
static char* find_file_within(const char* path, const char* name,
        char* buf, size_t bufsize, int search)
{
    size_t pathlen = strlen(path);
    size_t namelen = strlen(name);
    size_t childlen = pathlen + namelen + 1;
    char* actual;

    if (bufsize <= childlen) {
        return NULL;
    }

    memcpy(buf, path, pathlen);
    buf[pathlen] = '/';
    actual = buf + pathlen + 1;
    memcpy(actual, name, namelen + 1);

    if (search && access(buf, F_OK)) {
        struct dirent* entry;
        DIR* dir = opendir(path);
        if (!dir) {
            ERROR("opendir %s failed: %s\n", path, strerror(errno));
            return actual;
        }
        while ((entry = readdir(dir))) {
            if (!strcasecmp(entry->d_name, name)) {
                /* we have a match - replace the name, don't need to copy the null again */
                memcpy(actual, entry->d_name, namelen);
                break;
            }
        }
        closedir(dir);
    }
    return actual;
}

static void attr_from_stat(struct fuse* fuse, struct fuse_attr *attr,
        const struct stat *s, const struct node* node) {
    attr->ino = node->ino;
    attr->size = s->st_size;
    attr->blocks = s->st_blocks;
    attr->atime = s->st_atim.tv_sec;
    attr->mtime = s->st_mtim.tv_sec;
    attr->ctime = s->st_ctim.tv_sec;
    attr->atimensec = s->st_atim.tv_nsec;
    attr->mtimensec = s->st_mtim.tv_nsec;
    attr->ctimensec = s->st_ctim.tv_nsec;
    attr->mode = s->st_mode;
    attr->nlink = s->st_nlink;

    attr->uid = node->uid;

    if (fuse->gid == AID_SDCARD_RW) {
        /* As an optimization, certain trusted system components only run
         * as owner but operate across all users. Since we're now handing
         * out the sdcard_rw GID only to trusted apps, we're okay relaxing
         * the user boundary enforcement for the default view. The UIDs
         * assigned to app directories are still multiuser aware. */
        attr->gid = AID_SDCARD_RW;
    } else {
        attr->gid = multiuser_get_uid(node->userid, fuse->gid);
    }

    int visible_mode = 0775 & ~fuse->mask;
    if (node->perm == PERM_PRE_ROOT) {
        /* Top of multi-user view should always be visible to ensure
         * secondary users can traverse inside. */
        visible_mode = 0711;
    } else if (node->under_android) {
        /* Block "other" access to Android directories, since only apps
         * belonging to a specific user should be in there; we still
         * leave +x open for the default view. */
        if (fuse->gid == AID_SDCARD_RW) {
            visible_mode = visible_mode & ~0006;
        } else {
            visible_mode = visible_mode & ~0007;
        }
    }
    int owner_mode = s->st_mode & 0700;
    int filtered_mode = visible_mode & (owner_mode | (owner_mode >> 3) | (owner_mode >> 6));
    attr->mode = (attr->mode & S_IFMT) | filtered_mode;
}
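
/*
 * Worked example for the mode filtering above (illustrative): with a mask of
 * 0006 the default visible mode is 0775 & ~0006 = 0771; for an underlying
 * file with st_mode 0644, owner_mode is 0600 and
 * (owner_mode | owner_mode >> 3 | owner_mode >> 6) is 0666, so the reported
 * mode becomes (0771 & 0666) = 0660 plus the file type bits.
 */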

static int touch(char* path, mode_t mode) {
    int fd = open(path, O_RDWR | O_CREAT | O_EXCL | O_NOFOLLOW, mode);
    if (fd == -1) {
        if (errno == EEXIST) {
            return 0;
        } else {
            ERROR("Failed to open(%s): %s\n", path, strerror(errno));
            return -1;
        }
    }
    close(fd);
    return 0;
}

static void derive_permissions_locked(struct fuse* fuse, struct node *parent,
        struct node *node) {
    appid_t appid;

    /* By default, each node inherits from its parent */
    node->perm = PERM_INHERIT;
    node->userid = parent->userid;
    node->uid = parent->uid;
    node->under_android = parent->under_android;

    /* Derive custom permissions based on parent and current node */
    switch (parent->perm) {
    case PERM_INHERIT:
        /* Already inherited above */
        break;
    case PERM_PRE_ROOT:
        /* Legacy internal layout places users at top level */
        node->perm = PERM_ROOT;
        node->userid = strtoul(node->name, NULL, 10);
        break;
    case PERM_ROOT:
        /* Assume masked off by default. */
        if (!strcasecmp(node->name, "Android")) {
            /* App-specific directories inside; let anyone traverse */
            node->perm = PERM_ANDROID;
            node->under_android = true;
        }
        break;
    case PERM_ANDROID:
        if (!strcasecmp(node->name, "data")) {
            /* App-specific directories inside; let anyone traverse */
            node->perm = PERM_ANDROID_DATA;
        } else if (!strcasecmp(node->name, "obb")) {
            /* App-specific directories inside; let anyone traverse */
            node->perm = PERM_ANDROID_OBB;
            /* Single OBB directory is always shared */
            node->graft_path = fuse->global->obb_path;
            node->graft_pathlen = strlen(fuse->global->obb_path);
        } else if (!strcasecmp(node->name, "media")) {
            /* App-specific directories inside; let anyone traverse */
            node->perm = PERM_ANDROID_MEDIA;
        }
        break;
    case PERM_ANDROID_DATA:
    case PERM_ANDROID_OBB:
    case PERM_ANDROID_MEDIA:
        appid = (appid_t) (uintptr_t) hashmapGet(fuse->global->package_to_appid, node->name);
        if (appid != 0) {
            node->uid = multiuser_get_uid(parent->userid, appid);
        }
        break;
    }
}
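
/*
 * Example (illustrative): if packages.list maps "com.example" to appId 10012,
 * a child of an /Android/data directory named "com.example" gets
 * multiuser_get_uid(userid, 10012), i.e. uid 10012 for user 0 and 1010012
 * for user 10, assuming the usual 100000-per-user offset.
 */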

static void derive_permissions_recursive_locked(struct fuse* fuse, struct node *parent) {
    struct node *node;
    for (node = parent->child; node; node = node->next) {
        derive_permissions_locked(fuse, parent, node);
        if (node->child) {
            derive_permissions_recursive_locked(fuse, node);
        }
    }
}

/* Kernel has already enforced everything we returned through
 * derive_permissions_locked(), so this is used to lock down access
 * even further, such as enforcing that apps hold sdcard_rw. */
static bool check_caller_access_to_name(struct fuse* fuse,
        const struct fuse_in_header *hdr, const struct node* parent_node,
        const char* name, int mode) {
    /* Always block security-sensitive files at root */
    if (parent_node && parent_node->perm == PERM_ROOT) {
        if (!strcasecmp(name, "autorun.inf")
                || !strcasecmp(name, ".android_secure")
                || !strcasecmp(name, "android_secure")) {
            return false;
        }
    }

    /* Root always has access; access for any other UIDs should always
     * be controlled through packages.list. */
    if (hdr->uid == 0) {
        return true;
    }

    /* No extra permissions to enforce */
    return true;
}

static bool check_caller_access_to_node(struct fuse* fuse,
        const struct fuse_in_header *hdr, const struct node* node, int mode) {
    return check_caller_access_to_name(fuse, hdr, node->parent, node->name, mode);
}

struct node *create_node_locked(struct fuse* fuse,
        struct node *parent, const char *name, const char* actual_name)
{
    struct node *node;
    size_t namelen = strlen(name);

    // Detect overflows in the inode counter. "4 billion nodes should be enough
    // for everybody".
    if (fuse->global->inode_ctr == 0) {
        ERROR("No more inode numbers available");
        return NULL;
    }

    node = calloc(1, sizeof(struct node));
    if (!node) {
        return NULL;
    }
    node->name = malloc(namelen + 1);
    if (!node->name) {
        free(node);
        return NULL;
    }
    memcpy(node->name, name, namelen + 1);
    if (strcmp(name, actual_name)) {
        node->actual_name = malloc(namelen + 1);
        if (!node->actual_name) {
            free(node->name);
            free(node);
            return NULL;
        }
        memcpy(node->actual_name, actual_name, namelen + 1);
    }
    node->namelen = namelen;
    node->nid = ptr_to_id(node);
    node->ino = fuse->global->inode_ctr++;
    node->gen = fuse->global->next_generation++;

    node->deleted = false;

    derive_permissions_locked(fuse, parent, node);
    acquire_node_locked(node);
    add_node_to_parent_locked(node, parent);
    return node;
}

static int rename_node_locked(struct node *node, const char *name,
        const char* actual_name)
{
    size_t namelen = strlen(name);
    int need_actual_name = strcmp(name, actual_name);

    /* make the storage bigger without actually changing the name
     * in case an error occurs part way */
    if (namelen > node->namelen) {
        char* new_name = realloc(node->name, namelen + 1);
        if (!new_name) {
            return -ENOMEM;
        }
        node->name = new_name;
        if (need_actual_name && node->actual_name) {
            char* new_actual_name = realloc(node->actual_name, namelen + 1);
            if (!new_actual_name) {
                return -ENOMEM;
            }
            node->actual_name = new_actual_name;
        }
    }

    /* update the name, taking care to allocate storage before overwriting the old name */
    if (need_actual_name) {
        if (!node->actual_name) {
            node->actual_name = malloc(namelen + 1);
            if (!node->actual_name) {
                return -ENOMEM;
            }
        }
        memcpy(node->actual_name, actual_name, namelen + 1);
    } else {
        free(node->actual_name);
        node->actual_name = NULL;
    }
    memcpy(node->name, name, namelen + 1);
    node->namelen = namelen;
    return 0;
}

static struct node *lookup_node_by_id_locked(struct fuse *fuse, __u64 nid)
{
    if (nid == FUSE_ROOT_ID) {
        return &fuse->global->root;
    } else {
        return id_to_ptr(nid);
    }
}

static struct node* lookup_node_and_path_by_id_locked(struct fuse* fuse, __u64 nid,
        char* buf, size_t bufsize)
{
    struct node* node = lookup_node_by_id_locked(fuse, nid);
    if (node && get_node_path_locked(node, buf, bufsize) < 0) {
        node = NULL;
    }
    return node;
}

static struct node *lookup_child_by_name_locked(struct node *node, const char *name)
{
    for (node = node->child; node; node = node->next) {
        /* use exact string comparison, nodes that differ by case
         * must be considered distinct even if they refer to the same
         * underlying file as otherwise operations such as "mv x x"
         * will not work because the source and target nodes are the same. */
        if (!strcmp(name, node->name) && !node->deleted) {
            return node;
        }
    }
    return 0;
}

static struct node* acquire_or_create_child_locked(
        struct fuse* fuse, struct node* parent,
        const char* name, const char* actual_name)
{
    struct node* child = lookup_child_by_name_locked(parent, name);
    if (child) {
        acquire_node_locked(child);
    } else {
        child = create_node_locked(fuse, parent, name, actual_name);
    }
    return child;
}

static void fuse_status(struct fuse *fuse, __u64 unique, int err)
{
    struct fuse_out_header hdr;
    hdr.len = sizeof(hdr);
    hdr.error = err;
    hdr.unique = unique;
    write(fuse->fd, &hdr, sizeof(hdr));
}

static void fuse_reply(struct fuse *fuse, __u64 unique, void *data, int len)
{
    struct fuse_out_header hdr;
    struct iovec vec[2];
    int res;

    hdr.len = len + sizeof(hdr);
    hdr.error = 0;
    hdr.unique = unique;

    vec[0].iov_base = &hdr;
    vec[0].iov_len = sizeof(hdr);
    vec[1].iov_base = data;
    vec[1].iov_len = len;

    res = writev(fuse->fd, vec, 2);
    if (res < 0) {
        ERROR("*** REPLY FAILED *** %d\n", errno);
    }
}

static int fuse_reply_entry(struct fuse* fuse, __u64 unique,
        struct node* parent, const char* name, const char* actual_name,
        const char* path)
{
    struct node* node;
    struct fuse_entry_out out;
    struct stat s;

    if (lstat(path, &s) < 0) {
        return -errno;
    }

    pthread_mutex_lock(&fuse->global->lock);
    node = acquire_or_create_child_locked(fuse, parent, name, actual_name);
    if (!node) {
        pthread_mutex_unlock(&fuse->global->lock);
        return -ENOMEM;
    }
    memset(&out, 0, sizeof(out));
    attr_from_stat(fuse, &out.attr, &s, node);
    out.attr_valid = 10;
    out.entry_valid = 10;
    out.nodeid = node->nid;
    out.generation = node->gen;
    pthread_mutex_unlock(&fuse->global->lock);
    fuse_reply(fuse, unique, &out, sizeof(out));
    return NO_STATUS;
}

static int fuse_reply_attr(struct fuse* fuse, __u64 unique, const struct node* node,
        const char* path)
{
    struct fuse_attr_out out;
    struct stat s;

    if (lstat(path, &s) < 0) {
        return -errno;
    }
    memset(&out, 0, sizeof(out));
    attr_from_stat(fuse, &out.attr, &s, node);
    out.attr_valid = 10;
    fuse_reply(fuse, unique, &out, sizeof(out));
    return NO_STATUS;
}

static void fuse_notify_delete(struct fuse* fuse, const __u64 parent,
        const __u64 child, const char* name) {
    struct fuse_out_header hdr;
    struct fuse_notify_delete_out data;
    struct iovec vec[3];
    size_t namelen = strlen(name);
    int res;

    hdr.len = sizeof(hdr) + sizeof(data) + namelen + 1;
    hdr.error = FUSE_NOTIFY_DELETE;
    hdr.unique = 0;

    data.parent = parent;
    data.child = child;
    data.namelen = namelen;
    data.padding = 0;

    vec[0].iov_base = &hdr;
    vec[0].iov_len = sizeof(hdr);
    vec[1].iov_base = &data;
    vec[1].iov_len = sizeof(data);
    vec[2].iov_base = (void*) name;
    vec[2].iov_len = namelen + 1;

    res = writev(fuse->fd, vec, 3);
    /* Ignore ENOENT, since other views may not have seen the entry */
    if (res < 0 && errno != ENOENT) {
        ERROR("*** NOTIFY FAILED *** %d\n", errno);
    }
}

static int handle_lookup(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header *hdr, const char* name)
{
    struct node* parent_node;
    char parent_path[PATH_MAX];
    char child_path[PATH_MAX];
    const char* actual_name;

    pthread_mutex_lock(&fuse->global->lock);
    parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            parent_path, sizeof(parent_path));
    TRACE("[%d] LOOKUP %s @ %"PRIx64" (%s)\n", handler->token, name, hdr->nodeid,
            parent_node ? parent_node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!parent_node || !(actual_name = find_file_within(parent_path, name,
            child_path, sizeof(child_path), 1))) {
        return -ENOENT;
    }
    if (!check_caller_access_to_name(fuse, hdr, parent_node, name, R_OK)) {
        return -EACCES;
    }

    return fuse_reply_entry(fuse, hdr->unique, parent_node, name, actual_name, child_path);
}

static int handle_forget(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header *hdr, const struct fuse_forget_in *req)
{
    struct node* node;

    pthread_mutex_lock(&fuse->global->lock);
    node = lookup_node_by_id_locked(fuse, hdr->nodeid);
    TRACE("[%d] FORGET #%"PRIu64" @ %"PRIx64" (%s)\n", handler->token, req->nlookup,
            hdr->nodeid, node ? node->name : "?");
    if (node) {
        __u64 n = req->nlookup;
        while (n--) {
            release_node_locked(node);
        }
    }
    pthread_mutex_unlock(&fuse->global->lock);
    return NO_STATUS; /* no reply */
}

static int handle_getattr(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header *hdr, const struct fuse_getattr_in *req)
{
    struct node* node;
    char path[PATH_MAX];

    pthread_mutex_lock(&fuse->global->lock);
    node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid, path, sizeof(path));
    TRACE("[%d] GETATTR flags=%x fh=%"PRIx64" @ %"PRIx64" (%s)\n", handler->token,
            req->getattr_flags, req->fh, hdr->nodeid, node ? node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!node) {
        return -ENOENT;
    }
    if (!check_caller_access_to_node(fuse, hdr, node, R_OK)) {
        return -EACCES;
    }

    return fuse_reply_attr(fuse, hdr->unique, node, path);
}

static int handle_setattr(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header *hdr, const struct fuse_setattr_in *req)
{
    struct node* node;
    char path[PATH_MAX];
    struct timespec times[2];

    pthread_mutex_lock(&fuse->global->lock);
    node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid, path, sizeof(path));
    TRACE("[%d] SETATTR fh=%"PRIx64" valid=%x @ %"PRIx64" (%s)\n", handler->token,
            req->fh, req->valid, hdr->nodeid, node ? node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!node) {
        return -ENOENT;
    }

    if (!(req->valid & FATTR_FH) &&
            !check_caller_access_to_node(fuse, hdr, node, W_OK)) {
        return -EACCES;
    }

    /* XXX: incomplete implementation on purpose.
     * chmod/chown should NEVER be implemented. */

    if ((req->valid & FATTR_SIZE) && truncate64(path, req->size) < 0) {
        return -errno;
    }

    /* Handle changing atime and mtime. If FATTR_ATIME and FATTR_ATIME_NOW
     * are both set, then set it to the current time. Else, set it to the
     * time specified in the request. Same goes for mtime. Use utimensat(2)
     * as it allows ATIME and MTIME to be changed independently, and has
     * nanosecond resolution, which fuse also has.
     */
    if (req->valid & (FATTR_ATIME | FATTR_MTIME)) {
        times[0].tv_nsec = UTIME_OMIT;
        times[1].tv_nsec = UTIME_OMIT;
        if (req->valid & FATTR_ATIME) {
            if (req->valid & FATTR_ATIME_NOW) {
                times[0].tv_nsec = UTIME_NOW;
            } else {
                times[0].tv_sec = req->atime;
                times[0].tv_nsec = req->atimensec;
            }
        }
        if (req->valid & FATTR_MTIME) {
            if (req->valid & FATTR_MTIME_NOW) {
                times[1].tv_nsec = UTIME_NOW;
            } else {
                times[1].tv_sec = req->mtime;
                times[1].tv_nsec = req->mtimensec;
            }
        }
        TRACE("[%d] Calling utimensat on %s with atime %ld, mtime=%ld\n",
                handler->token, path, times[0].tv_sec, times[1].tv_sec);
        if (utimensat(-1, path, times, 0) < 0) {
            return -errno;
        }
    }
    return fuse_reply_attr(fuse, hdr->unique, node, path);
}

static int handle_mknod(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_mknod_in* req, const char* name)
{
    struct node* parent_node;
    char parent_path[PATH_MAX];
    char child_path[PATH_MAX];
    const char* actual_name;

    pthread_mutex_lock(&fuse->global->lock);
    parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            parent_path, sizeof(parent_path));
    TRACE("[%d] MKNOD %s 0%o @ %"PRIx64" (%s)\n", handler->token,
            name, req->mode, hdr->nodeid, parent_node ? parent_node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!parent_node || !(actual_name = find_file_within(parent_path, name,
            child_path, sizeof(child_path), 1))) {
        return -ENOENT;
    }
    if (!check_caller_access_to_name(fuse, hdr, parent_node, name, W_OK)) {
        return -EACCES;
    }
    __u32 mode = (req->mode & (~0777)) | 0664;
    if (mknod(child_path, mode, req->rdev) < 0) {
        return -errno;
    }
    return fuse_reply_entry(fuse, hdr->unique, parent_node, name, actual_name, child_path);
}

static int handle_mkdir(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_mkdir_in* req, const char* name)
{
    struct node* parent_node;
    char parent_path[PATH_MAX];
    char child_path[PATH_MAX];
    const char* actual_name;

    pthread_mutex_lock(&fuse->global->lock);
    parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            parent_path, sizeof(parent_path));
    TRACE("[%d] MKDIR %s 0%o @ %"PRIx64" (%s)\n", handler->token,
            name, req->mode, hdr->nodeid, parent_node ? parent_node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!parent_node || !(actual_name = find_file_within(parent_path, name,
            child_path, sizeof(child_path), 1))) {
        return -ENOENT;
    }
    if (!check_caller_access_to_name(fuse, hdr, parent_node, name, W_OK)) {
        return -EACCES;
    }
    __u32 mode = (req->mode & (~0777)) | 0775;
    if (mkdir(child_path, mode) < 0) {
        return -errno;
    }

    /* When creating /Android/data and /Android/obb, mark them as .nomedia */
    if (parent_node->perm == PERM_ANDROID && !strcasecmp(name, "data")) {
        char nomedia[PATH_MAX];
        snprintf(nomedia, PATH_MAX, "%s/.nomedia", child_path);
        if (touch(nomedia, 0664) != 0) {
            ERROR("Failed to touch(%s): %s\n", nomedia, strerror(errno));
            return -ENOENT;
        }
    }
    if (parent_node->perm == PERM_ANDROID && !strcasecmp(name, "obb")) {
        char nomedia[PATH_MAX];
        snprintf(nomedia, PATH_MAX, "%s/.nomedia", fuse->global->obb_path);
        if (touch(nomedia, 0664) != 0) {
            ERROR("Failed to touch(%s): %s\n", nomedia, strerror(errno));
            return -ENOENT;
        }
    }

    return fuse_reply_entry(fuse, hdr->unique, parent_node, name, actual_name, child_path);
}

static int handle_unlink(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const char* name)
{
    struct node* parent_node;
    struct node* child_node;
    char parent_path[PATH_MAX];
    char child_path[PATH_MAX];

    pthread_mutex_lock(&fuse->global->lock);
    parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            parent_path, sizeof(parent_path));
    TRACE("[%d] UNLINK %s @ %"PRIx64" (%s)\n", handler->token,
            name, hdr->nodeid, parent_node ? parent_node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!parent_node || !find_file_within(parent_path, name,
            child_path, sizeof(child_path), 1)) {
        return -ENOENT;
    }
    if (!check_caller_access_to_name(fuse, hdr, parent_node, name, W_OK)) {
        return -EACCES;
    }
    if (unlink(child_path) < 0) {
        return -errno;
    }
    pthread_mutex_lock(&fuse->global->lock);
    child_node = lookup_child_by_name_locked(parent_node, name);
    if (child_node) {
        child_node->deleted = true;
    }
    pthread_mutex_unlock(&fuse->global->lock);
    if (parent_node && child_node) {
        /* Tell all other views that node is gone */
        TRACE("[%d] fuse_notify_delete parent=%"PRIx64", child=%"PRIx64", name=%s\n",
                handler->token, (uint64_t) parent_node->nid, (uint64_t) child_node->nid, name);
        if (fuse != fuse->global->fuse_default) {
            fuse_notify_delete(fuse->global->fuse_default, parent_node->nid, child_node->nid, name);
        }
        if (fuse != fuse->global->fuse_read) {
            fuse_notify_delete(fuse->global->fuse_read, parent_node->nid, child_node->nid, name);
        }
        if (fuse != fuse->global->fuse_write) {
            fuse_notify_delete(fuse->global->fuse_write, parent_node->nid, child_node->nid, name);
        }
    }
    return 0;
}

static int handle_rmdir(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const char* name)
{
    struct node* child_node;
    struct node* parent_node;
    char parent_path[PATH_MAX];
    char child_path[PATH_MAX];

    pthread_mutex_lock(&fuse->global->lock);
    parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            parent_path, sizeof(parent_path));
    TRACE("[%d] RMDIR %s @ %"PRIx64" (%s)\n", handler->token,
            name, hdr->nodeid, parent_node ? parent_node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!parent_node || !find_file_within(parent_path, name,
            child_path, sizeof(child_path), 1)) {
        return -ENOENT;
    }
    if (!check_caller_access_to_name(fuse, hdr, parent_node, name, W_OK)) {
        return -EACCES;
    }
    if (rmdir(child_path) < 0) {
        return -errno;
    }
    pthread_mutex_lock(&fuse->global->lock);
    child_node = lookup_child_by_name_locked(parent_node, name);
    if (child_node) {
        child_node->deleted = true;
    }
    pthread_mutex_unlock(&fuse->global->lock);
    if (parent_node && child_node) {
        /* Tell all other views that node is gone */
        TRACE("[%d] fuse_notify_delete parent=%"PRIx64", child=%"PRIx64", name=%s\n",
                handler->token, (uint64_t) parent_node->nid, (uint64_t) child_node->nid, name);
        if (fuse != fuse->global->fuse_default) {
            fuse_notify_delete(fuse->global->fuse_default, parent_node->nid, child_node->nid, name);
        }
        if (fuse != fuse->global->fuse_read) {
            fuse_notify_delete(fuse->global->fuse_read, parent_node->nid, child_node->nid, name);
        }
        if (fuse != fuse->global->fuse_write) {
            fuse_notify_delete(fuse->global->fuse_write, parent_node->nid, child_node->nid, name);
        }
    }
    return 0;
}

static int handle_rename(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_rename_in* req,
        const char* old_name, const char* new_name)
{
    struct node* old_parent_node;
    struct node* new_parent_node;
    struct node* child_node;
    char old_parent_path[PATH_MAX];
    char new_parent_path[PATH_MAX];
    char old_child_path[PATH_MAX];
    char new_child_path[PATH_MAX];
    const char* new_actual_name;
    int res;

    pthread_mutex_lock(&fuse->global->lock);
    old_parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            old_parent_path, sizeof(old_parent_path));
    new_parent_node = lookup_node_and_path_by_id_locked(fuse, req->newdir,
            new_parent_path, sizeof(new_parent_path));
    TRACE("[%d] RENAME %s->%s @ %"PRIx64" (%s) -> %"PRIx64" (%s)\n", handler->token,
            old_name, new_name,
            hdr->nodeid, old_parent_node ? old_parent_node->name : "?",
            req->newdir, new_parent_node ? new_parent_node->name : "?");
    if (!old_parent_node || !new_parent_node) {
        res = -ENOENT;
        goto lookup_error;
    }
    if (!check_caller_access_to_name(fuse, hdr, old_parent_node, old_name, W_OK)) {
        res = -EACCES;
        goto lookup_error;
    }
    if (!check_caller_access_to_name(fuse, hdr, new_parent_node, new_name, W_OK)) {
        res = -EACCES;
        goto lookup_error;
    }
    child_node = lookup_child_by_name_locked(old_parent_node, old_name);
    if (!child_node || get_node_path_locked(child_node,
            old_child_path, sizeof(old_child_path)) < 0) {
        res = -ENOENT;
        goto lookup_error;
    }
    acquire_node_locked(child_node);
    pthread_mutex_unlock(&fuse->global->lock);

    /* Special case for renaming a file where destination is same path
     * differing only by case. In this case we don't want to look for a case
     * insensitive match. This allows commands like "mv foo FOO" to work as expected.
     */
    int search = old_parent_node != new_parent_node
            || strcasecmp(old_name, new_name);
    if (!(new_actual_name = find_file_within(new_parent_path, new_name,
            new_child_path, sizeof(new_child_path), search))) {
        res = -ENOENT;
        goto io_error;
    }

    TRACE("[%d] RENAME %s->%s\n", handler->token, old_child_path, new_child_path);
    res = rename(old_child_path, new_child_path);
    if (res < 0) {
        res = -errno;
        goto io_error;
    }

    pthread_mutex_lock(&fuse->global->lock);
    res = rename_node_locked(child_node, new_name, new_actual_name);
    if (!res) {
        remove_node_from_parent_locked(child_node);
        derive_permissions_locked(fuse, new_parent_node, child_node);
        derive_permissions_recursive_locked(fuse, child_node);
        add_node_to_parent_locked(child_node, new_parent_node);
    }
    goto done;

io_error:
    pthread_mutex_lock(&fuse->global->lock);
done:
    release_node_locked(child_node);
lookup_error:
    pthread_mutex_unlock(&fuse->global->lock);
    return res;
}

static int open_flags_to_access_mode(int open_flags) {
    if ((open_flags & O_ACCMODE) == O_RDONLY) {
        return R_OK;
    } else if ((open_flags & O_ACCMODE) == O_WRONLY) {
        return W_OK;
    } else {
        /* Probably O_RDWR, but treat as default to be safe */
        return R_OK | W_OK;
    }
}

static int handle_open(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_open_in* req)
{
    struct node* node;
    char path[PATH_MAX];
    struct fuse_open_out out;
    struct handle *h;

    pthread_mutex_lock(&fuse->global->lock);
    node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid, path, sizeof(path));
    TRACE("[%d] OPEN 0%o @ %"PRIx64" (%s)\n", handler->token,
            req->flags, hdr->nodeid, node ? node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!node) {
        return -ENOENT;
    }
    if (!check_caller_access_to_node(fuse, hdr, node,
            open_flags_to_access_mode(req->flags))) {
        return -EACCES;
    }
    h = malloc(sizeof(*h));
    if (!h) {
        return -ENOMEM;
    }
    TRACE("[%d] OPEN %s\n", handler->token, path);
    h->fd = open(path, req->flags);
    if (h->fd < 0) {
        free(h);
        return -errno;
    }
    out.fh = ptr_to_id(h);
    out.open_flags = 0;
    out.padding = 0;
    fuse_reply(fuse, hdr->unique, &out, sizeof(out));
    return NO_STATUS;
}

static int handle_read(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_read_in* req)
{
    struct handle *h = id_to_ptr(req->fh);
    __u64 unique = hdr->unique;
    __u32 size = req->size;
    __u64 offset = req->offset;
    int res;
    __u8 *read_buffer = (__u8 *) ((uintptr_t)(handler->read_buffer + PAGESIZE) & ~((uintptr_t)PAGESIZE-1));

    /* Don't access any other fields of hdr or req beyond this point, the read buffer
     * overlaps the request buffer and will clobber data in the request. This
     * saves us 128KB per request handler thread at the cost of this scary comment. */
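
    /* The pointer math above is ordinary page alignment (illustrative note,
     * assuming PAGESIZE is a power of two such as 4096): adding PAGESIZE and
     * masking with ~(PAGESIZE-1) rounds the address up to the next page
     * boundary, which is exactly why read_buffer is declared PAGESIZE bytes
     * larger than MAX_READ. */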

    TRACE("[%d] READ %p(%d) %u@%"PRIu64"\n", handler->token,
            h, h->fd, size, (uint64_t) offset);
    if (size > MAX_READ) {
        return -EINVAL;
    }
    res = pread64(h->fd, read_buffer, size, offset);
    if (res < 0) {
        return -errno;
    }
    fuse_reply(fuse, unique, read_buffer, res);
    return NO_STATUS;
}

static int handle_write(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_write_in* req,
        const void* buffer)
{
    struct fuse_write_out out;
    struct handle *h = id_to_ptr(req->fh);
    int res;
    __u8 aligned_buffer[req->size] __attribute__((__aligned__(PAGESIZE)));

    if (req->flags & O_DIRECT) {
        memcpy(aligned_buffer, buffer, req->size);
        buffer = (const __u8*) aligned_buffer;
    }

    TRACE("[%d] WRITE %p(%d) %u@%"PRIu64"\n", handler->token,
            h, h->fd, req->size, req->offset);
    res = pwrite64(h->fd, buffer, req->size, req->offset);
    if (res < 0) {
        return -errno;
    }
    out.size = res;
    out.padding = 0;
    fuse_reply(fuse, hdr->unique, &out, sizeof(out));
    return NO_STATUS;
}

static int handle_statfs(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr)
{
    char path[PATH_MAX];
    struct statfs stat;
    struct fuse_statfs_out out;
    int res;

    pthread_mutex_lock(&fuse->global->lock);
    TRACE("[%d] STATFS\n", handler->token);
    res = get_node_path_locked(&fuse->global->root, path, sizeof(path));
    pthread_mutex_unlock(&fuse->global->lock);
    if (res < 0) {
        return -ENOENT;
    }
    if (statfs(fuse->global->root.name, &stat) < 0) {
        return -errno;
    }
    memset(&out, 0, sizeof(out));
    out.st.blocks = stat.f_blocks;
    out.st.bfree = stat.f_bfree;
    out.st.bavail = stat.f_bavail;
    out.st.files = stat.f_files;
    out.st.ffree = stat.f_ffree;
    out.st.bsize = stat.f_bsize;
    out.st.namelen = stat.f_namelen;
    out.st.frsize = stat.f_frsize;
    fuse_reply(fuse, hdr->unique, &out, sizeof(out));
    return NO_STATUS;
}

static int handle_release(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_release_in* req)
{
    struct handle *h = id_to_ptr(req->fh);

    TRACE("[%d] RELEASE %p(%d)\n", handler->token, h, h->fd);
    close(h->fd);
    free(h);
    return 0;
}

static int handle_fsync(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_fsync_in* req)
{
    bool is_dir = (hdr->opcode == FUSE_FSYNCDIR);
    bool is_data_sync = req->fsync_flags & 1;

    int fd = -1;
    if (is_dir) {
        struct dirhandle *dh = id_to_ptr(req->fh);
        fd = dirfd(dh->d);
    } else {
        struct handle *h = id_to_ptr(req->fh);
        fd = h->fd;
    }

    TRACE("[%d] %s %p(%d) is_data_sync=%d\n", handler->token,
            is_dir ? "FSYNCDIR" : "FSYNC",
            id_to_ptr(req->fh), fd, is_data_sync);
    int res = is_data_sync ? fdatasync(fd) : fsync(fd);
    if (res == -1) {
        return -errno;
    }
    return 0;
}

static int handle_flush(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr)
{
    TRACE("[%d] FLUSH\n", handler->token);
    return 0;
}

static int handle_opendir(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_open_in* req)
{
    struct node* node;
    char path[PATH_MAX];
    struct fuse_open_out out;
    struct dirhandle *h;

    pthread_mutex_lock(&fuse->global->lock);
    node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid, path, sizeof(path));
    TRACE("[%d] OPENDIR @ %"PRIx64" (%s)\n", handler->token,
            hdr->nodeid, node ? node->name : "?");
    pthread_mutex_unlock(&fuse->global->lock);

    if (!node) {
        return -ENOENT;
    }
    if (!check_caller_access_to_node(fuse, hdr, node, R_OK)) {
        return -EACCES;
    }
    h = malloc(sizeof(*h));
    if (!h) {
        return -ENOMEM;
    }
    TRACE("[%d] OPENDIR %s\n", handler->token, path);
    h->d = opendir(path);
    if (!h->d) {
        free(h);
        return -errno;
    }
    out.fh = ptr_to_id(h);
    out.open_flags = 0;
    out.padding = 0;
    fuse_reply(fuse, hdr->unique, &out, sizeof(out));
    return NO_STATUS;
}

static int handle_readdir(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_read_in* req)
{
    char buffer[8192];
    struct fuse_dirent *fde = (struct fuse_dirent*) buffer;
    struct dirent *de;
    struct dirhandle *h = id_to_ptr(req->fh);

    TRACE("[%d] READDIR %p\n", handler->token, h);
    if (req->offset == 0) {
        /* rewinddir() might have been called above us, so rewind here too */
        TRACE("[%d] calling rewinddir()\n", handler->token);
        rewinddir(h->d);
    }
    de = readdir(h->d);
    if (!de) {
        return 0;
    }
    fde->ino = FUSE_UNKNOWN_INO;
    /* increment the offset so we can detect when rewinddir() seeks back to the beginning */
    fde->off = req->offset + 1;
    fde->type = de->d_type;
    fde->namelen = strlen(de->d_name);
    memcpy(fde->name, de->d_name, fde->namelen + 1);
    fuse_reply(fuse, hdr->unique, fde,
            FUSE_DIRENT_ALIGN(sizeof(struct fuse_dirent) + fde->namelen));
    return NO_STATUS;
}

static int handle_releasedir(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_release_in* req)
{
    struct dirhandle *h = id_to_ptr(req->fh);

    TRACE("[%d] RELEASEDIR %p\n", handler->token, h);
    closedir(h->d);
    free(h);
    return 0;
}

static int handle_init(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header* hdr, const struct fuse_init_in* req)
{
    struct fuse_init_out out;
    size_t fuse_struct_size;

    TRACE("[%d] INIT ver=%d.%d maxread=%d flags=%x\n",
            handler->token, req->major, req->minor, req->max_readahead, req->flags);

    /* Kernel 2.6.16 is the first stable kernel with struct fuse_init_out
     * defined (fuse version 7.6). The structure is the same from 7.6 through
     * 7.22. Beginning with 7.23, the structure increased in size and added
     * new parameters.
     */
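    /* Example (illustrative): a 7.19 kernel is answered with minor=15 and,
     * when these headers define FUSE_COMPAT_22_INIT_OUT_SIZE, the smaller
     * 7.22-compatible structure; a 7.23+ kernel also gets minor=15 but the
     * full-sized structure, since req->minor is greater than 22. */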
1440 if (req->major != FUSE_KERNEL_VERSION || req->minor < 6) {
1441 ERROR("Fuse kernel version mismatch: Kernel version %d.%d, Expected at least %d.6",
1442 req->major, req->minor, FUSE_KERNEL_VERSION);
1443 return -1;
1444 }
1445
1446 /* We limit ourselves to 15 because we don't handle BATCH_FORGET yet */
1447 out.minor = MIN(req->minor, 15);
1448 fuse_struct_size = sizeof(out);
1449 #if defined(FUSE_COMPAT_22_INIT_OUT_SIZE)
1450 /* FUSE_KERNEL_VERSION >= 23. */
1451
1452 /* If the kernel only works on minor revs older than or equal to 22,
1453 * then use the older structure size since this code only uses the 7.22
1454 * version of the structure. */
1455 if (req->minor <= 22) {
1456 fuse_struct_size = FUSE_COMPAT_22_INIT_OUT_SIZE;
1457 }
1458 #endif
1459
1460 out.major = FUSE_KERNEL_VERSION;
1461 out.max_readahead = req->max_readahead;
1462 out.flags = FUSE_ATOMIC_O_TRUNC | FUSE_BIG_WRITES;
1463 out.max_background = 32;
1464 out.congestion_threshold = 32;
1465 out.max_write = MAX_WRITE;
1466 fuse_reply(fuse, hdr->unique, &out, fuse_struct_size);
1467 return NO_STATUS;
1468 }
1469
handle_fuse_request(struct fuse * fuse,struct fuse_handler * handler,const struct fuse_in_header * hdr,const void * data,size_t data_len)1470 static int handle_fuse_request(struct fuse *fuse, struct fuse_handler* handler,
1471 const struct fuse_in_header *hdr, const void *data, size_t data_len)
1472 {
1473 switch (hdr->opcode) {
1474 case FUSE_LOOKUP: { /* bytez[] -> entry_out */
1475 const char* name = data;
1476 return handle_lookup(fuse, handler, hdr, name);
1477 }
1478
1479 case FUSE_FORGET: {
1480 const struct fuse_forget_in *req = data;
1481 return handle_forget(fuse, handler, hdr, req);
1482 }
1483
1484 case FUSE_GETATTR: { /* getattr_in -> attr_out */
1485 const struct fuse_getattr_in *req = data;
1486 return handle_getattr(fuse, handler, hdr, req);
1487 }
1488
1489 case FUSE_SETATTR: { /* setattr_in -> attr_out */
1490 const struct fuse_setattr_in *req = data;
1491 return handle_setattr(fuse, handler, hdr, req);
1492 }
1493
1494 // case FUSE_READLINK:
1495 // case FUSE_SYMLINK:
1496 case FUSE_MKNOD: { /* mknod_in, bytez[] -> entry_out */
1497 const struct fuse_mknod_in *req = data;
1498 const char *name = ((const char*) data) + sizeof(*req);
1499 return handle_mknod(fuse, handler, hdr, req, name);
1500 }
1501
1502 case FUSE_MKDIR: { /* mkdir_in, bytez[] -> entry_out */
1503 const struct fuse_mkdir_in *req = data;
1504 const char *name = ((const char*) data) + sizeof(*req);
1505 return handle_mkdir(fuse, handler, hdr, req, name);
1506 }
1507
1508 case FUSE_UNLINK: { /* bytez[] -> */
1509 const char* name = data;
1510 return handle_unlink(fuse, handler, hdr, name);
1511 }
1512
1513 case FUSE_RMDIR: { /* bytez[] -> */
1514 const char* name = data;
1515 return handle_rmdir(fuse, handler, hdr, name);
1516 }
1517
1518 case FUSE_RENAME: { /* rename_in, oldname, newname -> */
1519 const struct fuse_rename_in *req = data;
1520 const char *old_name = ((const char*) data) + sizeof(*req);
1521 const char *new_name = old_name + strlen(old_name) + 1;
1522 return handle_rename(fuse, handler, hdr, req, old_name, new_name);
1523 }
1524
1525 // case FUSE_LINK:
1526 case FUSE_OPEN: { /* open_in -> open_out */
1527 const struct fuse_open_in *req = data;
1528 return handle_open(fuse, handler, hdr, req);
1529 }
1530
1531 case FUSE_READ: { /* read_in -> byte[] */
1532 const struct fuse_read_in *req = data;
1533 return handle_read(fuse, handler, hdr, req);
1534 }
1535
1536 case FUSE_WRITE: { /* write_in, byte[write_in.size] -> write_out */
1537 const struct fuse_write_in *req = data;
1538 const void* buffer = (const __u8*)data + sizeof(*req);
1539 return handle_write(fuse, handler, hdr, req, buffer);
1540 }
1541
1542 case FUSE_STATFS: { /* getattr_in -> attr_out */
1543 return handle_statfs(fuse, handler, hdr);
1544 }
1545
1546 case FUSE_RELEASE: { /* release_in -> */
1547 const struct fuse_release_in *req = data;
1548 return handle_release(fuse, handler, hdr, req);
1549 }
1550
1551 case FUSE_FSYNC:
1552 case FUSE_FSYNCDIR: {
1553 const struct fuse_fsync_in *req = data;
1554 return handle_fsync(fuse, handler, hdr, req);
1555 }
1556
1557 // case FUSE_SETXATTR:
1558 // case FUSE_GETXATTR:
1559 // case FUSE_LISTXATTR:
1560 // case FUSE_REMOVEXATTR:
1561 case FUSE_FLUSH: {
1562 return handle_flush(fuse, handler, hdr);
1563 }
1564
1565 case FUSE_OPENDIR: { /* open_in -> open_out */
1566 const struct fuse_open_in *req = data;
1567 return handle_opendir(fuse, handler, hdr, req);
1568 }
1569
1570 case FUSE_READDIR: {
1571 const struct fuse_read_in *req = data;
1572 return handle_readdir(fuse, handler, hdr, req);
1573 }
1574
1575 case FUSE_RELEASEDIR: { /* release_in -> */
1576 const struct fuse_release_in *req = data;
1577 return handle_releasedir(fuse, handler, hdr, req);
1578 }
1579
1580 case FUSE_INIT: { /* init_in -> init_out */
1581 const struct fuse_init_in *req = data;
1582 return handle_init(fuse, handler, hdr, req);
1583 }
1584
1585 default: {
1586 TRACE("[%d] NOTIMPL op=%d uniq=%"PRIx64" nid=%"PRIx64"\n",
1587 handler->token, hdr->opcode, hdr->unique, hdr->nodeid);
1588 return -ENOSYS;
1589 }
1590 }
1591 }
1592
static void handle_fuse_requests(struct fuse_handler* handler)
{
    struct fuse* fuse = handler->fuse;
    for (;;) {
        ssize_t len = TEMP_FAILURE_RETRY(read(fuse->fd,
                handler->request_buffer, sizeof(handler->request_buffer)));
        if (len < 0) {
            if (errno == ENODEV) {
                ERROR("[%d] someone stole our marbles!\n", handler->token);
                exit(2);
            }
            ERROR("[%d] handle_fuse_requests: errno=%d\n", handler->token, errno);
            continue;
        }

        if ((size_t)len < sizeof(struct fuse_in_header)) {
            ERROR("[%d] request too short: len=%zu\n", handler->token, (size_t)len);
            continue;
        }

        const struct fuse_in_header *hdr = (void*)handler->request_buffer;
        if (hdr->len != (size_t)len) {
            ERROR("[%d] malformed header: len=%zu, hdr->len=%u\n",
                    handler->token, (size_t)len, hdr->len);
            continue;
        }

        const void *data = handler->request_buffer + sizeof(struct fuse_in_header);
        size_t data_len = len - sizeof(struct fuse_in_header);
        __u64 unique = hdr->unique;
        int res = handle_fuse_request(fuse, handler, hdr, data, data_len);

        /* We do not access the request again after this point because the underlying
         * buffer storage may have been reused while processing the request. */

        if (res != NO_STATUS) {
            if (res) {
                TRACE("[%d] ERROR %d\n", handler->token, res);
            }
            fuse_status(fuse, unique, res);
        }
    }
}

static void* start_handler(void* data)
{
    struct fuse_handler* handler = data;
    handle_fuse_requests(handler);
    return NULL;
}

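/* hashmapForEach callback used to empty package_to_appid before a reload. The key
 * is a strdup()'d package name allocated in read_package_list(), so it is freed
 * here; returning true asks the cutils hashmap to keep iterating. */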
static bool remove_str_to_int(void *key, void *value, void *context) {
    Hashmap* map = context;
    hashmapRemove(map, key);
    free(key);
    return true;
}

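/* Reload the package-name -> appId mapping from kPackagesListFile. Each line is
 * expected to look roughly like "<package> <appId> <debuggable> <dataDir> <seinfo>
 * <gids>" (the exact columns are owned by the framework); only the package name and
 * appId are kept, and the trailing gids field is scanned just so well-formed lines
 * match. Permissions are re-derived afterwards because existing nodes may cache
 * appIds from the previous mapping. */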
static int read_package_list(struct fuse_global* global) {
    pthread_mutex_lock(&global->lock);

    hashmapForEach(global->package_to_appid, remove_str_to_int, global->package_to_appid);

    FILE* file = fopen(kPackagesListFile, "r");
    if (!file) {
        ERROR("failed to open package list: %s\n", strerror(errno));
        pthread_mutex_unlock(&global->lock);
        return -1;
    }

    char buf[512];
    while (fgets(buf, sizeof(buf), file) != NULL) {
        char package_name[512];
        int appid;
        char gids[512];

        if (sscanf(buf, "%s %d %*d %*s %*s %s", package_name, &appid, gids) == 3) {
            char* package_name_dup = strdup(package_name);
            hashmapPut(global->package_to_appid, package_name_dup, (void*) (uintptr_t) appid);
        }
    }

    TRACE("read_package_list: found %zu packages\n",
            hashmapSize(global->package_to_appid));
    fclose(file);

    /* Regenerate ownership details using newly loaded mapping */
    derive_permissions_recursive_locked(global->fuse_default, &global->root);

    pthread_mutex_unlock(&global->lock);
    return 0;
}

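/* Block forever watching kPackagesListFile. The framework typically replaces the
 * file wholesale (written elsewhere and moved into place), so the watch is armed
 * with IN_DELETE_SELF: when the watched inode goes away the kernel delivers
 * IN_IGNORED, at which point the watch is re-armed on the new file and the list is
 * re-read. */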
static void watch_package_list(struct fuse_global* global) {
    struct inotify_event *event;
    char event_buf[512];

    int nfd = inotify_init();
    if (nfd < 0) {
        ERROR("inotify_init failed: %s\n", strerror(errno));
        return;
    }

    bool active = false;
    while (1) {
        if (!active) {
            int res = inotify_add_watch(nfd, kPackagesListFile, IN_DELETE_SELF);
            if (res == -1) {
                if (errno == ENOENT || errno == EACCES) {
                    /* Framework may not have created the file yet; sleep and retry. */
                    ERROR("missing packages.list; retrying\n");
                    sleep(3);
                    continue;
                } else {
                    ERROR("inotify_add_watch failed: %s\n", strerror(errno));
                    return;
                }
            }

            /* The watch above will tell us about any future changes, so
             * read the current state now. */
            if (read_package_list(global) == -1) {
                ERROR("read_package_list failed: %s\n", strerror(errno));
                return;
            }
            active = true;
        }

        int event_pos = 0;
        int res = read(nfd, event_buf, sizeof(event_buf));
        if (res < (int) sizeof(*event)) {
            if (errno == EINTR)
                continue;
            ERROR("failed to read inotify event: %s\n", strerror(errno));
            return;
        }

        while (res >= (int) sizeof(*event)) {
            int event_size;
            event = (struct inotify_event *) (event_buf + event_pos);

            TRACE("inotify event: %08x\n", event->mask);
            if ((event->mask & IN_IGNORED) == IN_IGNORED) {
                /* Previously watched file was deleted, probably due to a move
                 * that swapped in new data; re-arm the watch and read again. */
                active = false;
            }

            event_size = sizeof(*event) + event->len;
            res -= event_size;
            event_pos += event_size;
        }
    }
}

static int usage() {
    ERROR("usage: sdcard [OPTIONS] <source_path> <label>\n"
            "    -u: specify UID to run as\n"
            "    -g: specify GID to run as\n"
            "    -U: specify user ID that owns device\n"
            "    -m: source_path is multi-user\n"
            "    -w: runtime write mount has full write access\n"
            "\n");
    return 1;
}

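/* Open /dev/fuse and mount it at fuse->dest_path. Mount options, in brief: fd=
 * hands the device fd to the kernel, rootmode=40000 marks the root as a directory
 * (octal S_IFDIR), default_permissions makes the kernel enforce the modes this
 * daemon synthesizes, allow_other lets users other than the mount owner access the
 * mount, and user_id/group_id tell the kernel which uid/gid owns it. Any previous
 * mount at dest_path is lazily detached first. */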
static int fuse_setup(struct fuse* fuse, gid_t gid, mode_t mask) {
    char opts[256];

    fuse->fd = open("/dev/fuse", O_RDWR);
    if (fuse->fd == -1) {
        ERROR("failed to open fuse device: %s\n", strerror(errno));
        return -1;
    }

    umount2(fuse->dest_path, MNT_DETACH);

    snprintf(opts, sizeof(opts),
            "fd=%i,rootmode=40000,default_permissions,allow_other,user_id=%d,group_id=%d",
            fuse->fd, fuse->global->uid, fuse->global->gid);
    if (mount("/dev/fuse", fuse->dest_path, "fuse", MS_NOSUID | MS_NODEV | MS_NOEXEC |
            MS_NOATIME, opts) != 0) {
        ERROR("failed to mount fuse filesystem: %s\n", strerror(errno));
        return -1;
    }

    fuse->gid = gid;
    fuse->mask = mask;

    return 0;
}

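/* Set up the three permission views of the same source tree
 * (/mnt/runtime/{default,read,write}/<label>), each backed by its own /dev/fuse fd
 * and serviced by its own handler thread. The per-view gid and mask passed to
 * fuse_setup() control how permissive the synthesized file modes are, with -w
 * (full_write) adjusting the masks for the read and write views. Privileges are
 * dropped to the requested uid/gid only after all three filesystems are mounted. */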
static void run(const char* source_path, const char* label, uid_t uid,
        gid_t gid, userid_t userid, bool multi_user, bool full_write) {
    struct fuse_global global;
    struct fuse fuse_default;
    struct fuse fuse_read;
    struct fuse fuse_write;
    struct fuse_handler handler_default;
    struct fuse_handler handler_read;
    struct fuse_handler handler_write;
    pthread_t thread_default;
    pthread_t thread_read;
    pthread_t thread_write;

    memset(&global, 0, sizeof(global));
    memset(&fuse_default, 0, sizeof(fuse_default));
    memset(&fuse_read, 0, sizeof(fuse_read));
    memset(&fuse_write, 0, sizeof(fuse_write));
    memset(&handler_default, 0, sizeof(handler_default));
    memset(&handler_read, 0, sizeof(handler_read));
    memset(&handler_write, 0, sizeof(handler_write));

    pthread_mutex_init(&global.lock, NULL);
    global.package_to_appid = hashmapCreate(256, str_hash, str_icase_equals);
    global.uid = uid;
    global.gid = gid;
    global.multi_user = multi_user;
    global.next_generation = 0;
    global.inode_ctr = 1;

    memset(&global.root, 0, sizeof(global.root));
    global.root.nid = FUSE_ROOT_ID; /* 1 */
    global.root.refcount = 2;
    global.root.namelen = strlen(source_path);
    global.root.name = strdup(source_path);
    global.root.userid = userid;
    global.root.uid = AID_ROOT;
    global.root.under_android = false;

    strcpy(global.source_path, source_path);

    if (multi_user) {
        global.root.perm = PERM_PRE_ROOT;
        snprintf(global.obb_path, sizeof(global.obb_path), "%s/obb", source_path);
    } else {
        global.root.perm = PERM_ROOT;
        snprintf(global.obb_path, sizeof(global.obb_path), "%s/Android/obb", source_path);
    }

    fuse_default.global = &global;
    fuse_read.global = &global;
    fuse_write.global = &global;

    global.fuse_default = &fuse_default;
    global.fuse_read = &fuse_read;
    global.fuse_write = &fuse_write;

    snprintf(fuse_default.dest_path, PATH_MAX, "/mnt/runtime/default/%s", label);
    snprintf(fuse_read.dest_path, PATH_MAX, "/mnt/runtime/read/%s", label);
    snprintf(fuse_write.dest_path, PATH_MAX, "/mnt/runtime/write/%s", label);

    handler_default.fuse = &fuse_default;
    handler_read.fuse = &fuse_read;
    handler_write.fuse = &fuse_write;

    handler_default.token = 0;
    handler_read.token = 1;
    handler_write.token = 2;

    umask(0);

    if (multi_user) {
        /* Multi-user storage is fully isolated per user, so "other"
         * permissions are completely masked off. */
        if (fuse_setup(&fuse_default, AID_SDCARD_RW, 0006)
                || fuse_setup(&fuse_read, AID_EVERYBODY, 0027)
                || fuse_setup(&fuse_write, AID_EVERYBODY, full_write ? 0007 : 0027)) {
            ERROR("failed to fuse_setup\n");
            exit(1);
        }
    } else {
        /* Physical storage is readable by all users on device, but
         * the Android directories are masked off to a single user
         * deep inside attr_from_stat(). */
        if (fuse_setup(&fuse_default, AID_SDCARD_RW, 0006)
                || fuse_setup(&fuse_read, AID_EVERYBODY, full_write ? 0027 : 0022)
                || fuse_setup(&fuse_write, AID_EVERYBODY, full_write ? 0007 : 0022)) {
            ERROR("failed to fuse_setup\n");
            exit(1);
        }
    }

    /* Drop privs */
    if (setgroups(sizeof(kGroups) / sizeof(kGroups[0]), kGroups) < 0) {
        ERROR("cannot setgroups: %s\n", strerror(errno));
        exit(1);
    }
    if (setgid(gid) < 0) {
        ERROR("cannot setgid: %s\n", strerror(errno));
        exit(1);
    }
    if (setuid(uid) < 0) {
        ERROR("cannot setuid: %s\n", strerror(errno));
        exit(1);
    }

    if (multi_user) {
        fs_prepare_dir(global.obb_path, 0775, uid, gid);
    }

    if (pthread_create(&thread_default, NULL, start_handler, &handler_default)
            || pthread_create(&thread_read, NULL, start_handler, &handler_read)
            || pthread_create(&thread_write, NULL, start_handler, &handler_write)) {
        ERROR("failed to pthread_create\n");
        exit(1);
    }

    watch_package_list(&global);
    ERROR("terminated prematurely\n");
    exit(1);
}

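/* Parse options, require a nonzero uid/gid, raise RLIMIT_NOFILE, and wait for
 * installd's filesystem upgrade to complete (/data/.layout_version must reach 3)
 * before mounting, presumably so the daemon does not start serving while installd
 * is still migrating data. run() never returns on success. */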
int main(int argc, char **argv) {
    const char *source_path = NULL;
    const char *label = NULL;
    uid_t uid = 0;
    gid_t gid = 0;
    userid_t userid = 0;
    bool multi_user = false;
    bool full_write = false;
    int i;
    struct rlimit rlim;
    int fs_version;

    int opt;
    while ((opt = getopt(argc, argv, "u:g:U:mw")) != -1) {
        switch (opt) {
            case 'u':
                uid = strtoul(optarg, NULL, 10);
                break;
            case 'g':
                gid = strtoul(optarg, NULL, 10);
                break;
            case 'U':
                userid = strtoul(optarg, NULL, 10);
                break;
            case 'm':
                multi_user = true;
                break;
            case 'w':
                full_write = true;
                break;
            case '?':
            default:
                return usage();
        }
    }

    for (i = optind; i < argc; i++) {
        char* arg = argv[i];
        if (!source_path) {
            source_path = arg;
        } else if (!label) {
            label = arg;
        } else {
            ERROR("too many arguments\n");
            return usage();
        }
    }

    if (!source_path) {
        ERROR("no source path specified\n");
        return usage();
    }
    if (!label) {
        ERROR("no label specified\n");
        return usage();
    }
    if (!uid || !gid) {
        ERROR("uid and gid must be nonzero\n");
        return usage();
    }

    rlim.rlim_cur = 8192;
    rlim.rlim_max = 8192;
    if (setrlimit(RLIMIT_NOFILE, &rlim)) {
        ERROR("Error setting RLIMIT_NOFILE, errno = %d\n", errno);
    }

    while ((fs_read_atomic_int("/data/.layout_version", &fs_version) == -1) || (fs_version < 3)) {
        ERROR("installd fs upgrade not yet complete. Waiting...\n");
        sleep(1);
    }

    run(source_path, label, uid, gid, userid, multi_user, full_write);
    return 1;
}