/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include "fcntl.h"
#include "sys/stat.h"
#include <errno.h>
#include <pthread.h>
#include <chcore/bug.h>
#include <chcore/type.h>
#include <chcore/memory.h>
#include <chcore-internal/fs_defs.h>
#include <chcore-internal/fs_debug.h>
#include <string.h>
#include <sys/mman.h>
#include <chcore/defs.h>
#include "fs_wrapper_defs.h"
#include "fs_page_cache.h"
#include "fs_vnode.h"
#include "fs_page_fault.h"

/* Return true if fd is NOT valid */
static inline bool fd_type_invalid(int fd, bool isfile)
{
    if (fd < 0 || fd >= MAX_SERVER_ENTRY_NUM)
        return true;
    if (server_entrys[fd] == NULL)
        return true;
    if (isfile && (server_entrys[fd]->vnode->type != FS_NODE_REG))
        return true;
    if (!isfile && (server_entrys[fd]->vnode->type != FS_NODE_DIR))
        return true;
    return false;
}

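/*
 * Copy the last component of `path` into `path_leaf`.
 * The scan starts at strlen(path) - 2 so that a single trailing '/'
 * (e.g. "/a/b/") is skipped; any trailing '/' left in the copied leaf
 * is stripped afterwards. Returns -1 if the path contains no '/'.
 */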
static int get_path_leaf(const char *path, char *path_leaf)
{
    int i;
    int ret;

    ret = -1; /* -1 means no '/' was found */

    for (i = strlen(path) - 2; i >= 0; i--) {
        if (path[i] == '/') {
            strcpy(path_leaf, path + i + 1);
            ret = 0;
            break;
        }
    }

    if (ret == -1)
        return ret;

    if (path_leaf[strlen(path_leaf) - 1] == '/') {
        path_leaf[strlen(path_leaf) - 1] = '\0';
    }

    return ret;
}

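/*
 * Copy `path` into `path_prefix` and truncate it at the last '/',
 * yielding the parent directory part. A trailing '/' on the input is
 * ignored by starting the scan at strlen(path) - 2.
 * Returns -1 if the path contains no '/'.
 */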
static int get_path_prefix(const char *path, char *path_prefix)
{
    int i;
    int ret;

    ret = -1; /* -1 means no '/' was found */

    BUG_ON(strlen(path) > FS_REQ_PATH_BUF_LEN);

    strcpy(path_prefix, path);
    for (i = strlen(path_prefix) - 2; i >= 0; i--) {
        if (path_prefix[i] == '/') {
            path_prefix[i] = '\0';
            ret = 0;
            break;
        }
    }

    return ret;
}

static int check_path_leaf_is_not_dot(const char *path)
{
    char leaf[FS_REQ_PATH_BUF_LEN];

    if (get_path_leaf(path, leaf) == -1)
        return -EINVAL;
    if (strcmp(leaf, ".") == 0 || strcmp(leaf, "..") == 0)
        return -EINVAL;

    return 0;
}

/* Default server operation: do nothing, just print error info and return -1 */
int default_server_operation(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    printf("[fs server] operation %d is not defined\n", fr->req);
    return -1;
}

ssize_t default_ssize_t_server_operation(ipc_msg_t *ipc_msg,
                                         struct fs_request *fr)
{
    printf("[fs server] operation %d is not defined\n", fr->req);
    return -1;
}

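/*
 * Handle an open() request from a client.
 *
 * Flow: parse the request, honour O_CREAT|O_EXCL, ask the concrete FS
 * (server_ops.open) to open/create the file, validate the flags against
 * the node type, then allocate a server-side entry and attach it either
 * to an already-cached vnode or to a freshly allocated one. Finally the
 * client's fd is mapped to the server entry via
 * fs_wrapper_set_server_entry(), and the client fd is returned.
 */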
int fs_wrapper_open(badge_t client_badge, ipc_msg_t *ipc_msg,
                    struct fs_request *fr)
{
    int new_fd;
    char *path;
    int flags;
    int mode;
    int entry_id;
    int ret;

    ino_t vnode_id;
    int vnode_type;
    off_t vnode_size;
    void *vnode_private;

    struct fs_vnode *vnode;

    off_t entry_off;

    /* Parse arguments from fr */
    new_fd = fr->open.new_fd; /* Store fr->fd (newly generated client fd) to
                                 new_fd temporarily */
    path = fr->open.pathname;
    flags = fr->open.flags;
    mode = fr->open.mode;

    if (strlen(path) == 0) {
        return -ENOENT;
    }

    fs_debug_trace_fswrapper(
        "new_fd=%d, flags=0%o, path=%s\n", new_fd, flags, path);

    /*
     * If O_CREAT and O_EXCL are set, open() shall fail if the file exists.
     */
    if ((flags & O_CREAT) && (flags & O_EXCL)) {
        struct stat st;
        ret = server_ops.fstatat(path, &st, AT_SYMLINK_NOFOLLOW);
        if (ret == 0) {
            return -EEXIST;
        }
    }

    fr->open.new_fd = AT_FDROOT;
    ret = server_ops.open(
        path, flags, mode, &vnode_id, &vnode_size, &vnode_type, &vnode_private);
    if (ret != 0) {
        fs_debug_error("ret = %d\n", ret);
        return ret;
    }

    fs_debug_trace_fswrapper("vnode_id=%ld, vnode_size=0x%lx, vnode_type=%d\n",
                             vnode_id,
                             vnode_size,
                             vnode_type);

    if ((flags & O_DIRECTORY) && vnode_type != FS_NODE_DIR) {
        server_ops.close(vnode_private, (vnode_type == FS_NODE_DIR), true);
        return -ENOTDIR;
    }

    if ((flags & (O_RDWR | O_WRONLY)) && vnode_type == FS_NODE_DIR) {
        server_ops.close(vnode_private, (vnode_type == FS_NODE_DIR), true);
        return -ENOTDIR;
    }

    if (flags & O_NOCTTY) {
        BUG_ON(1);
    }

    if (!(flags & (O_RDWR | O_WRONLY)) && (flags & (O_TRUNC | O_APPEND))) {
        server_ops.close(vnode_private, (vnode_type == FS_NODE_DIR), true);
        return -EACCES;
    }

    if ((flags & O_TRUNC) && (vnode_type == FS_NODE_REG)) {
        server_ops.ftruncate(vnode_private, 0);
    }

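    /* Allocate a server-side entry (a slot in server_entrys[]) that will
     * represent this open file inside the FS server. */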
    entry_id = alloc_entry();
    if (entry_id < 0) {
        server_ops.close(vnode_private, (vnode_type == FS_NODE_DIR), true);
        return -EMFILE;
    }

    if ((flags & O_APPEND) && (vnode_type == FS_NODE_REG)) {
        entry_off = vnode_size;
    } else {
        entry_off = 0;
    }

    vnode = get_fs_vnode_by_id(vnode_id);
    if (NULL != vnode) {
        /* Assign the new entry to the existing vnode and close the newly
         * opened private struct */
        inc_ref_fs_vnode(vnode);
        assign_entry(server_entrys[entry_id],
                     flags,
                     entry_off,
                     1,
                     (void *)strdup(path),
                     vnode);
        server_ops.close(vnode_private, (vnode_type == FS_NODE_DIR), false);
    } else {
        vnode = alloc_fs_vnode(vnode_id, vnode_type, vnode_size, vnode_private);
        if (vnode == NULL) {
            server_ops.close(vnode_private, (vnode_type == FS_NODE_DIR), true);
            free_entry(entry_id);
            return -ENOMEM;
        }
        push_fs_vnode(vnode);
        assign_entry(server_entrys[entry_id],
                     flags,
                     entry_off,
                     1,
                     (void *)strdup(path),
                     vnode);
    }

    /* After the server has handled the open request, map new_fd to the
     * server-side entry id */
    ret = fs_wrapper_set_server_entry(client_badge, new_fd, entry_id);
    if (ret < 0) {
        dec_ref_fs_vnode(vnode);
        free_entry(entry_id);
        return ret;
    }

    fs_debug_trace_fswrapper("entry_id=%d\n", entry_id);

    return new_fd;
}

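/*
 * Handle close(): drop one reference on the server entry; when it reaches
 * zero, free the entry, clear the client's fd mapping and drop the entry's
 * reference on the vnode. The vnode itself (and its page cache) is kept
 * around so the cache survives close().
 */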
int fs_wrapper_close(badge_t client_badge, ipc_msg_t *ipc_msg,
                     struct fs_request *fr)
{
    int entry_id;
    struct fs_vnode *vnode;
    fs_debug_trace_fswrapper("fd=%d\n", fr->close.fd);

    /* Parse and check arguments */
    entry_id = fr->close.fd;
    if (fd_type_invalid(entry_id, true) && fd_type_invalid(entry_id, false)) {
        fs_debug_error("fd_type_invalid\n");
        return -ENOENT;
    }

    vnode = server_entrys[entry_id]->vnode;
    server_entrys[entry_id]->refcnt--;
    if (server_entrys[entry_id]->refcnt == 0) {
        free_entry(entry_id);
        fs_wrapper_clear_server_entry(client_badge, entry_id);
        dec_ref_fs_vnode(vnode);
    }

    /*
     * To preserve the page cache even after we close the file,
     * we do not revoke the vnode when the user calls close().
     */

    /* Revoke vnode, if refcnt == 0 */

    return 0;
}

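/*
 * Handle read(): under the entry lock and the vnode's read lock, clamp the
 * requested size to the end of the file and to READ_SIZE_MAX, then either
 * forward the read to the concrete FS (server_ops.read) or, when the page
 * cache is enabled, copy the data page by page out of the cache. The entry
 * offset is advanced by the number of bytes actually read, which is also
 * the return value.
 */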
int fs_wrapper_read(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    int fd;
    char *buf;
    off_t offset;
    size_t size;
    void *operator;
    int ret;
    struct fs_vnode *vnode;
    char *page_buf;
    int fptr, page_idx, page_off, copy_size;

    ret = 0;
    fd = fr->read.fd;
    buf = (void *)fr;
    fs_debug_trace_fswrapper("entry_id=%d\n", fd);

    pthread_mutex_lock(&server_entrys[fd]->lock);
    pthread_rwlock_rdlock(&server_entrys[fd]->vnode->rwlock);

    size = (size_t)fr->read.count;
    offset = (off_t)server_entrys[fd]->offset;
    vnode = server_entrys[fd]->vnode;
    operator = server_entrys[fd]->vnode->private;

    /* Check open flags: reading a file opened as write-only */
    if (server_entrys[fd]->flags & O_WRONLY) {
        ret = -EBADF;
        goto out;
    }

    /* Do not read a directory directly */
    if (server_entrys[fd]->vnode->type == FS_NODE_DIR) {
        ret = -EISDIR;
        goto out;
    }

    /*
     * If offset is already outside the file,
     *      do nothing and return 0
     */
    if (offset >= server_entrys[fd]->vnode->size) {
        goto out;
    }

    /*
     * If offset + size > file_size,
     *      change size to (file_size - offset).
     */
    if (offset + size > server_entrys[fd]->vnode->size) {
        size = server_entrys[fd]->vnode->size - offset;
    }

    /**
     * read(2):
     * On Linux, read() (and similar system calls) will transfer at most
     * 0x7ffff000 (2,147,479,552) bytes, returning the number of bytes
     * actually transferred.  (This is true on both 32-bit and 64-bit
     * systems.)
     */
    size = size <= READ_SIZE_MAX ? size : READ_SIZE_MAX;

    /*
     * The server-side read operation is expected to behave like:
     * - Base: read the file from `offset` for `size` bytes; if it reaches
     *      the end of the file, return the content from offset to the end
     *      and report the number of bytes read.
     */
    if (!using_page_cache) {
        ret = server_ops.read(operator, offset, size, buf);
    } else {
        for (fptr = offset; fptr < offset + size;
             fptr = ROUND_DOWN(fptr, PAGE_SIZE) + PAGE_SIZE) {
            page_idx = fptr / PAGE_SIZE;
            page_off = fptr - ROUND_DOWN(fptr, PAGE_SIZE);
            copy_size = MIN(PAGE_SIZE - page_off, offset + size - fptr);

            /* get-read-put */
            page_buf = page_cache_get_block_or_page(
                vnode->page_cache, page_idx, -1, READ);
            memcpy(buf + fptr - offset, page_buf + page_off, copy_size);
            page_cache_put_block_or_page(vnode->page_cache, page_idx, -1, READ);

            ret += copy_size;
        }
    }

    /* Update server_entry and vnode metadata */
    server_entrys[fd]->offset += ret;

out:
    pthread_rwlock_unlock(&server_entrys[fd]->vnode->rwlock);
    pthread_mutex_unlock(&server_entrys[fd]->lock);
    return ret;
}

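/*
 * Handle write(): under the entry lock and the vnode's write lock, honour
 * O_APPEND by repositioning the offset at the end of the file, clamp the
 * size, then either forward the write to the concrete FS or, when the page
 * cache is enabled, extend the file with ftruncate and copy the data into
 * the cache block by block. The entry offset and the cached vnode size are
 * updated afterwards.
 */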
int fs_wrapper_write(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    int fd;
    char *buf;
    size_t size;
    off_t offset;
    void *operator;
    int ret;
    struct fs_vnode *vnode;
    char *block_buf;
    int fptr, page_idx, block_idx, block_off, copy_size;

    ret = 0;
    fd = fr->write.fd;
    buf = (void *)fr + sizeof(struct fs_request);
    fs_debug_trace_fswrapper("entry_id=%d\n", fd);

    pthread_mutex_lock(&server_entrys[fd]->lock);
    pthread_rwlock_wrlock(&server_entrys[fd]->vnode->rwlock);

    size = (size_t)fr->write.count;
    offset = (off_t)server_entrys[fd]->offset;
    vnode = server_entrys[fd]->vnode;
    operator = server_entrys[fd]->vnode->private;

    /* Check open flags: writing a file opened as read-only */
    if ((server_entrys[fd]->flags & O_ACCMODE) == O_RDONLY) {
        ret = -EBADF;
        goto out;
    }

    /*
     * If size == 0, do nothing and return 0.
     * Even if the offset is outside the file, the inode size is not changed.
     */
    if (size == 0) {
        goto out;
    }

    /*
     * POSIX: with O_APPEND, before each write(2) the file offset is
     * positioned at the end of the file, as if with lseek(2).
     */
    if (server_entrys[fd]->flags & O_APPEND) {
        offset = (off_t)server_entrys[fd]->vnode->size;
        server_entrys[fd]->offset = offset;
    }

    /** see fs_wrapper_read */
    size = size <= READ_SIZE_MAX ? size : READ_SIZE_MAX;

    /*
     * The server-side write operation is expected to behave like:
     * - Base: write the file and return the number of bytes written.
     * - If the offset is beyond the end of the file (size == 0 is already
     *      handled above), fill with '\0' up to the offset, then append.
     */

    if (!using_page_cache)
        ret = server_ops.write(operator, offset, size, buf);
    else {
        if (offset + size > vnode->size) {
            vnode->size = offset + size;
            server_ops.ftruncate(operator, offset + size);
        }
        for (fptr = offset; fptr < offset + size;
             fptr = ROUND_DOWN(fptr, CACHED_BLOCK_SIZE) + CACHED_BLOCK_SIZE) {
            page_idx = fptr / CACHED_PAGE_SIZE;
            block_idx =
                (fptr - ROUND_DOWN(fptr, PAGE_SIZE)) / CACHED_BLOCK_SIZE;
            block_off = fptr - ROUND_DOWN(fptr, CACHED_BLOCK_SIZE);
            copy_size =
                MIN(CACHED_BLOCK_SIZE - block_off, offset + size - fptr);

            /* get-write-put */
            block_buf = page_cache_get_block_or_page(
                vnode->page_cache, page_idx, block_idx, WRITE);
            memcpy(block_buf + block_off, buf + fptr - offset, copy_size);
            page_cache_put_block_or_page(
                vnode->page_cache, page_idx, block_idx, WRITE);

            ret += copy_size;
        }
    }

    /* Update server_entry and vnode metadata */
    server_entrys[fd]->offset += ret;
    if (server_entrys[fd]->offset > server_entrys[fd]->vnode->size) {
        server_entrys[fd]->vnode->size = server_entrys[fd]->offset;
    }

out:
    pthread_rwlock_unlock(&server_entrys[fd]->vnode->rwlock);
    pthread_mutex_unlock(&server_entrys[fd]->lock);
    return ret;
}

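/*
 * Handle lseek(): compute the new offset from `whence` (SEEK_SET/CUR/END),
 * reject negative results, store it in the server entry and report it back
 * to the client through fr->lseek.ret.
 */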
int fs_wrapper_lseek(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    int fd;
    off_t offset;
    int whence;
    off_t target_off;

    fd = fr->lseek.fd;
    offset = fr->lseek.offset;
    whence = fr->lseek.whence;

    switch (whence) {
    case SEEK_SET: {
        target_off = offset;
        break;
    }
    case SEEK_CUR: {
        target_off = server_entrys[fd]->offset + offset;
        break;
    }
    case SEEK_END:
        target_off = server_entrys[fd]->vnode->size + offset;
        break;
    default: {
        printf("%s: %d Not implemented yet\n", __func__, whence);
        target_off = -1;
        break;
    }
    }
    if (target_off < 0)
        return -EINVAL;

    server_entrys[fd]->offset = target_off;
    fr->lseek.ret = target_off;

    return 0;
}

int fs_wrapper_ftruncate(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    int ret;
    int fd;
    off_t len;
    void *operator;

    fd = fr->ftruncate.fd;
    if (fd_type_invalid(fd, true)) {
        return -EBADF;
    }

    if (!(server_entrys[fd]->flags & O_WRONLY)
        && !(server_entrys[fd]->flags & O_RDWR)) {
        return -EINVAL;
    }

    len = fr->ftruncate.length;

    /* The argument len is negative or larger than the maximum file size */
    if (len < 0)
        return -EINVAL;

    operator = server_entrys[fd]->vnode->private;

    ret = server_ops.ftruncate(operator, len);
    if (!ret)
        server_entrys[fd]->vnode->size = len;
    return ret;
}

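/*
 * Handle fstatat()-style stat requests. Only AT_FDROOT is supported here
 * (the pathname is resolved from the mount root). The stat buffer is
 * written directly into the IPC message; if the vnode is cached in memory,
 * st_size is patched with the cached size, which may be newer than what
 * the concrete FS reports.
 */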
int fs_wrapper_fstatat(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    BUG_ON(fr->stat.dirfd != AT_FDROOT);
    char *path = fr->stat.pathname;
    int flags = fr->stat.flags;
    struct stat *st = (struct stat *)ipc_get_msg_data(ipc_msg);
    int err;
    fs_debug_trace_fswrapper("path=%s, flags=%d\n", path, flags);

    if (strlen(path) == 0) {
        return -ENOENT;
    }

    err = server_ops.fstatat(path, st, flags);
    if (err)
        return err;

    struct fs_vnode *vnode;
    vnode = get_fs_vnode_by_id(st->st_ino);
    if (vnode && (st->st_mode & S_IFREG)) {
        /* vnode is cached in memory, update size in stat */
        st->st_size = vnode->size;
    }

    return 0;
}

int fs_wrapper_unlink(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    char *path = fr->unlink.pathname;
    int flags = fr->unlink.flags;
    int ret;
    struct stat st;
    struct fs_vnode *vnode = NULL;
    fs_debug_trace_fswrapper("path=%s, flags=0%o\n", path, flags);

    if (strlen(path) == 0) {
        return -ENOENT;
    }

    if (using_page_cache) {
        /* drop any cached pages of the file before it is unlinked */
        ret = server_ops.fstatat(path, &st, AT_SYMLINK_NOFOLLOW);
        if (ret)
            return ret;
        vnode = get_fs_vnode_by_id(st.st_ino);
        if (vnode)
            page_cache_delete_pages_of_inode(vnode->page_cache);
    }

    ret = server_ops.unlink(path, flags);

    return ret;
}

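/*
 * Handle rename(). The checks roughly follow POSIX rename(2):
 *  - the last component of either path must not be "." or "..",
 *  - oldpath must exist and must not be an ancestor of newpath,
 *  - the parent directory of newpath must exist and be a directory,
 *  - if newpath exists, it must match oldpath's type; an existing directory
 *    is removed with rmdir (failing with -ENOTEMPTY if non-empty), an
 *    existing file with unlink.
 * Any cached pages of oldpath are evicted before the concrete FS performs
 * the rename.
 */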
int fs_wrapper_rename(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    int ret;
    char *oldpath = fr->rename.oldpath;
    char *newpath = fr->rename.newpath;
    char new_path_prefix[FS_REQ_PATH_BUF_LEN];
    struct stat st;
    struct fs_vnode *vnode;
    bool old_is_dir, new_is_dir;
    ino_t old_ino;
    fs_debug_trace_fswrapper("old=%s, new=%s\n", oldpath, newpath);

    if (strlen(oldpath) == 0 || strlen(newpath) == 0) {
        return -ENOENT;
    }

    /* Check . and .. in the final component */
    if ((ret = check_path_leaf_is_not_dot(oldpath)) != 0)
        return ret;
    if ((ret = check_path_leaf_is_not_dot(newpath)) != 0)
        return ret;

    /* Check if oldpath exists */
    ret = server_ops.fstatat(oldpath, &st, AT_SYMLINK_NOFOLLOW);
    if (ret != 0) {
        return ret;
    }

    old_is_dir = (st.st_mode & S_IFDIR) ? true : false;
    old_ino = st.st_ino;

    /* Check that old is not an ancestor of new */
    if (strncmp(oldpath, newpath, strlen(oldpath)) == 0) {
        if (newpath[strlen(oldpath)] == '/')
            return -EINVAL;
    }

    /* Check if new_path_prefix is valid */
    if (get_path_prefix(newpath, new_path_prefix) == -1) {
        return -EINVAL;
    }
    if (new_path_prefix[0]) {
        /* this is a directory prefix, so follow symlinks here */
        ret = server_ops.fstatat(new_path_prefix, &st, AT_SYMLINK_FOLLOW);
        if (ret)
            return ret;

        if (!(st.st_mode & S_IFDIR))
            return -ENOTDIR;
    }

    /* If oldpath and newpath both exist */
    ret = server_ops.fstatat(newpath, &st, AT_SYMLINK_NOFOLLOW);
    if (ret != -ENOENT) {
        new_is_dir = (st.st_mode & S_IFDIR) ? true : false;
        /* oldpath and newpath are the same file, do nothing */
        if (old_ino == st.st_ino) {
            return 0;
        }
        if (old_is_dir && !new_is_dir)
            return -ENOTDIR;
        if (!old_is_dir && new_is_dir)
            return -EISDIR;
        if (old_is_dir) {
            /* both old and new are dirs */
            ret = server_ops.rmdir(newpath, AT_SYMLINK_NOFOLLOW);
            if (ret == -ENOTEMPTY)
                return ret;
            BUG_ON(ret);
        } else {
            /* both regular */
            ret = server_ops.unlink(newpath, AT_SYMLINK_NOFOLLOW);
            if (ret)
                return ret;
            BUG_ON(ret);
        }
    }

    /* Flush the page cache of oldpath */
    if (using_page_cache && !old_is_dir) {
        /* clear page cache */
        vnode = get_fs_vnode_by_id(old_ino);
        if (vnode)
            page_cache_evict_pages_of_inode(vnode->page_cache);
    }

    ret = server_ops.rename(oldpath, newpath);

    return ret;
}

int fs_wrapper_count(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    printf("hit: %d miss: %d disk_write: %d disk_read: %d\n",
           count.hit,
           count.miss,
           count.disk_i,
           count.disk_o);
    return 0;
}

int fs_wrapper_rmdir(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    char *path = fr->rmdir.pathname;
    int flags = fr->rmdir.flags;
    fs_debug_trace_fswrapper("path=%s, flags=0%o\n", path, flags);

    if (strlen(path) == 0) {
        return -ENOENT;
    }

    return server_ops.rmdir(path, flags);
}

int fs_wrapper_mkdir(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    int ret;

    const char *path = fr->mkdir.pathname;
    mode_t mode = fr->mkdir.mode;
    fs_debug_trace_fswrapper("path=%s, mode=%d\n", path, mode);

    if (strlen(path) == 0) {
        return -ENOENT;
    }

    ret = server_ops.mkdir(path, mode);
    return ret;
}

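/*
 * sync()/fsync(): when the page cache is in use, sync flushes every dirty
 * cached page back to the concrete FS, while fsync only flushes the pages
 * belonging to the vnode behind the given fd. Without the page cache both
 * are no-ops here.
 */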
int fs_wrapper_sync(void)
{
    int ret = 0;

    if (using_page_cache)
        ret = page_cache_flush_all_pages();

    return ret;
}

int fs_wrapper_fsync(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    struct fs_vnode *vnode;
    int ret = 0;

    int fd = fr->fsync.fd;

    BUG_ON(fd == AT_FDROOT);

    if (using_page_cache) {
        vnode = server_entrys[fd]->vnode;
        ret = page_cache_flush_pages_of_inode(vnode->page_cache);
    }

    return ret;
}

#ifdef CHCORE_ENABLE_FMAP
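/*
 * Handle the server side of file mmap (fmap): validate the mmap arguments,
 * record the (client_badge, va, length) -> (vnode, offset, flags, prot)
 * mapping via fmap_area_insert(), lazily create a PMO_FILE backing the
 * vnode, and return its capability to the client in the IPC message
 * (*ret_with_cap = true). The mapped pages themselves are filled in later,
 * on demand (see fs_page_fault.h).
 */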
int fs_wrapper_fmap(badge_t client_badge, ipc_msg_t *ipc_msg,
                    struct fs_request *fr, bool *ret_with_cap)
{
    void *addr;
    size_t length;
    int prot;
    int flags;
    int fd;
    off_t offset;
    struct fs_vnode *vnode;
    cap_t pmo_cap;
    int ret;

    /* If there is no valid fmap implementation, return -EINVAL */
    if (!using_page_cache
        && server_ops.fmap_get_page_addr == default_fmap_get_page_addr) {
        fs_debug_error("fmap is not implemented.\n");
        return -EINVAL;
    }

    /* Step: Parse arguments in fr */
    addr = (void *)fr->mmap.addr;
    length = (size_t)fr->mmap.length;
    prot = fr->mmap.prot;
    flags = fr->mmap.flags;
    fd = fr->mmap.fd;
    offset = (off_t)fr->mmap.offset;

    vnode = server_entrys[fd]->vnode;

    fs_debug_trace_fswrapper(
        "addr=0x%lx, length=0x%lx, prot=%d, flags=%d, fd=%d, offset=0x%lx\n",
        (u64)addr,
        length,
        prot,
        flags,
        fd,
        offset);

    /* Sanity check for arguments */
    if (prot & (~(PROT_NONE | PROT_READ | PROT_WRITE | PROT_EXEC))) {
        return -EINVAL;
    }

    if (flags & MAP_ANONYMOUS) {
        return -EINVAL;
    }

    if (flags & (~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED_NOREPLACE))) {
        fs_debug_trace_fswrapper("unsupported flags=%d\n", flags);
        return -EINVAL;
    }

    if (length % PAGE_SIZE) {
        length = ROUND_UP(length, PAGE_SIZE);
    }
    UNUSED(addr);
    UNUSED(fd);
    UNUSED(offset);

    /* Step: Record (client_badge, mmaped_va, length) -> (vnode, offset,
     * flags) */
    ret = fmap_area_insert(
        client_badge, (vaddr_t)addr, length, vnode, offset, flags, prot);
    if (ret < 0) {
        goto out_fail;
    }

    /* Step: Create a PMO_FILE for the file, if not created yet */
    if (vnode->pmo_cap == -1) {
        pmo_cap = usys_create_pmo(vnode->size, PMO_FILE);
        if (pmo_cap < 0) {
            ret = pmo_cap;
            goto out_remove_mapping;
        }
        vnode->pmo_cap = pmo_cap;
    }

    /* Step: Send PMO_FILE back to the client side */
    ipc_msg->cap_slot_number = 1;
    ipc_set_msg_cap(ipc_msg, 0, vnode->pmo_cap);
    *ret_with_cap = true;

    return 0;
out_remove_mapping:
    fmap_area_remove(client_badge, (vaddr_t)addr, length);
out_fail:
    *ret_with_cap = false;
    return ret;
}

int fs_wrapper_funmap(badge_t client_badge, ipc_msg_t *ipc_msg,
                      struct fs_request *fr)
{
    return fmap_area_remove(
        client_badge, (vaddr_t)fr->munmap.addr, fr->munmap.length);
}
#endif

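/*
 * The wrappers below do little more than validate the path argument (when
 * one is present) and forward the request to the concrete file system
 * through server_ops.
 */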
int fs_wrapper_creat(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    if (strlen(fr->creat.pathname) == 0) {
        return -ENOENT;
    }
    return server_ops.creat(ipc_msg, fr);
}

int fs_wrapper_getdents64(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    return server_ops.getdents64(ipc_msg, fr);
}

int fs_wrapper_fstat(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    return server_ops.fstat(ipc_msg, fr);
}

int fs_wrapper_statfs(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    if (strlen(fr->stat.pathname) == 0) {
        return -ENOENT;
    }
    return server_ops.statfs(ipc_msg, fr);
}

int fs_wrapper_fstatfs(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    return server_ops.fstatfs(ipc_msg, fr);
}

int fs_wrapper_faccessat(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    if (strlen(fr->faccessat.pathname) == 0) {
        return -ENOENT;
    }
    fs_debug_trace_fswrapper("path=%s\n", fr->faccessat.pathname);
    return server_ops.faccessat(ipc_msg, fr);
}

int fs_wrapper_symlinkat(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    if (strlen(fr->symlinkat.linkpath) == 0) {
        return -ENOENT;
    }
    return server_ops.symlinkat(ipc_msg, fr);
}

int fs_wrapper_readlinkat(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    if (strlen(fr->readlinkat.pathname) == 0) {
        return -ENOENT;
    }
    return server_ops.readlinkat(ipc_msg, fr);
}

int fs_wrapper_fallocate(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    return server_ops.fallocate(ipc_msg, fr);
}

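/*
 * Handle fcntl(): F_GETFL returns the entry's open flags, F_SETFL updates
 * the status flags (the access mode and file creation flags are masked out,
 * as POSIX requires), and F_DUPFD maps the requested client fd to the same
 * server entry and bumps its refcnt before forwarding to server_ops.fcntl.
 * Other commands are reported as unsupported and fail with -1.
 */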
int fs_wrapper_fcntl(badge_t client_badge, ipc_msg_t *ipc_msg,
                     struct fs_request *fr)
{
    struct server_entry *entry;
    void *operator;
    int ret = 0;

    if ((entry = server_entrys[fr->fcntl.fd]) == NULL)
        return -EBADF;

    switch (fr->fcntl.fcntl_cmd) {
    case F_GETFL:
        ret = entry->flags;
        break;
    case F_SETFL: {
        // The file access mode and the file creation flags
        // should be ignored, per POSIX.
        int effective_bits =
            (~O_ACCMODE & ~O_CLOEXEC & ~O_CREAT & ~O_DIRECTORY & ~O_EXCL
             & ~O_NOCTTY & ~O_NOFOLLOW & ~O_TRUNC & ~O_TTY_INIT);

        entry->flags = (fr->fcntl.fcntl_arg & effective_bits)
                       | (entry->flags & ~effective_bits);
        break;
    }
    case F_DUPFD: {
        ret = fs_wrapper_set_server_entry(
            client_badge, fr->fcntl.fcntl_arg, fr->fcntl.fd);
        if (ret < 0) {
            break;
        }
        server_entrys[fr->fcntl.fd]->refcnt++;
        operator = entry->vnode->private;
        ret = server_ops.fcntl(operator,
                               fr->fcntl.fd,
                               fr->fcntl.fcntl_cmd,
                               fr->fcntl.fcntl_arg);
        break;
    }

    case F_GETOWN:
    case F_SETOWN:
    case F_GETLK:
    case F_SETLK:
    case F_SETLKW:
    default:
        printf("unsupported fcntl cmd %d\n", fr->fcntl.fcntl_cmd);
        ret = -1;
        break;
    }

    return ret;
}

int fs_wrapper_mount(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    /*
     * A mount request should be handled only when the mounted flag is off.
     * Normally it is issued exactly once, during FSM's mount procedure
     * after boot.
     */
    int ret;
    if (mounted) {
        printf("[Error] fs: server has been mounted!\n");
        ret = -EINVAL;
        goto out;
    }
    ret = server_ops.mount(ipc_msg, fr);
    if (!ret)
        mounted = true;
out:
    return ret;
}

int fs_wrapper_umount(ipc_msg_t *ipc_msg, struct fs_request *fr)
{
    return server_ops.umount(ipc_msg, fr);
}

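/*
 * Called when a client (identified by its badge) exits: remove its
 * fd -> server-entry mapping node, drop one reference on every entry the
 * client still had open (freeing entries and vnode references that reach
 * zero), and recycle any fmap areas it registered. The whole cleanup runs
 * under the fs_wrapper metadata write lock.
 */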
void fs_server_destructor(badge_t client_badge)
{
    struct server_entry_node *n;
    bool found = false;
    int i, fd;
    struct fs_vnode *vnode;

    pthread_rwlock_wrlock(&fs_wrapper_meta_rwlock);

    pthread_spin_lock(&server_entry_mapping_lock);
    for_each_in_list (
        n, struct server_entry_node, node, &server_entry_mapping) {
        if (n->client_badge == client_badge) {
            list_del(&n->node);
            found = true;
            break;
        }
    }
    pthread_spin_unlock(&server_entry_mapping_lock);

    if (found) {
        for (i = 0; i < MAX_SERVER_ENTRY_NUM; i++) {
            fd = n->fd_to_fid[i];
            if (fd >= 0 && server_entrys[fd]) {
                vnode = server_entrys[fd]->vnode;
                server_entrys[fd]->refcnt--;
                if (server_entrys[fd]->refcnt == 0) {
                    free_entry(fd);
                    dec_ref_fs_vnode(vnode);
                }
            }
        }
        free(n);
    }

    fmap_area_recycle(client_badge);

    pthread_rwlock_unlock(&fs_wrapper_meta_rwlock);
}
1030