/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include "fs_vnode.h"
#include "fs_page_cache.h"
#include <chcore-internal/fs_debug.h>
#include <chcore/syscall.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

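/*
 * Compare an ino_t key against the vnode stored in an rb-tree node.
 * Used as the search callback for rb_search() on fs_vnode_list.
 */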
static int comp_vnode_key(const void *key, const struct rb_node *node)
{
    struct fs_vnode *vnode = rb_entry(node, struct fs_vnode, node);
    ino_t vnode_id = *(ino_t *)key;

    if (vnode_id < vnode->vnode_id)
        return -1;
    else if (vnode_id > vnode->vnode_id)
        return 1;
    else
        return 0;
}

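/* Ordering callback for rb_insert(): vnodes are kept sorted by vnode_id. */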
static bool less_vnode(const struct rb_node *lhs, const struct rb_node *rhs)
{
    struct fs_vnode *l = rb_entry(lhs, struct fs_vnode, node);
    struct fs_vnode *r = rb_entry(rhs, struct fs_vnode, node);

    return l->vnode_id < r->vnode_id;
}

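/*
 * Release a server entry slot: free its heap-allocated path string and the
 * entry itself, then mark the slot as unused.
 */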
void free_entry(int entry_idx)
{
    free(server_entrys[entry_idx]->path);
    free(server_entrys[entry_idx]);
    server_entrys[entry_idx] = NULL;
}

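/*
 * Allocate the first free slot in server_entrys[]. Returns the slot index,
 * or -1 if no slot is free or the allocation fails. The caller is expected
 * to fill in the fields afterwards (see assign_entry below).
 */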
int alloc_entry(void)
{
    int i;

    for (i = 0; i < MAX_SERVER_ENTRY_NUM; i++) {
        if (server_entrys[i] == NULL) {
            server_entrys[i] =
                (struct server_entry *)malloc(sizeof(struct server_entry));
            if (server_entrys[i] == NULL)
                return -1;
            pthread_mutex_init(&server_entrys[i]->lock, NULL);
            fs_debug_trace_fswrapper("entry_id=%d\n", i);
            return i;
        }
    }
    return -1;
}

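/*
 * Fill in an allocated server entry: flags, file offset, reference count,
 * path string and the backing vnode.
 */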
void assign_entry(struct server_entry *e, u64 f, off_t o, int t, void *p,
                  struct fs_vnode *n)
{
    fs_debug_trace_fswrapper(
        "flags=0x%lx, offset=%ld, path=%s, vnode_id=%ld\n",
        f,
        o,
        (char *)p,
        n->vnode_id);
    e->flags = f;
    e->offset = o;
    e->path = p;
    e->vnode = n;
    e->refcnt = t;
}

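/*
 * Initialize the global vnode rb-tree (fs_vnode_list). Exits the process if
 * the allocation fails.
 */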
void fs_vnode_init(void)
{
    fs_vnode_list = malloc(sizeof(*fs_vnode_list));
    if (fs_vnode_list == NULL) {
        printf("[fs_base] not enough memory to initialize, exiting...\n");
        exit(-1);
    }
    init_rb_root(fs_vnode_list);
}

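/*
 * Allocate and initialize a new vnode with the given inode id, type, size
 * and file-system private data. The returned vnode has refcnt = 1, no PMO
 * capability yet (pmo_cap = -1), and, if the page cache is enabled, a fresh
 * page cache entity. Returns NULL on allocation failure.
 */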
struct fs_vnode *alloc_fs_vnode(ino_t id, enum fs_vnode_type type, off_t size,
                                void *private)
{
    struct fs_vnode *ret = (struct fs_vnode *)malloc(sizeof(*ret));
    if (ret == NULL) {
        return NULL;
    }

    /* Fill in the initial state */
    ret->vnode_id = id;
    ret->type = type;
    ret->size = size;
    ret->private = private;
    fs_debug_trace_fswrapper(
        "id=%ld, type=%d, size=%ld(0x%lx)\n", id, type, size, size);

    /* Reference count starts at 1 */
    ret->refcnt = 1;

    ret->pmo_cap = -1;

    /* Create a page cache entity for the vnode */
    if (using_page_cache)
        ret->page_cache = new_page_cache_entity_of_inode(ret->vnode_id, ret);
    pthread_rwlock_init(&ret->rwlock, NULL);

    return ret;
}

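/* Insert a vnode into the global rb-tree, ordered by vnode_id. */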
void push_fs_vnode(struct fs_vnode *n)
{
    rb_insert(fs_vnode_list, &n->node, less_vnode);
}

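/*
 * Remove a vnode from the global rb-tree and free it, revoking its PMO
 * capability if one was installed.
 */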
void pop_free_fs_vnode(struct fs_vnode *n)
{
    rb_erase(fs_vnode_list, &n->node);
    if (n->pmo_cap > 0) {
        usys_revoke_cap(n->pmo_cap, false);
    }
    free(n);
}

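/*
 * Look up a vnode by inode id in the global rb-tree. Returns NULL if no
 * matching vnode is cached.
 */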
struct fs_vnode *get_fs_vnode_by_id(ino_t vnode_id)
{
    struct rb_node *node = rb_search(fs_vnode_list, &vnode_id, comp_vnode_key);
    if (node == NULL)
        return NULL;
    return rb_entry(node, struct fs_vnode, node);
}

/* Reference counting for vnodes */
int inc_ref_fs_vnode(void *n)
{
    ((struct fs_vnode *)n)->refcnt++;
    return 0;
}

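/*
 * Drop one reference from a vnode. When the count reaches zero, the
 * underlying file is closed via server_ops.close() and the vnode is removed
 * from the tree and freed. Returns the close error code on failure, in
 * which case the vnode is not freed.
 */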
int dec_ref_fs_vnode(void *node)
{
    int ret;
    struct fs_vnode *n = (struct fs_vnode *)node;

    n->refcnt--;
    assert(n->refcnt >= 0);

    if (n->refcnt == 0) {
        ret = server_ops.close(n->private, (n->type == FS_NODE_DIR), true);
        if (ret) {
            printf("Warning: close failed when dereferencing vnode: %d\n", ret);
            return ret;
        }

        pop_free_fs_vnode(n);
    }

    return 0;
}
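
/*
 * Illustrative sketch (not part of the original source): how these helpers
 * typically compose when a file is opened. The names `ino`, `type`, `size`,
 * `priv`, `flags` and `path` are assumed to come from the concrete
 * file-system backend; only the functions defined in this file are real.
 * Note that the path passed to assign_entry() must be heap-allocated,
 * because free_entry() frees it.
 *
 *     struct fs_vnode *vnode = get_fs_vnode_by_id(ino);
 *     if (vnode) {
 *         inc_ref_fs_vnode(vnode);            // reuse the cached vnode
 *     } else {
 *         vnode = alloc_fs_vnode(ino, type, size, priv);
 *         push_fs_vnode(vnode);               // make it findable by id
 *     }
 *
 *     int entry_id = alloc_entry();           // pick a free server_entrys slot
 *     if (entry_id >= 0)
 *         assign_entry(server_entrys[entry_id], flags, 0, 1,
 *                      strdup(path), vnode);
 */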