/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include "fsm.h"
#include <errno.h>
#include <chcore/container/list.h>
#include "device.h"
#include <string.h>
#include <sys/mman.h>
#include <pthread.h>
#include <chcore-internal/procmgr_defs.h>
#include <chcore/string.h>

#include "fsm_client_cap.h"
#include "mount_info.h"

int fs_num = 0;
pthread_mutex_t fsm_client_cap_table_lock;
pthread_rwlock_t mount_point_infos_rwlock;

/* Called once when the FSM server starts. */
static inline void init_utils(void)
{
        init_list_head(&fsm_client_cap_table);
        pthread_mutex_init(&fsm_client_cap_table_lock, NULL);
        init_list_head(&mount_point_infos);
        pthread_rwlock_init(&mount_point_infos_rwlock, NULL);
}

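/*
 * Per-client destructor registered with the IPC server in main().
 * When a client connection goes away, drop the cap-table entry that
 * records the fs_server caps already transferred to that client.
 */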
static void fsm_destructor(badge_t client_badge)
{
        struct fsm_client_cap_node *n, *iter_tmp;
        pthread_mutex_lock(&fsm_client_cap_table_lock);

        for_each_in_list_safe (n, iter_tmp, node, &fsm_client_cap_table) {
                if (n->client_badge == client_badge) {
                        list_del(&n->node);
                        free(n);
                        break;
                }
        }

        pthread_mutex_unlock(&fsm_client_cap_table_lock);
}

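/* Mount the built-in tmpfs at "/" as the root FS when FSM starts. */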
int init_fsm(void)
{
        int ret;

        /* Initialize */
        init_utils();

        ret = fsm_mount_fs("/tmpfs.srv", "/");
        if (ret < 0) {
                error("failed to mount tmpfs, ret %d\n", ret);
                usys_exit(-1);
        }

        return 0;
}

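/*
 * Ask procmgr for the capability of the tmpfs server
 * (PROC_REQ_GET_SERVER_CAP with SERVER_TMPFS). Returns the cap on
 * success, or a negative value if the IPC call fails.
 */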
static int boot_tmpfs(void)
{
        struct proc_request pr;
        ipc_msg_t *ipc_msg;
        int ret;

        ipc_msg = ipc_create_msg(procmgr_ipc_struct, sizeof(struct proc_request));
        pr.req = PROC_REQ_GET_SERVER_CAP;
        pr.get_server_cap.server_id = SERVER_TMPFS;

        ipc_set_msg_data(ipc_msg, &pr, 0, sizeof(pr));
        ret = ipc_call(procmgr_ipc_struct, ipc_msg);

        if (ret < 0) {
                goto out;
        }

        ret = ipc_get_msg_cap(ipc_msg, 0);
out:
        ipc_destroy_msg(ipc_msg);
        return ret;
}

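/*
 * Mount an FS at @mount_point.
 *  - The special @path "/tmpfs.srv" fetches the cap of the built-in tmpfs
 *    (via procmgr) and mounts it at "/".
 *  - Any other @path is treated as a storage device and handed to
 *    mount_storage_device() to obtain the fs_server cap.
 * On success the mount point is recorded, an IPC connection to the
 * fs_server is established, and 0 is returned; otherwise a negative
 * value is returned.
 */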
int fsm_mount_fs(const char *path, const char *mount_point)
{
        cap_t fs_cap;
        int ret;
        struct mount_point_info_node *mp_node;

        ret = -1;
        if (fs_num == MAX_FS_NUM) {
                error("maximum number of FSs reached: %d\n", fs_num);
                goto out;
        }

        if (strlen(mount_point) > MAX_MOUNT_POINT_LEN) {
                error("mount point too long: > %d\n", MAX_MOUNT_POINT_LEN);
                goto out;
        }

        if (mount_point[0] != '/') {
                error("mount point should start with '/'\n");
                goto out;
        }

        if (strcmp(path, "/tmpfs.srv") == 0) {
                /* The special path "/tmpfs.srv" means booting the built-in tmpfs. */
                info("Mounting fs from local binary: %s...\n", path);

                fs_cap = boot_tmpfs();

                if (fs_cap <= 0) {
                        info("Failed to launch TMPFS, boot_tmpfs() returned %d\n", fs_cap);
                        goto out;
                }

                pthread_rwlock_wrlock(&mount_point_infos_rwlock);
                mp_node = set_mount_point("/", 1, fs_cap);
                info("TMPFS is up, with cap = %d\n", fs_cap);
        } else {
                fs_cap = mount_storage_device(path);
                if (fs_cap < 0) {
                        ret = -errno;
                        goto out;
                }
                pthread_rwlock_wrlock(&mount_point_infos_rwlock);
                mp_node = set_mount_point(mount_point, strlen(mount_point), fs_cap);
        }

        /* Connect to the FS we just mounted. */
        mp_node->_fs_ipc_struct = ipc_register_client(mp_node->fs_cap);

        if (mp_node->_fs_ipc_struct == NULL) {
                info("ipc_register_client failed\n");
                BUG_ON(remove_mount_point(mp_node->path) != 0);
                pthread_rwlock_unlock(&mount_point_infos_rwlock);
                goto out;
        }

        strlcpy(mp_node->path, mount_point, sizeof(mp_node->path));

        fs_num++;
        ret = 0;
        pthread_rwlock_unlock(&mount_point_infos_rwlock);

out:
        return ret;
}

/*
 * @args: 'path' is the device name, e.g. 'sda1'.
 * Sends FS_REQ_UMOUNT to the corresponding fs_server.
 */
int fsm_umount_fs(const char *path)
{
        cap_t fs_cap;
        int ret;
        ipc_msg_t *ipc_msg;
        ipc_struct_t *ipc_struct;
        struct fs_request *fr_ptr;

        pthread_rwlock_wrlock(&mount_point_infos_rwlock);

        /* Get the corresponding fs_server cap by device name. */
        fs_cap = mount_storage_device(path);

        ipc_struct = ipc_register_client(fs_cap);
        ipc_msg = ipc_create_msg(ipc_struct, sizeof(struct fs_request));
        fr_ptr = (struct fs_request *)ipc_get_msg_data(ipc_msg);

        fr_ptr->req = FS_REQ_UMOUNT;

        ipc_set_msg_data(ipc_msg, (char *)fr_ptr, 0, sizeof(struct fs_request));
        ret = ipc_call(ipc_struct, ipc_msg);
        ipc_destroy_msg(ipc_msg);

        pthread_rwlock_unlock(&mount_point_infos_rwlock);

        return ret;
}

/*
 * Send FS_REQ_SYNC to every mounted fs_server so that each one flushes
 * its page cache back to the underlying device.
 */
int fsm_sync_page_cache(void)
{
        ipc_msg_t *ipc_msg;
        ipc_struct_t *ipc_struct;
        struct fs_request *fr_ptr;
        struct mount_point_info_node *iter;
        int ret = 0;

        for_each_in_list (
                iter, struct mount_point_info_node, node, &mount_point_infos) {
                ipc_struct = iter->_fs_ipc_struct;
                ipc_msg = ipc_create_msg(ipc_struct, sizeof(struct fs_request));
                fr_ptr = (struct fs_request *)ipc_get_msg_data(ipc_msg);

                fr_ptr->req = FS_REQ_SYNC;

                ret = ipc_call(ipc_struct, ipc_msg);
                ipc_destroy_msg(ipc_msg);
                if (ret != 0) {
                        printf("Failed to sync in %s\n", iter->path);
                        goto out;
                }
        }

out:
        return ret;
}

/*
 * Types in the following two functions would conflict with existing builds;
 * I suggest moving the tmpfs code out of the kernel tree to resolve this.
 */

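/*
 * IPC dispatcher for the FSM server. Every client request carries a
 * struct fsm_request; FSM_REQ_PARSE_PATH resolves a path to its mount
 * point, records which fs_server cap the client holds, and transfers
 * the cap (plus a fresh mount_id) on first use; the other requests map
 * directly onto the mount/umount/sync helpers above.
 */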
void fsm_dispatch(ipc_msg_t *ipc_msg, badge_t client_badge)
{
        int ret = 0;
        struct fsm_request *fsm_req;
        struct mount_point_info_node *mpinfo;
        int mount_id;
        bool ret_with_cap = false;

        if (ipc_msg == NULL) {
                ipc_return(ipc_msg, -EINVAL);
        }

        if (ipc_msg->data_len >= sizeof(fsm_req->req)) {
                fsm_req = (struct fsm_request *)ipc_get_msg_data(ipc_msg);

                switch (fsm_req->req) {
                case FSM_REQ_PARSE_PATH: {
                        // Get the corresponding mount point info
                        pthread_rwlock_rdlock(&mount_point_infos_rwlock);
                        mpinfo = get_mount_point(fsm_req->path, strlen(fsm_req->path));
                        pthread_mutex_lock(&fsm_client_cap_table_lock);
                        mount_id = fsm_get_client_cap(client_badge, mpinfo->fs_cap);

                        if (mount_id == -1) {
                                /* The client does not hold the corresponding fs_cap yet. */

                                // Newly generated mount_id
                                mount_id = fsm_set_client_cap(client_badge, mpinfo->fs_cap);
                                pthread_mutex_unlock(&fsm_client_cap_table_lock);

                                if (mount_id < 0) {
                                        ipc_return(ipc_msg, mount_id);
                                }

                                // Fill in the response
                                fsm_req->mount_id = mount_id;
                                strncpy(fsm_req->mount_path, mpinfo->path, mpinfo->path_len);
                                fsm_req->mount_path[mpinfo->path_len] = '\0';
                                fsm_req->mount_path_len = mpinfo->path_len;
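                                /*
                                 * The root mount ("/", length 1) is reported with
                                 * length 0, presumably so that clients strip no
                                 * mount prefix from paths under it.
                                 */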
                                if (fsm_req->mount_path_len == 1)
                                        fsm_req->mount_path_len = 0;
                                fsm_req->new_cap_flag = 1;

                                // Return with cap
                                pthread_rwlock_unlock(&mount_point_infos_rwlock);
                                ipc_msg->cap_slot_number = 1;
                                ipc_set_msg_cap(ipc_msg, 0, mpinfo->fs_cap);
                                ipc_return_with_cap(ipc_msg, 0);
                        } else {
                                /* The client already holds the corresponding fs_cap. */
                                pthread_mutex_unlock(&fsm_client_cap_table_lock);
                                fsm_req->mount_id = mount_id;
                                strncpy(fsm_req->mount_path, mpinfo->path, mpinfo->path_len);
                                fsm_req->mount_path[mpinfo->path_len] = '\0';
                                fsm_req->mount_path_len = mpinfo->path_len;
                                if (fsm_req->mount_path_len == 1)
                                        fsm_req->mount_path_len = 0;
                                fsm_req->new_cap_flag = 0;

                                pthread_rwlock_unlock(&mount_point_infos_rwlock);
                                ipc_return(ipc_msg, 0);
                        }
                        break;
                }
                case FSM_REQ_MOUNT: {
                        // path = device name, mount_path = mount point
                        ret = fsm_mount_fs(fsm_req->path, fsm_req->mount_path);
                        break;
                }
                case FSM_REQ_UMOUNT: {
                        ret = fsm_umount_fs(fsm_req->path);
                        break;
                }
                case FSM_REQ_SYNC: {
                        ret = fsm_sync_page_cache();
                        break;
                }
                default:
                        error("%s: %d Not implemented yet\n",
                              __func__,
                              ((int *)(ipc_get_msg_data(ipc_msg)))[0]);
                        usys_exit(-1);
                        break;
                }
        } else {
                error("FSM: no operation num\n");
                usys_exit(-1);
        }

        if (ret_with_cap)
                ipc_return_with_cap(ipc_msg, ret);
        else
                ipc_return(ipc_msg, ret);
}

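/*
 * For reference, a minimal sketch of how a client might issue
 * FSM_REQ_PARSE_PATH, using only the IPC calls that already appear in
 * this file. The FSM connection handle (fsm_ipc_struct), the example
 * path and the fsm_request buffer sizes are illustrative assumptions,
 * not definitions from this file.
 *
 *     struct fsm_request *req;
 *     cap_t fs_cap = -1;
 *     ipc_msg_t *msg = ipc_create_msg(fsm_ipc_struct, sizeof(struct fsm_request));
 *
 *     req = (struct fsm_request *)ipc_get_msg_data(msg);
 *     req->req = FSM_REQ_PARSE_PATH;
 *     strlcpy(req->path, "/some/file", sizeof(req->path));
 *
 *     if (ipc_call(fsm_ipc_struct, msg) == 0 && req->new_cap_flag == 1) {
 *             // First use of this mount point: the fs_server cap is in slot 0.
 *             fs_cap = ipc_get_msg_cap(msg, 0);
 *     }
 *     ipc_destroy_msg(msg);
 *
 * The client can then register with the fs_server via
 * ipc_register_client(fs_cap) and address files using req->mount_id and
 * the path with the returned mount prefix stripped.
 */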
int main(int argc, char *argv[], char *envp[])
{
        init_fsm();
        info("[FSM] register server value = %u\n",
             ipc_register_server_with_destructor(
                     fsm_dispatch, DEFAULT_CLIENT_REGISTER_HANDLER, fsm_destructor));

        usys_exit(0);
        return 0;
}