/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h> /* malloc */
#include <string.h> /* memset */
#include <chcore/bug.h>
#include <chcore/type.h>
#include <chcore/memory.h>
#include <chcore-internal/fs_defs.h>
#include <chcore-internal/fs_debug.h>
#include <sys/mman.h>
#include "fs_wrapper_defs.h"
#include "fs_page_cache.h"
#include "fs_vnode.h"
#include "fs_page_fault.h"

/* fs server private data */
struct list_head server_entry_mapping;
pthread_spinlock_t server_entry_mapping_lock;
pthread_rwlock_t fs_wrapper_meta_rwlock;
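/*
 * Locking discipline: server_entry_mapping_lock protects the per-client
 * fd -> fid tables hanging off server_entry_mapping; fs_wrapper_meta_rwlock
 * lets READ/WRITE requests run concurrently while serializing every other
 * request against them (see fs_server_dispatch below).
 */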

/* +++++++++++++++++++++++++++ Initializing +++++++++++++++++++++++++++++++ */

int real_file_reader(char *buffer, pidx_t file_page_idx, void *private)
{
        struct fs_vnode *vnode;
        size_t size;
        off_t offset;

        vnode = (struct fs_vnode *)private;

        size = CACHED_PAGE_SIZE;
        offset = file_page_idx * CACHED_PAGE_SIZE;

        /*
         * The buffer is always CACHED_PAGE_SIZE bytes; zero it first so any
         * tail beyond end-of-file reads back as zeros.
         */
        memset(buffer, 0, size);

        /* Nothing to read past end-of-file (also avoids size_t underflow below). */
        if (offset >= vnode->size)
                return 0;
        if (offset + size > vnode->size)
                size = vnode->size - offset;
#ifdef TEST_COUNT_PAGE_CACHE
        count.disk_o = count.disk_o + size;
#endif
        return server_ops.read(vnode->private, offset, size, buffer);
}
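/*
 * Worked example (assuming CACHED_PAGE_SIZE is 4096): for file_page_idx 3
 * the page starts at byte offset 3 * 4096 = 12288; if the file is 13000
 * bytes long, the backend read is clamped to 712 bytes and the rest of the
 * buffer stays zero from the memset above.
 */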

int real_file_writer(char *buffer, pidx_t file_page_idx, int page_block_idx,
                     void *private)
{
        struct fs_vnode *vnode;
        off_t offset;
        size_t size;

        vnode = (struct fs_vnode *)private;
        offset = file_page_idx * CACHED_PAGE_SIZE;
        if (page_block_idx == -1) {
                /* Flush the whole page. */
                size = CACHED_PAGE_SIZE;
        } else {
                /* Flush a single block within the page. */
                size = CACHED_BLOCK_SIZE;
                offset += page_block_idx * CACHED_BLOCK_SIZE;
        }

        /* Nothing to write past end-of-file (also avoids size_t underflow below). */
        if (offset >= vnode->size)
                return 0;
        if (offset + size > vnode->size)
                size = vnode->size - offset;
#ifdef TEST_COUNT_PAGE_CACHE
        count.disk_i = count.disk_i + size;
#endif
        return server_ops.write(vnode->private, offset, size, buffer);
}
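/*
 * Worked example (assuming CACHED_PAGE_SIZE is 4096 and CACHED_BLOCK_SIZE is
 * 512): writing back page 2, block 3 (page_block_idx == 3) flushes 512 bytes
 * at offset 2 * 4096 + 3 * 512 = 9728, while page_block_idx == -1 flushes
 * the whole 4096-byte page.
 */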

void init_fs_wrapper(void)
{
        struct user_defined_funcs uf;

        /* fs wrapper */
        init_list_head(&server_entry_mapping);
        pthread_spin_init(&server_entry_mapping_lock, 0);
        fs_vnode_init();
        pthread_rwlock_init(&fs_wrapper_meta_rwlock, NULL);

        uf.file_read = real_file_reader;
        uf.file_write = real_file_writer;
        uf.handler_pce_turns_empty = dec_ref_fs_vnode;
        uf.handler_pce_turns_nonempty = inc_ref_fs_vnode;

        fs_page_cache_init(WRITE_THROUGH, &uf);

#ifdef CHCORE_ENABLE_FMAP
        /* Module: fmap fault */
        fs_page_fault_init();
#endif

#ifdef TEST_COUNT_PAGE_CACHE
        count.hit = 0;
        count.miss = 0;
        count.disk_i = 0;
        count.disk_o = 0;
#endif
}
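/*
 * Usage sketch (hypothetical): a concrete FS server is expected to provide
 * server_ops.read / server_ops.write and call init_fs_wrapper() once during
 * startup, before serving any IPC request; for example:
 *
 *     server_ops.read = my_fs_read;   // my_fs_read / my_fs_write are
 *     server_ops.write = my_fs_write; // hypothetical backend callbacks
 *     init_fs_wrapper();
 */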

/* +++++++++++++++++++++++++++ FID Mapping ++++++++++++++++++++++++++++++++ */

/* Get the (client_badge, fd) -> fid (server_entry) mapping */
int fs_wrapper_get_server_entry(badge_t client_badge, int fd)
{
        struct server_entry_node *n;

        /* A stable fd number that needs no translation */
        if (fd == AT_FDROOT)
                return AT_FDROOT;

        /* Validate fd */
        if (fd < 0 || fd >= MAX_SERVER_ENTRY_PER_CLIENT) {
                return -1;
        }

        pthread_spin_lock(&server_entry_mapping_lock);
        for_each_in_list (n, struct server_entry_node, node, &server_entry_mapping) {
                if (n->client_badge == client_badge) {
                        pthread_spin_unlock(&server_entry_mapping_lock);
                        return n->fd_to_fid[fd];
                }
        }
        pthread_spin_unlock(&server_entry_mapping_lock);
        return -1;
}
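/*
 * Usage sketch (locals are illustrative): the open path allocates a
 * server-side fid, binds it with the setter below, and later requests
 * translate the client fd back through the getter above:
 *
 *     fs_wrapper_set_server_entry(client_badge, fd, fid);  // bind fd -> fid
 *     ...
 *     int looked_up = fs_wrapper_get_server_entry(client_badge, fd);
 *     // looked_up == fid; an unbound fd yields -1
 */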

/* Set the (client_badge, fd) -> fid (server_entry) mapping */
int fs_wrapper_set_server_entry(badge_t client_badge, int fd, int fid)
{
        struct server_entry_node *private_iter;

        /* Validate fd */
        BUG_ON(fd < 0 || fd >= MAX_SERVER_ENTRY_PER_CLIENT);

        /* Check whether client_badge is already present */
        pthread_spin_lock(&server_entry_mapping_lock);
        for_each_in_list (private_iter, struct server_entry_node, node,
                          &server_entry_mapping) {
                if (private_iter->client_badge == client_badge) {
                        private_iter->fd_to_fid[fd] = fid;
                        goto out;
                }
        }

        /* New server_entry_node */
        struct server_entry_node *n =
                (struct server_entry_node *)malloc(sizeof(*n));
        if (n == NULL) {
                /* Drop the lock on the error path too. */
                pthread_spin_unlock(&server_entry_mapping_lock);
                return -ENOMEM;
        }
        n->client_badge = client_badge;
        int i;
        for (i = 0; i < MAX_SERVER_ENTRY_PER_CLIENT; i++)
                n->fd_to_fid[i] = -1;

        n->fd_to_fid[fd] = fid;

        /* Insert the node into server_entry_mapping */
        list_append(&n->node, &server_entry_mapping);

out:
        pthread_spin_unlock(&server_entry_mapping_lock);
        return 0;
}

void fs_wrapper_clear_server_entry(badge_t client_badge, int fid)
{
        struct server_entry_node *private_iter;

        /* Find the client_badge's node and clear every fd bound to this fid */
        pthread_spin_lock(&server_entry_mapping_lock);
        for_each_in_list (private_iter, struct server_entry_node, node,
                          &server_entry_mapping) {
                if (private_iter->client_badge == client_badge) {
                        for (int i = 0; i < MAX_SERVER_ENTRY_PER_CLIENT; i++) {
                                if (private_iter->fd_to_fid[i] == fid) {
                                        private_iter->fd_to_fid[i] = -1;
                                }
                        }
                        pthread_spin_unlock(&server_entry_mapping_lock);
                        return;
                }
        }
        pthread_spin_unlock(&server_entry_mapping_lock);
}

#define translate_or_noent(badge, fd)                               \
        do {                                                        \
                int r;                                              \
                r = fs_wrapper_get_server_entry(badge, fd);         \
                if (r < 0)                                          \
                        ret = -ENOENT;                              \
                else                                                \
                        (fd) = r;                                   \
        } while (0)
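/*
 * Expansion example: for FS_REQ_READ the macro above behaves like
 *
 *     int r = fs_wrapper_get_server_entry(client_badge, fr->read.fd);
 *     if (r < 0)
 *             ret = -ENOENT;
 *     else
 *             fr->read.fd = r;
 *
 * i.e. the client-side fd in the request is rewritten in place to the
 * server-side fid, or the request fails with -ENOENT.
 */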

/* Translate each request's fd field(s) to the corresponding fid */
int translate_fd_to_fid(badge_t client_badge, struct fs_request *fr)
{
        int ret = 0;
        /* Every request except FS_REQ_OPEN and FS_REQ_MOUNT carries fds that
         * must be translated */
        if (fr->req == FS_REQ_OPEN || fr->req == FS_REQ_MOUNT)
                return ret;

        switch (fr->req) {
        case FS_REQ_FSTATAT:
        case FS_REQ_FSTAT:
        case FS_REQ_FSTATFS:
        case FS_REQ_STATFS:
                fr->stat.dirfd =
                        fs_wrapper_get_server_entry(client_badge, fr->stat.dirfd);
                fr->stat.fd = fs_wrapper_get_server_entry(client_badge, fr->stat.fd);

                if ((fr->stat.fd < 0 && fr->stat.fd != AT_FDROOT)
                    || (fr->stat.dirfd < 0 && fr->stat.dirfd != AT_FDROOT)) {
                        ret = -ENOENT;
                }

                break;
        case FS_REQ_READ:
                translate_or_noent(client_badge, fr->read.fd);
                break;
        case FS_REQ_WRITE:
                translate_or_noent(client_badge, fr->write.fd);
                break;
        case FS_REQ_LSEEK:
                translate_or_noent(client_badge, fr->lseek.fd);
                break;
        case FS_REQ_CLOSE:
                translate_or_noent(client_badge, fr->close.fd);
                break;
        case FS_REQ_FTRUNCATE:
                translate_or_noent(client_badge, fr->ftruncate.fd);
                break;
        case FS_REQ_FALLOCATE:
                translate_or_noent(client_badge, fr->fallocate.fd);
                break;
        case FS_REQ_FCNTL:
                translate_or_noent(client_badge, fr->fcntl.fd);
                break;
        case FS_REQ_FSYNC:
                translate_or_noent(client_badge, fr->fsync.fd);
                break;
        case FS_REQ_FDATASYNC:
                translate_or_noent(client_badge, fr->fdatasync.fd);
                break;
#ifdef CHCORE_ENABLE_FMAP
        case FS_REQ_FMAP:
                translate_or_noent(client_badge, fr->mmap.fd);
                break;
#endif
        case FS_REQ_GETDENTS64:
                translate_or_noent(client_badge, fr->getdents64.fd);
                break;
        default:
                break;
        }

        return ret;
}

void fs_server_dispatch(ipc_msg_t *ipc_msg, badge_t client_badge)
{
        struct fs_request *fr;
        long ret;
        bool ret_with_cap = false;

        if (ipc_msg == NULL) {
                ipc_return(ipc_msg, -EINVAL);
        }

        fr = (struct fs_request *)ipc_get_msg_data(ipc_msg);

        /*
         * Only READ and WRITE requests are served concurrently (read lock);
         * every other request takes the write lock and is serialized against
         * all metadata updates.
         */
        if (fr->req != FS_REQ_READ && fr->req != FS_REQ_WRITE) {
                pthread_rwlock_wrlock(&fs_wrapper_meta_rwlock);
        } else {
                pthread_rwlock_rdlock(&fs_wrapper_meta_rwlock);
        }

        /*
         * Some FS servers need to finish initialization while mounting,
         * e.g. connecting to the corresponding block device, saving the
         * partition offset, etc. So while the mounted flag is off, every
         * request except FS_REQ_MOUNT is rejected.
         */
        if (!mounted && (fr->req != FS_REQ_MOUNT)) {
                printf("[fs server] Not fully initialized, send FS_REQ_MOUNT first\n");
                ret = -EINVAL;
                goto out;
        }

        /*
         * At this point fr->fd holds the client-side fd index, which must be
         * translated to a fid for every request except FS_REQ_OPEN: for
         * FS_REQ_OPEN, fr->fd holds the newly generated client-side fd index,
         * and the fd -> fid mapping is built while handling the open request.
         */
        ret = translate_fd_to_fid(client_badge, fr);
        if (ret < 0) {
                goto out;
        }

        /*
         * FS server request handlers
         */
        switch (fr->req) {
        case FS_REQ_MOUNT:
                ret = fs_wrapper_mount(ipc_msg, fr);
                break;
        case FS_REQ_UMOUNT:
                ret = fs_wrapper_umount(ipc_msg, fr);
                break;
        case FS_REQ_OPEN:
                ret = fs_wrapper_open(client_badge, ipc_msg, fr);
                break;
        case FS_REQ_READ:
                ret = fs_wrapper_read(ipc_msg, fr);
                break;
        case FS_REQ_WRITE:
                ret = fs_wrapper_write(ipc_msg, fr);
                break;
        case FS_REQ_LSEEK:
                ret = fs_wrapper_lseek(ipc_msg, fr);
                break;
        case FS_REQ_CLOSE:
                ret = fs_wrapper_close(client_badge, ipc_msg, fr);
                break;
        case FS_REQ_CREAT:
                ret = fs_wrapper_creat(ipc_msg, fr);
                break;
        case FS_REQ_UNLINK:
                ret = fs_wrapper_unlink(ipc_msg, fr);
                break;
        case FS_REQ_RMDIR:
                ret = fs_wrapper_rmdir(ipc_msg, fr);
                break;
        case FS_REQ_MKDIR:
                ret = fs_wrapper_mkdir(ipc_msg, fr);
                break;
        case FS_REQ_RENAME:
                ret = fs_wrapper_rename(ipc_msg, fr);
                break;
        case FS_REQ_GETDENTS64:
                ret = fs_wrapper_getdents64(ipc_msg, fr);
                break;
        case FS_REQ_FTRUNCATE:
                ret = fs_wrapper_ftruncate(ipc_msg, fr);
                break;
        case FS_REQ_FSTATAT:
                ret = fs_wrapper_fstatat(ipc_msg, fr);
                break;
        case FS_REQ_FSTAT:
                ret = fs_wrapper_fstat(ipc_msg, fr);
                break;
        case FS_REQ_STATFS:
                ret = fs_wrapper_statfs(ipc_msg, fr);
                break;
        case FS_REQ_FSTATFS:
                ret = fs_wrapper_fstatfs(ipc_msg, fr);
                break;
        case FS_REQ_FACCESSAT:
                ret = fs_wrapper_faccessat(ipc_msg, fr);
                break;
        case FS_REQ_SYMLINKAT:
                ret = fs_wrapper_symlinkat(ipc_msg, fr);
                break;
        case FS_REQ_READLINKAT:
                ret = fs_wrapper_readlinkat(ipc_msg, fr);
                break;
        case FS_REQ_FALLOCATE:
                ret = fs_wrapper_fallocate(ipc_msg, fr);
                break;
        case FS_REQ_FCNTL:
                ret = fs_wrapper_fcntl(client_badge, ipc_msg, fr);
                break;
#ifdef CHCORE_ENABLE_FMAP
        case FS_REQ_FMAP:
                ret = fs_wrapper_fmap(client_badge, ipc_msg, fr, &ret_with_cap);
                break;
        case FS_REQ_FUNMAP:
                ret = fs_wrapper_funmap(client_badge, ipc_msg, fr);
                break;
#endif
        case FS_REQ_SYNC:
                ret = fs_wrapper_sync();
                break;
        case FS_REQ_FSYNC:
        case FS_REQ_FDATASYNC:
                ret = fs_wrapper_fsync(ipc_msg, fr);
                break;
        case FS_REQ_TEST_PERF:
                ret = fs_wrapper_count(ipc_msg, fr);
                break;
        default:
                printf("[Error] Unknown FS server request number %d\n", fr->req);
                ret = -EINVAL;
                break;
        }

out:
        pthread_rwlock_unlock(&fs_wrapper_meta_rwlock);
        if (ret_with_cap)
                ipc_return_with_cap(ipc_msg, ret);
        else
                ipc_return(ipc_msg, ret);
}
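/*
 * Registration sketch (hypothetical; the exact IPC registration API may
 * differ): fs_server_dispatch is written as an IPC server handler, so a
 * server built on this wrapper would hand it to ChCore's IPC layer roughly
 * like
 *
 *     DEFINE_SERVER_HANDLER(fs_handler)
 *     {
 *             fs_server_dispatch(ipc_msg, client_badge);
 *     }
 *
 * after which each client request arrives here and is answered via
 * ipc_return() / ipc_return_with_cap() above.
 */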