/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
/*
 * Inter-**Process** Communication.
 *
 * connection: between a client cap_group and a server cap_group (two
 * processes). We store the connection cap in a process' cap_group, so each
 * thread in it can use that connection.
 *
 * A connection (cap) can be used by any thread in the client cap_group.
 * A connection will be served by **only** one server thread, while
 * one server thread may serve multiple connections.
 *
 * One PMO_SHM is bound to one connection.
 *
 * A connection can only serve one IPC request at a time.
 * Both user and kernel should check the "busy_state" of a connection.
 * Besides, the register thread can also serve only one registration
 * request at a time.
 *
 * Since a connection can only be shared by client threads in the same
 * process, a connection has exactly one **badge** that identifies the
 * process. During ipc_call, the kernel can pass the **badge** as an
 * argument in a register.
 *
 * Overview:
 * **IPC registration (control path)**
 * - A server thread (S1) invokes **sys_register_server** with
 *   a register_cb_thread (S2)
 *
 * - A client thread (C) invokes **sys_register_client(S1)**
 *   - actually invokes (switches to) S2
 *   - S2 invokes **sys_ipc_register_cb_return** with a handler_thread (S3)
 *     - S3 will serve IPC requests later
 *   - switches back to C (finishing the IPC registration)
 *
 * **IPC call/reply (data path)**
 * - C invokes **sys_ipc_call**
 *   - switches to S3
 * - S3 invokes **sys_ipc_return**
 *   - switches back to C
 */
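
/*
 * For orientation, a minimal user-space sketch of the two paths above.
 * The wrapper names (usys_*) and the exact setup are illustrative
 * assumptions, not the actual ChCore user-library API:
 *
 *   // server: S1 declares the service; S2 (register cb) exists already
 *   usys_register_server(ipc_dispatch, s2_register_cb_thread_cap,
 *                        conn_destructor);
 *
 *   // client: C prepares a PMO_SHM and registers, getting a connection cap
 *   struct client_shm_config cfg = { .shm_cap = shm_cap,
 *                                    .shm_addr = CLIENT_SHM_VADDR };
 *   cap_t conn_cap = usys_register_client(s1_server_thread_cap,
 *                                         (unsigned long)&cfg);
 *
 *   // client: issue a call; the ipc_msg lives inside the shared memory
 *   struct ipc_msg *msg = (struct ipc_msg *)CLIENT_SHM_VADDR;
 *   unsigned long ret = usys_ipc_call(conn_cap, msg, 0 /* cap_num */);
 */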

#include <common/errno.h>
#include <ipc/connection.h>
#include <irq/irq.h>
#include <mm/kmalloc.h>
#include <mm/uaccess.h>
#include <object/memory.h>
#include <sched/context.h>

/*
 * @brief A legal ipc_msg should be placed in the shared memory of an IPC
 * connection. Besides, ipc_msg is actually a header followed by custom data
 * and a cap array, which must also be placed in the shared memory. This
 * function checks the above constraints and returns 0 only if all of them
 * are satisfied. Otherwise -1 is returned to indicate an invalid or
 * malicious ipc_msg.
 *
 * @param user_ipc_msg_ptr: pointer to the ipc_msg passed by the user program
 * @param shm_start: start address of the shared memory of a connection. This
 * address may be server-side or client-side. When checking an ipc_msg for
 * the client side, the client-side start address should be passed in, and
 * vice versa.
 * @param shm_size: size of the shared memory
 * @return int: 0 indicates a legal ipc_msg, otherwise -1 indicates an
 * invalid or malicious ipc_msg.
 */
static int check_ipc_msg_in_shm(struct ipc_msg *user_ipc_msg_ptr,
                                vaddr_t shm_start, size_t shm_size)
{
        int ret;
        struct ipc_msg ipc_msg_kbuf;
        char *shm_start_ptr = (char *)shm_start;
        char *shm_end = shm_start_ptr + shm_size;
        char *ipc_msg_start = (char *)user_ipc_msg_ptr;
        char *ipc_msg_end = ipc_msg_start + sizeof(struct ipc_msg);
        char *ipc_data_start, *ipc_data_end;
        char *ipc_caps_start, *ipc_caps_end;

        /**
         * The checks of ipc_data and ipc_caps depend on data in ipc_msg, so
         * we must first check that ipc_msg itself is legal.
         */
        if (!(shm_start_ptr <= ipc_msg_start && ipc_msg_end <= shm_end)) {
                return -1;
        }

        ret = copy_from_user(&ipc_msg_kbuf, ipc_msg_start, sizeof(struct ipc_msg));
        if (ret < 0) {
                return ret;
        }

        ipc_data_start = ipc_msg_start + ipc_msg_kbuf.data_offset;
        ipc_data_end = ipc_data_start + ipc_msg_kbuf.data_len;

        ipc_caps_start = ipc_msg_start + ipc_msg_kbuf.cap_slots_offset;
        ipc_caps_end =
                ipc_caps_start + ipc_msg_kbuf.cap_slot_number * sizeof(cap_t);

        if (!(shm_start_ptr <= ipc_data_start && ipc_data_end <= shm_end)) {
                return -1;
        }

        if (!(shm_start_ptr <= ipc_caps_start && ipc_caps_end <= shm_end)) {
                return -1;
        }

        return 0;
}
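
/*
 * For illustration, a legal layout inside the shared memory (positions are
 * made up for this example; the ipc_msg header need not sit at shm_start):
 *
 *   shm_start                                              shm_end
 *   |-- struct ipc_msg --|---- data ----|-- cap slots --|
 *   ^ ipc_msg            ^ ipc_msg + data_offset
 *                                       ^ ipc_msg + cap_slots_offset
 *
 * All three ranges must fall within [shm_start, shm_end]; data_offset and
 * cap_slots_offset are relative to the ipc_msg header itself.
 */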

/*
 * Overall, a server thread that declares a service with this interface
 * should specify:
 * @ipc_routine (the real ipc service routine entry),
 * @register_thread_cap (another server thread for handling client
 * registration), and
 * @destructor (one routine invoked when some connection is closed).
 */
static int register_server(struct thread *server, unsigned long ipc_routine,
                           cap_t register_thread_cap, unsigned long destructor)
{
        struct ipc_server_config *config;
        struct thread *register_cb_thread;
        struct ipc_server_register_cb_config *register_cb_config;

        BUG_ON(server == NULL);
        if (server->general_ipc_config != NULL) {
                kdebug("A server thread can only invoke **register_server** once!\n");
                return -EINVAL;
        }

        /*
         * Check the passive thread in the server for handling
         * client registration.
         */
        register_cb_thread =
                obj_get(current_cap_group, register_thread_cap, TYPE_THREAD);
        if (!register_cb_thread) {
                kdebug("A register_cb_thread is required.\n");
                return -ECAPBILITY;
        }

        if (register_cb_thread->thread_ctx->type != TYPE_REGISTER) {
                kdebug("The register_cb_thread should be TYPE_REGISTER!\n");
                obj_put(register_cb_thread);
                return -EINVAL;
        }

        config = kmalloc(sizeof(*config));

        /*
         * @ipc_routine will be the real ipc_routine_entry.
         * No need to validate this address because the server just
         * kills itself if the address is illegal.
         */
        config->declared_ipc_routine_entry = ipc_routine;

        /* Record the registration cb thread */
        config->register_cb_thread = register_cb_thread;

        register_cb_config = kmalloc(sizeof(*register_cb_config));
        register_cb_thread->general_ipc_config = register_cb_config;

        /*
         * This lock will be used to prevent concurrent client threads
         * from registering.
         * In other words, a register_cb_thread can only serve
         * registration requests one-by-one.
         */
        lock_init(&register_cb_config->register_lock);

        /* Record the PC as well as the thread's initial stack (SP). */
        register_cb_config->register_cb_entry =
                arch_get_thread_next_ip(register_cb_thread);
        register_cb_config->register_cb_stack =
                arch_get_thread_stack(register_cb_thread);
        register_cb_config->destructor = destructor;
        obj_put(register_cb_thread);

#if defined(CHCORE_ARCH_AARCH64)
        /* The following fence ensures that the config-related data,
         * e.g., the register_lock, can be seen when
         * server->general_ipc_config is set.
         */
        smp_mb();
#else
        /* TSO: the fence is not required. */
#endif

        /*
         * The last step: fill the general_ipc_config.
         * This field also indicates whether the server thread has
         * declared an IPC service (i.e., whether the service is ready).
         */
        server->general_ipc_config = config;

        return 0;
}

void connection_deinit(void *conn)
{
        /* For now, no de-initialization is required */
}

/* Just used for storing the results of function create_connection */
struct client_connection_result {
        cap_t client_conn_cap;
        cap_t server_conn_cap;
        cap_t server_shm_cap;
        struct ipc_connection *conn;
};

static int get_pmo_size(cap_t pmo_cap)
{
        struct pmobject *pmo;
        int size;

        pmo = obj_get(current_cap_group, pmo_cap, TYPE_PMO);
        BUG_ON(!pmo);

        size = pmo->size;
        obj_put(pmo);

        return size;
}

/*
 * This function creates an IPC connection and initializes the client-side
 * information. (used in sys_register_client)
 *
 * The server (register_cb_thread) will initialize the server-side
 * information later (in sys_ipc_register_cb_return).
 */
static int create_connection(struct thread *client, struct thread *server,
                             int shm_cap_client, unsigned long shm_addr_client,
                             struct client_connection_result *res)
{
        cap_t shm_cap_server;
        struct ipc_connection *conn;
        int ret = 0;
        cap_t conn_cap = 0, server_conn_cap = 0;

        BUG_ON((client == NULL) || (server == NULL));

        /*
         * Copy the shm_cap to the server.
         *
         * It is reasonable to count the shared memory usage on the client.
         * So, a client should prepare the shm and tell the server.
         */
        shm_cap_server =
                cap_copy(current_cap_group, server->cap_group, shm_cap_client);

        /* Create struct ipc_connection */
        conn = obj_alloc(TYPE_CONNECTION, sizeof(*conn));
        if (!conn) {
                ret = -ENOMEM;
                goto out_fail;
        }

        /* Initialize the connection (begin).
         *
         * Note that the client is now applying to build the connection
         * instead of issuing an IPC.
         */
        conn->is_valid = OBJECT_STATE_INVALID;
        conn->current_client_thread = client;
        /*
         * The register_cb_thread in the server will assign the
         * server_handler_thread later.
         */
        conn->server_handler_thread = NULL;
        /*
         * The badge is generated by the process that creates the client
         * thread. Usually, that process is the procmgr user-space service.
         * The badge needs to be unique.
         *
         * Before a process exits, it needs to close its connections with
         * servers. Otherwise, a later process may impersonate it
         * because the badge is based on the PID (if a PID is reused,
         * the same badge occurs).
         * Alternatively, the kernel could notify the server to close the
         * connections when some client exits.
         */
        conn->client_badge = current_cap_group->badge;
        conn->shm.client_shm_uaddr = shm_addr_client;
        conn->shm.shm_size = get_pmo_size(shm_cap_client);
        conn->shm.shm_cap_in_client = shm_cap_client;
        conn->shm.shm_cap_in_server = shm_cap_server;
        lock_init(&conn->ownership);
        /* Initialize the connection (end) */

        /* After initializing the object,
         * give the ipc_connection (cap) to the client.
         */
        conn_cap = cap_alloc(current_cap_group, conn);
        if (conn_cap < 0) {
                ret = conn_cap;
                goto out_free_obj;
        }

        /* Give the ipc_connection (cap) to the server */
        server_conn_cap = cap_copy(current_cap_group, server->cap_group, conn_cap);
        if (server_conn_cap < 0) {
                ret = server_conn_cap;
                goto out_free_cap;
        }

        /* Prepare the return results */
        res->client_conn_cap = conn_cap;
        res->server_conn_cap = server_conn_cap;
        res->server_shm_cap = shm_cap_server;
        res->conn = conn;

        return 0;

out_free_cap:
        cap_free(current_cap_group, conn_cap);
        conn = NULL;
out_free_obj:
        obj_free(conn);
out_fail:
        return ret;
}

/*
 * Grab the ipc lock before making any modifications, including
 * modifying the conn or sending the caps.
 */
static inline int grab_ipc_lock(struct ipc_connection *conn)
{
        struct thread *target;
        struct ipc_server_handler_config *handler_config;

        target = conn->server_handler_thread;
        handler_config =
                (struct ipc_server_handler_config *)target->general_ipc_config;

        /*
         * Grabbing the ipc_lock ensures two things:
         * First, we avoid invoking the same handler thread concurrently.
         * Second, we avoid using the same connection concurrently.
         *
         * perf in QEMU: lock & unlock (without contention) take only
         * about 20 cycles on x86_64.
         */

        /* Use try-lock, otherwise a deadlock may happen.
         * Deadlock: T1: ipc-call -> Server -> resched to T2: ipc-call
         *
         * Although a lock is taken in the user IPC library, a buggy app
         * could still DoS the kernel.
         */
        if (try_lock(&handler_config->ipc_lock) != 0)
                return -EIPCRETRY;

        return 0;
}

static inline int release_ipc_lock(struct ipc_connection *conn)
{
        struct thread *target;
        struct ipc_server_handler_config *handler_config;

        target = conn->server_handler_thread;
        handler_config =
                (struct ipc_server_handler_config *)target->general_ipc_config;

        unlock(&handler_config->ipc_lock);

        return 0;
}

static void thread_migrate_to_server(struct ipc_connection *conn,
                                     unsigned long arg)
{
        struct thread *target;
        struct ipc_server_handler_config *handler_config;

        target = conn->server_handler_thread;
        handler_config =
                (struct ipc_server_handler_config *)target->general_ipc_config;

        /*
         * Note that a server ipc handler thread can be assigned to multiple
         * connections.
         * So, it is necessary to record which connection is active.
         */
        handler_config->active_conn = conn;

        /*
         * Note that multiple client threads may share the same connection.
         * So, it is necessary to record which client thread is active.
         * Then, the server can transfer control back to it after finishing
         * the IPC.
         */
        conn->current_client_thread = current_thread;

        /* Mark current_thread as TS_WAITING */
        current_thread->thread_ctx->state = TS_WAITING;

        /* Pass the scheduling context */
        target->thread_ctx->sc = current_thread->thread_ctx->sc;

        /* Set the target thread SP/IP/arguments */
        arch_set_thread_stack(target, handler_config->ipc_routine_stack);
        arch_set_thread_next_ip(target, handler_config->ipc_routine_entry);
        /* First argument: ipc_msg */
        arch_set_thread_arg0(target, arg);
        /* Second argument: client_badge */
        arch_set_thread_arg1(target, conn->client_badge);
#ifdef CHCORE_OH_TEE
        /* Third argument: pid */
        arch_set_thread_arg2(target, conn->current_client_thread->cap_group->pid);
        /* Fourth argument: tid */
        arch_set_thread_arg3(target, conn->current_client_thread->cap);
#endif /* CHCORE_OH_TEE */
        set_thread_arch_spec_state_ipc(target);

        /* Switch to the target thread */
        sched_to_thread(target);

        /* This function never returns */
        BUG_ON(1);
}

static void thread_migrate_to_client(struct thread *client,
                                     unsigned long ret_value)
{
        /* Set the return value for the target thread */
        arch_set_thread_return(client, ret_value);

        /* Switch to the client thread */
        sched_to_thread(client);

        /* This function never returns */
        BUG_ON(1);
}
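
/*
 * A rough control-flow timeline of the migration pair above (illustrative
 * sketch of what the surrounding code does, not additional mechanism):
 *
 *   client C (sys_ipc_call)           server handler S3
 *   ------------------------          ----------------------------------
 *   C's state := TS_WAITING
 *   S3 borrows C's sched context -->  ipc routine runs on C's sc
 *                                     sys_ipc_return(ret):
 *                                       S3's sc := NULL, state := TS_WAITING
 *   resumes with ret             <--  thread_migrate_to_client(C, ret)
 */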

struct client_shm_config {
        cap_t shm_cap;
        unsigned long shm_addr;
};

/* IPC related system calls */

int sys_register_server(unsigned long ipc_routine, cap_t register_thread_cap,
                        unsigned long destructor)
{
        return register_server(
                current_thread, ipc_routine, register_thread_cap, destructor);
}

cap_t sys_register_client(cap_t server_cap, unsigned long shm_config_ptr)
{
        struct thread *client;
        struct thread *server;

        /*
         * No need to initialize actually.
         * However, fbinfer will complain without zeroing because
         * it cannot understand copy_from_user.
         */
        struct client_shm_config shm_config = {0};
        int r;
        struct client_connection_result res;

        struct ipc_server_config *server_config;
        struct thread *register_cb_thread;
        struct ipc_server_register_cb_config *register_cb_config;

        client = current_thread;

        server = obj_get(current_cap_group, server_cap, TYPE_THREAD);
        if (!server) {
                r = -ECAPBILITY;
                goto out_fail;
        }

        server_config = (struct ipc_server_config *)(server->general_ipc_config);
        if (!server_config) {
                r = -EIPCRETRY;
                goto out_fail;
        }

        /*
         * Locate the register_cb_thread first.
         * Later, directly transfer the control flow to it
         * for finishing the registration.
         *
         * The whole registration procedure:
         * client thread -> server register_cb_thread -> client thread
         */
        register_cb_thread = server_config->register_cb_thread;
        register_cb_config = (struct ipc_server_register_cb_config
                                      *)(register_cb_thread->general_ipc_config);

        /* Acquire register_lock: avoid concurrent client registration.
         *
         * Use try_lock instead of lock since the unlock operation is done by
         * another thread and ChCore does not support mutexes.
         * Otherwise, a deadlock may happen.
         */
        if (try_lock(&register_cb_config->register_lock) != 0) {
                r = -EIPCRETRY;
                goto out_fail;
        }

        /* Validate the user addresses before accessing them */
        if (check_user_addr_range(shm_config_ptr, sizeof(shm_config)) != 0) {
                r = -EINVAL;
                goto out_fail_unlock;
        }

        copy_from_user(
                (void *)&shm_config, (void *)shm_config_ptr, sizeof(shm_config));

        /* Map the pmo of the shared memory */
        r = map_pmo_in_current_cap_group(
                shm_config.shm_cap, shm_config.shm_addr, VMR_READ | VMR_WRITE);
        if (r != 0) {
                goto out_fail_unlock;
        }

        /* Create the ipc_connection object */
        r = create_connection(
                client, server, shm_config.shm_cap, shm_config.shm_addr, &res);
        if (r != 0) {
                goto out_fail_unlock;
        }

        /* Record the connection cap of the client process */
        register_cb_config->conn_cap_in_client = res.client_conn_cap;
        register_cb_config->conn_cap_in_server = res.server_conn_cap;
        /* Record the server_shm_cap for the current connection */
        register_cb_config->shm_cap_in_server = res.server_shm_cap;

        /* Mark current_thread as TS_WAITING */
        current_thread->thread_ctx->state = TS_WAITING;

        /* Set the target thread SP/IP/arg */
        arch_set_thread_stack(register_cb_thread,
                              register_cb_config->register_cb_stack);
        arch_set_thread_next_ip(register_cb_thread,
                                register_cb_config->register_cb_entry);
        arch_set_thread_arg0(register_cb_thread,
                             server_config->declared_ipc_routine_entry);
        obj_put(server);

        /* Pass the scheduling context */
        register_cb_thread->thread_ctx->sc = current_thread->thread_ctx->sc;

        /* On success: switch to the cb_thread of the server */
        sched_to_thread(register_cb_thread);

        /* Never returns */
        BUG_ON(1);

out_fail_unlock:
        unlock(&register_cb_config->register_lock);
out_fail: /* Maybe EAGAIN */
        if (server)
                obj_put(server);
        return r;
}

#define MAX_CAP_TRANSFER 16
static int ipc_send_cap(struct cap_group *target_cap_group,
                        struct ipc_msg *ipc_msg, unsigned int cap_num)
{
        int i, r;
        unsigned int cap_slots_offset;
        cap_t *cap_buf;
        vaddr_t uaddr;

        if (cap_num >= MAX_CAP_TRANSFER) {
                r = -EINVAL;
                goto out_fail;
        }

        uaddr = (vaddr_t)&ipc_msg->cap_slots_offset;
        if (check_user_addr_range(uaddr, sizeof(cap_slots_offset)) != 0) {
                r = -EINVAL;
                goto out_fail;
        }

        r = copy_from_user(
                &cap_slots_offset, (void *)uaddr, sizeof(cap_slots_offset));
        if (r)
                goto out_fail;

        uaddr = (vaddr_t)((char *)ipc_msg + cap_slots_offset);
        if (check_user_addr_range(uaddr, sizeof(*cap_buf) * cap_num) != 0) {
                r = -EINVAL;
                goto out_fail;
        }

        cap_buf = kmalloc(cap_num * sizeof(*cap_buf));
        if (!cap_buf) {
                r = -ENOMEM;
                goto out_fail;
        }

        r = copy_from_user(cap_buf, (void *)uaddr, sizeof(*cap_buf) * cap_num);
        if (r) {
                i = 0;
                goto out_free_cap;
        }

        for (i = 0; i < cap_num; i++) {
                cap_t dest_cap;

                dest_cap = cap_copy(current_cap_group, target_cap_group, cap_buf[i]);
                if (dest_cap < 0) {
                        r = dest_cap;
                        goto out_free_cap;
                }

                cap_buf[i] = dest_cap;
        }

        /* Write the translated caps back so the receiver sees its own slots */
        r = copy_to_user((void *)uaddr, cap_buf, sizeof(*cap_buf) * cap_num);
        if (r)
                goto out_free_cap;

        kfree(cap_buf);
        return 0;

out_free_cap:
        for (--i; i >= 0; i--)
                cap_free(target_cap_group, cap_buf[i]);
        kfree(cap_buf);
out_fail:
        return r;
}
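
/*
 * A minimal client-side sketch of transferring one capability with an IPC
 * call. The layout mirrors what check_ipc_msg_in_shm() and ipc_send_cap()
 * expect; the wrapper name usys_ipc_call is a hypothetical user-library
 * entry point, not necessarily the real one:
 *
 *   struct ipc_msg *msg = (struct ipc_msg *)client_shm_base;
 *   msg->data_offset = sizeof(*msg);
 *   msg->data_len = 16;                          // custom payload bytes
 *   msg->cap_slots_offset = msg->data_offset + msg->data_len;
 *   msg->cap_slot_number = 1;
 *   ((cap_t *)((char *)msg + msg->cap_slots_offset))[0] = pmo_cap;
 *   ret = usys_ipc_call(conn_cap, msg, 1);
 *
 * After the call, the slot holds the cap index that is valid in the
 * server's cap_group (ipc_send_cap writes the translated caps back).
 */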

/* Issue an IPC request */
unsigned long sys_ipc_call(cap_t conn_cap, struct ipc_msg *ipc_msg_in_client,
                           unsigned int cap_num)
{
        struct ipc_connection *conn;
        vaddr_t ipc_msg_in_server = 0;
        int r = 0;

        if (!ipc_msg_in_client) {
                return -EINVAL;
        }

        conn = obj_get(current_cap_group, conn_cap, TYPE_CONNECTION);
        if (unlikely(!conn)) {
                return -ECAPBILITY;
        }

        if (try_lock(&conn->ownership) == 0) {
                /*
                 * Locking succeeded.
                 *
                 * If the connection is INVALID (set in sys_ipc_return or
                 * sys_recycle_cap_group),
                 * return an ERROR to the invoker.
                 */
                if (conn->is_valid == OBJECT_STATE_INVALID) {
                        unlock(&conn->ownership);
                        obj_put(conn);
                        return -EINVAL;
                }
        } else {
                /* Failed to lock the connection */
                obj_put(conn);

                if (current_thread->thread_ctx->thread_exit_state == TE_EXITING) {
                        /* The connection is locked by the recycler */

                        if (current_thread->thread_ctx->type == TYPE_SHADOW) {
                                /*
                                 * The current thread is B in a chained IPC
                                 * (A:B:C). B will receive an error.
                                 * We expect B to invoke sys_ipc_return to give
                                 * the control flow back to A and unlock the
                                 * related connection.
                                 */
                                return -ESRCH;
                        } else {
                                /* The current thread will be set to exited by
                                 * the scheduler */
                                sched();
                                eret_to_thread(switch_context());
                        }
                } else {
                        /* The connection is locked by someone else */
                        return -EIPCRETRY;
                }
        }

        /*
         * grab_ipc_lock may fail and return -EIPCRETRY.
         * No modifications happen before locking, so the client
         * can simply try again later.
         */
        if ((r = grab_ipc_lock(conn)) != 0)
                goto out_obj_put;

        if (ipc_msg_in_client != NULL) {
                /* The ipc_msg should be placed in the IPC shared memory */
                if (check_ipc_msg_in_shm(ipc_msg_in_client,
                                         conn->shm.client_shm_uaddr,
                                         conn->shm.shm_size)
                    < 0) {
                        r = -EINVAL;
                        goto out_release_lock;
                }

                if (cap_num != 0) {
                        r = ipc_send_cap(conn->server_handler_thread->cap_group,
                                         ipc_msg_in_client,
                                         cap_num);
                        if (r < 0)
                                goto out_release_lock;
                }

                conn->user_ipc_msg = ipc_msg_in_client;

                /*
                 * A shm is bound to one connection.
                 * But, the client and server can map the shm at different
                 * addresses. So, we re-calculate the address of the ipc_msg
                 * (in the shm) here. E.g., if the client maps the shm at
                 * 0x4000 and the server at 0x9000, an ipc_msg at client
                 * address 0x4100 is at server address 0x9100.
                 */
                ipc_msg_in_server = (vaddr_t)ipc_msg_in_client
                                    - conn->shm.client_shm_uaddr
                                    + conn->shm.server_shm_uaddr;
        }

        /* Call the server (handler thread) */
        thread_migrate_to_server(conn, ipc_msg_in_server);

        BUG("should not reach here\n");

out_release_lock:
        release_ipc_lock(conn);
out_obj_put:
        unlock(&conn->ownership);
        obj_put(conn);
        return r;
}

int sys_ipc_return(unsigned long ret, unsigned int cap_num)
{
        struct ipc_server_handler_config *handler_config;
        struct ipc_connection *conn;
        struct thread *client;

        /* Get the currently active connection */
        handler_config =
                (struct ipc_server_handler_config *)current_thread->general_ipc_config;
        conn = handler_config->active_conn;

        if (!conn)
                return -EINVAL;

        /*
         * Get the client thread that issued this IPC.
         *
         * Note that it is **unnecessary** to set the field to NULL,
         * i.e., conn->current_client_thread = NULL.
         */
        client = conn->current_client_thread;

        /* Step-1. check if current_thread (conn->server_handler_thread) is
         *         TE_EXITING
         *         -> Yes: set server_handler_thread to NULL, then continue to
         *            Step-2
         *         -> No: continue to Step-2
         */
        if (current_thread->thread_ctx->thread_exit_state == TE_EXITING) {
                kdebug("%s:%d Step-1\n", __func__, __LINE__);

                conn->is_valid = OBJECT_STATE_INVALID;

                current_thread->thread_ctx->thread_exit_state = TE_EXITED;
                current_thread->thread_ctx->state = TS_EXIT;

                /* Return an error to the client */
                ret = -ESRCH;
        }

        /* Step-2. check if client_thread is TE_EXITING
         *         -> Yes: set current_client_thread to NULL.
         *            Then check if the client is a shadow thread
         *            -> No: set the client to TS_EXIT and then sched
         *            -> Yes: return to the client (it will recycle itself at
         *               the next ipc_return)
         *         -> No: return normally
         */
        if (client->thread_ctx->thread_exit_state == TE_EXITING) {
                kdebug("%s:%d Step-2\n", __func__, __LINE__);

                /*
                 * Currently, a connection is assumed to belong to the client
                 * process. So, if the client is exiting, then the connection
                 * is useless.
                 */
                conn->is_valid = OBJECT_STATE_INVALID;

                /* If the client thread is not TYPE_SHADOW, then directly mark
                 * it as exited and reschedule.
                 *
                 * Otherwise, the client thread is B in a chained IPC (A:B:C)
                 * and current_thread is C. So, C returns to B and later B
                 * will return to A.
                 */
                if (client->thread_ctx->type != TYPE_SHADOW) {
                        kdebug("%s:%d Step-2.0\n", __func__, __LINE__);
                        handler_config->active_conn = NULL;

                        current_thread->thread_ctx->state = TS_WAITING;

                        current_thread->thread_ctx->sc = NULL;

                        unlock(&handler_config->ipc_lock);

                        unlock(&conn->ownership);
                        obj_put(conn);

                        client->thread_ctx->thread_exit_state = TE_EXITED;
                        client->thread_ctx->state = TS_EXIT;

                        sched();
                        eret_to_thread(switch_context());
                        /* The control flow will never reach here */
                }
        }

        if (cap_num != 0) {
                struct ipc_msg *server_ipc_msg;

                if (unlikely(conn->user_ipc_msg == NULL))
                        return -EINVAL;

                /**
                 * The sanity of conn->user_ipc_msg has been checked during
                 * sys_ipc_call.
                 */
                server_ipc_msg = (struct ipc_msg *)((unsigned long)conn->user_ipc_msg
                                                    - conn->shm.client_shm_uaddr
                                                    + conn->shm.server_shm_uaddr);

                int r = ipc_send_cap(
                        conn->current_client_thread->cap_group, server_ipc_msg, cap_num);
                if (r < 0)
                        return r;
        }

        /* Set active_conn to NULL since the IPC will finish soon */
        handler_config->active_conn = NULL;

        /*
         * Return the control flow (sched-context) back later.
         * Set the current_thread state to TS_WAITING again.
         */
        current_thread->thread_ctx->state = TS_WAITING;

        /*
         * A shadow thread should no longer use
         * the client's scheduling context.
         *
         * Note that the sc of the server_thread (current_thread) must be set
         * to NULL (named OP-SET-NULL) **before** unlocking the lock.
         * Otherwise, a following client thread may transfer its sc to the
         * server_thread before OP-SET-NULL.
         */
        current_thread->thread_ctx->sc = NULL;

        /*
         * Release the ipc_lock to mark that the server_handler_thread can
         * serve other requests now.
         */
        unlock(&handler_config->ipc_lock);

        unlock(&conn->ownership);
        obj_put(conn);

        /* Return to the client */
        thread_migrate_to_client(client, ret);
        BUG("should not reach here\n");
}

int sys_ipc_register_cb_return(cap_t server_handler_thread_cap,
                               unsigned long server_thread_exit_routine,
                               unsigned long server_shm_addr)
{
        struct ipc_server_register_cb_config *config;
        struct ipc_connection *conn;
        struct thread *client_thread;

        struct thread *ipc_server_handler_thread;
        struct ipc_server_handler_config *handler_config;
        int r = -ECAPBILITY;

        config = (struct ipc_server_register_cb_config *)
                         current_thread->general_ipc_config;

        if (!config)
                goto out_fail;

        conn =
                obj_get(current_cap_group, config->conn_cap_in_server, TYPE_CONNECTION);

        if (!conn)
                goto out_fail;

        /*
         * @server_handler_thread_cap is from the server.
         * The server uses this handler_thread to serve ipc requests.
         */
        ipc_server_handler_thread = (struct thread *)obj_get(
                current_cap_group, server_handler_thread_cap, TYPE_THREAD);

        if (!ipc_server_handler_thread)
                goto out_fail_put_conn;

        /* Map the shm of the connection in the server */
        r = map_pmo_in_current_cap_group(
                config->shm_cap_in_server, server_shm_addr, VMR_READ | VMR_WRITE);
        if (r != 0)
                goto out_fail_put_thread;

        /* Get the client_thread that issued this registration */
        client_thread = conn->current_client_thread;
        /*
         * Set the return value (conn_cap) for the client here
         * because the server has approved the registration.
         */
        arch_set_thread_return(client_thread, config->conn_cap_in_client);

        /*
         * Initialize the ipc configuration for the handler_thread (begin)
         *
         * When the handler_config isn't NULL, this server handler thread has
         * been initialized before, so skip the initialization. This happens
         * when a server uses one server handler thread for serving multiple
         * client threads.
         */
        if (!ipc_server_handler_thread->general_ipc_config) {
                handler_config = (struct ipc_server_handler_config *)kmalloc(
                        sizeof(*handler_config));
                ipc_server_handler_thread->general_ipc_config = handler_config;
                lock_init(&handler_config->ipc_lock);

                /*
                 * Record the initial PC & SP for the handler_thread.
                 * For serving each IPC, the handler_thread starts from the
                 * same PC and SP.
                 */
                handler_config->ipc_routine_entry =
                        arch_get_thread_next_ip(ipc_server_handler_thread);
                handler_config->ipc_routine_stack =
                        arch_get_thread_stack(ipc_server_handler_thread);
                handler_config->ipc_exit_routine_entry = server_thread_exit_routine;
                handler_config->destructor = config->destructor;
        }
        obj_put(ipc_server_handler_thread);
        /* Initialize the ipc configuration for the handler_thread (end) */

        /* Fill the server information in the IPC connection. */
        conn->shm.server_shm_uaddr = server_shm_addr;
        conn->server_handler_thread = ipc_server_handler_thread;
        conn->is_valid = OBJECT_STATE_VALID;
        conn->current_client_thread = NULL;
        conn->conn_cap_in_client = config->conn_cap_in_client;
        conn->conn_cap_in_server = config->conn_cap_in_server;
        obj_put(conn);

        /*
         * Return the control flow (sched-context) back later.
         * Set the current_thread state to TS_WAITING again.
         */
        current_thread->thread_ctx->state = TS_WAITING;

        unlock(&config->register_lock);

        /* The register thread should no longer use the client's scheduling
         * context. */
        current_thread->thread_ctx->sc = NULL;

        /* Finish the registration: switch to the original client_thread */
        sched_to_thread(client_thread);
        /* Never returns */

out_fail_put_thread:
        obj_put(ipc_server_handler_thread);
out_fail_put_conn:
        obj_put(conn);
out_fail:
        return r;
}

void sys_ipc_exit_routine_return(void)
{
        struct ipc_server_handler_config *config;

        config =
                (struct ipc_server_handler_config *)current_thread->general_ipc_config;
        if (!config) {
                goto out;
        }
        /*
         * Set the server handler thread state to TS_WAITING again
         * so that a client can migrate to it later.
         */
        current_thread->thread_ctx->state = TS_WAITING;
        kfree(current_thread->thread_ctx->sc);
        unlock(&config->ipc_lock);
out:
        sched();
        eret_to_thread(switch_context());
}