/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <object/recycle.h>
#include <object/cap_group.h>
#include <object/thread.h>
#include <common/list.h>
#include <common/sync.h>
#include <common/util.h>
#include <common/bitops.h>
#include <mm/kmalloc.h>
#include <mm/mm.h>
#include <mm/vmspace.h>
#include <mm/uaccess.h>
#include <lib/printk.h>
#include <ipc/notification.h>
#include <irq/irq.h>
#include <lib/ring_buffer.h>
#include <sched/context.h>
#include <syscall/syscall_hooks.h>
#ifdef CHCORE_OH_TEE
#include <ipc/channel.h>
#endif /* CHCORE_OH_TEE */

struct recycle_msg {
    badge_t badge;
    int exitcode;
    int padding;
};

struct recycle_msg_node {
    struct list_head node;
    struct recycle_msg msg;
};

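/*
 * State registered by the user-level recycler (see sys_register_recycle):
 * a notification used to wake it up and a ring buffer shared with it for
 * passing recycle messages.
 */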
struct notification *recycle_notification = NULL;
struct ring_buffer *recycle_msg_buffer;
/* This list is only used when the recycle_msg_buffer is full */
struct list_head recycle_msg_head;
struct lock recycle_buffer_lock;

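/*
 * Register the notification and the shared message buffer that
 * notify_user_recycler (below) uses to inform the user-level recycler.
 * msg_buffer is a user virtual address and is translated to a kernel
 * virtual address here.
 */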
int sys_register_recycle(cap_t notifc_cap, vaddr_t msg_buffer)
{
    int ret;

    if ((ret = hook_sys_register_recycle(notifc_cap, msg_buffer)) != 0)
        return ret;

    recycle_notification =
        obj_get(current_cap_group, notifc_cap, TYPE_NOTIFICATION);

    if (recycle_notification == NULL)
        return -ECAPBILITY;

    ret = trans_uva_to_kva(msg_buffer, (vaddr_t *)&recycle_msg_buffer);
    if (ret != 0)
        return -EINVAL;

    init_list_head(&recycle_msg_head);
    lock_init(&recycle_buffer_lock);

    return 0;
}

/*
 * Kernel uses this function to invoke the recycle thread in procmgr.
 * proc_badge is the badge of the process to recycle.
 */
void notify_user_recycler(badge_t proc_badge, int exitcode)
{
    /* Lock the recycle buffer first */
    lock(&recycle_buffer_lock);

    if (if_buffer_full(recycle_msg_buffer)) {
        /* Save the msg in the list for now */
        struct recycle_msg_node *msg;

        msg = kmalloc(sizeof(*msg));
        msg->msg.badge = proc_badge;
        msg->msg.exitcode = exitcode;
        list_add(&msg->node, &recycle_msg_head);
    } else {
        struct recycle_msg tmp;
        tmp.badge = proc_badge;
        tmp.exitcode = exitcode;
        set_one_msg(recycle_msg_buffer, &tmp);

#ifndef FBINFER
        /* Put the msgs saved in the list into the buffer */
        struct recycle_msg_node *msg, *iter_tmp;

        for_each_in_list_safe (msg, iter_tmp, node, &recycle_msg_head) {
            if (!if_buffer_full(recycle_msg_buffer)) {
                list_del(&msg->node);
                struct recycle_msg tmp2;
                tmp2.badge = msg->msg.badge;
                tmp2.exitcode = msg->msg.exitcode;
                set_one_msg(recycle_msg_buffer, &tmp2);
                kfree(msg);
            } else {
                break;
            }
        }
#endif

        /* Notify the recycle thread through the notification */
        /*
         * The recycle thread's queue lock will only be
         * grabbed by the kernel (in the following signal_notific)
         * and the recycle thread itself.
         * So, try_lock(queue_lock) in signal_notific should not fail.
         */
        int ret;
        ret = signal_notific(recycle_notification);
        BUG_ON(ret != 0);
    }

    unlock(&recycle_buffer_lock);
}

/* All the threads in current_cap_group should exit */
void sys_exit_group(int exitcode)
{
    struct thread *thread;

    kdebug("%s\n", __func__);

    /*
     * Check if the notification has been sent.
     * E.g., a faulting process may trigger sys_exit_group multiple times.
     */
    if (atomic_cmpxchg_32((&current_cap_group->notify_recycler), 0, 1) == 0) {
        /*
         * Grab the threads_lock and set the threads' state.
         * After that, no new thread will be allocated.
         * See `create_thread` in thread.c.
         */
        lock(&current_cap_group->threads_lock);
        for_each_in_list (
            thread, struct thread, node, &(current_cap_group->thread_list)) {
            /* CAS is used in case the state is set to TE_EXITED
             * concurrently */
            atomic_cmpxchg_32((int *)(&thread->thread_ctx->thread_exit_state),
                              TE_RUNNING,
                              TE_EXITING);
        }
        unlock(&current_cap_group->threads_lock);

        notify_user_recycler(current_cap_group->badge, exitcode);
    }

    /* Set the exit state of current_thread: no contention */
    current_thread->thread_ctx->thread_exit_state = TE_EXITING;
    sched();
    eret_to_thread(switch_context());
}

/*
 * Only procmgr can call this function. It uses it to kill the process
 * identified by the given cap in procmgr's cap_group.
 */
int sys_kill_group(int proc_cap)
{
    struct cap_group *cap_group_to_kill;
    struct thread *thread;

    cap_group_to_kill = obj_get(current_cap_group, proc_cap, TYPE_CAP_GROUP);
    if (!cap_group_to_kill) {
        /* Invalid cap or the object is not a cap group */
        return -EINVAL;
    }

    if (atomic_cmpxchg_32((&cap_group_to_kill->notify_recycler), 0, 1) == 0) {
        /*
         * Grab the threads_lock and set the threads' state.
         * After that, no new thread will be allocated.
         * See `create_thread` in thread.c.
         */
        lock(&cap_group_to_kill->threads_lock);
        for_each_in_list (
            thread, struct thread, node, &(cap_group_to_kill->thread_list)) {
            /* CAS is used in case the state is set to TE_EXITED
             * concurrently */
            atomic_cmpxchg_32((int *)(&thread->thread_ctx->thread_exit_state),
                              TE_RUNNING,
                              TE_EXITING);
        }
        unlock(&cap_group_to_kill->threads_lock);

        notify_user_recycler(cap_group_to_kill->badge, 0);
    }
    obj_put(cap_group_to_kill);

    return 0;
}

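/*
 * Recycle the server-side (shadow) handler thread of a connection.
 * If the server registered an IPC exit routine and the shadow thread is
 * still running, re-dispatch the thread to run that routine so the server
 * can clean up per-client state; otherwise simply mark the thread TS_EXIT
 * since it will never be scheduled again.
 */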
static void recycle_server_shadow_thread(struct ipc_connection *conn,
                                         struct thread *server_thread,
                                         bool recycle_client_state)
{
    struct ipc_server_handler_config *config;

    config =
        (struct ipc_server_handler_config *)server_thread->general_ipc_config;

    if (config->ipc_exit_routine_entry
        && server_thread->thread_ctx->thread_exit_state == TE_RUNNING) {
        BUG_ON(server_thread->thread_ctx->sc);
        /*
         * If the server shadow thread will not exit (e.g., single
         * shadow thread), it should be locked in case other clients
         * perform ipc_call during its exit routine.
         * The server exit routine should call the
         * `ipc_exit_routine_reture` syscall to unlock.
         *
         * Otherwise, locking the server thread hurts nothing since
         * it will exit and will not be used by anyone.
         */
        BUG_ON(is_locked(&config->ipc_lock));
        lock(&config->ipc_lock);

        server_thread->thread_ctx->sc =
            kmalloc(sizeof(*server_thread->thread_ctx->sc));
        server_thread->thread_ctx->sc->budget = DEFAULT_BUDGET;
        server_thread->thread_ctx->sc->prio = DEFAULT_PRIO;
        arch_set_thread_next_ip(server_thread, config->ipc_exit_routine_entry);
        arch_set_thread_stack(server_thread, config->ipc_routine_stack);
        set_thread_arch_spec_state_ipc(server_thread);

        /* See comments in sys_ipc_close_connection */
        if (recycle_client_state) {
            arch_set_thread_arg0(server_thread, config->destructor);
        } else {
            arch_set_thread_arg0(server_thread, 0);
        }

        arch_set_thread_arg1(server_thread, conn->client_badge);
        arch_set_thread_arg2(server_thread, conn->shm.server_shm_uaddr);
        arch_set_thread_arg3(server_thread, conn->shm.shm_size);
        server_thread->thread_ctx->state = TS_INTER;
        vmspace_unmap_range(conn->server_handler_thread->vmspace,
                            conn->shm.server_shm_uaddr,
                            conn->shm.shm_size);
        BUG_ON(sched_enqueue(server_thread));
    } else {
        /*
         * The shadow thread still needs to be recycled but will not
         * be scheduled again, so its thread state is set to TS_EXIT
         * directly here.
         */
        server_thread->thread_ctx->state = TS_EXIT;
    }
}

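/*
 * Mark a connection invalid so that no new IPC can be issued on it
 * (see sys_ipc_call). If the ownership lock cannot be taken, an IPC is
 * presumably still in flight, so return -EAGAIN and let the caller retry.
 */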
static int __stop_connection(struct ipc_connection *conn)
{
    if (conn->is_valid == OBJECT_STATE_INVALID) {
        return 0;
    }

    if (try_lock(&conn->ownership) != 0) {
        return -EAGAIN;
    }

    /*
     * Mark the connection as invalid.
     * Afterwards, the connection will never be used. See sys_ipc_call.
     */
    conn->is_valid = OBJECT_STATE_INVALID;

    return 0;
}

/* Wait for ongoing IPCs to finish and stop new IPCs. */
static void stop_connection(struct object_slot *slot, int *ret)
{
    struct ipc_connection *conn;

    conn = (struct ipc_connection *)slot->object->opaque;

    *ret = __stop_connection(conn);
}

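/*
 * Recycle one (already stopped) connection on behalf of cap_group.
 * If cap_group is the client side, recycle the server shadow thread and
 * free the server-side caps; if it is the server side, only detach the
 * handler thread and release the ownership lock so that the client side
 * can still be recycled later.
 */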
static void __recycle_connection(struct cap_group *cap_group,
                                 struct ipc_connection *conn,
                                 bool client_process_exited)
{
    struct thread *server_thread;

    BUG_ON(conn->is_valid != OBJECT_STATE_INVALID);

    if (conn->client_badge == cap_group->badge) {
        server_thread = conn->server_handler_thread;
        if (server_thread) {
            recycle_server_shadow_thread(
                conn, server_thread, client_process_exited);
            cap_free(server_thread->cap_group, conn->conn_cap_in_server);
            cap_free(server_thread->cap_group, conn->shm.shm_cap_in_server);
        }
    } else {
        /* cap_group is the server side of the connection */
        conn->server_handler_thread = NULL;
        /* Since we need to lock the connection again when
         * the connection owner (client cap_group) is recycled,
         * unlock here.
         * Don't worry: the connection will not be used any more.
         */
        unlock(&conn->ownership);
    }
}

static void recycle_connection(struct cap_group *cap_group,
                               struct object_slot *slot)
{
    struct ipc_connection *conn;

    conn = (struct ipc_connection *)slot->object->opaque;

    __recycle_connection(cap_group, conn, true);
}

/*
 * Close an IPC connection from a client **thread** to a server handler
 * (shadow) thread. It can be invoked actively by a client thread, so when
 * this syscall is issued the client process may not have exited yet. In
 * that case we must not let the IPC server recycle the state it stores for
 * the client process: that state belongs to the whole client, while
 * connections are established between two threads in the current
 * programming model.
 */
int sys_ipc_close_connection(cap_t connection_cap)
{
    int ret;
    struct ipc_connection *conn;
    struct vmspace *client_vmspace;

    conn = obj_get(current_cap_group, connection_cap, TYPE_CONNECTION);

    if (!conn) {
        ret = -ECAPBILITY;
        goto out;
    }

    ret = __stop_connection(conn);
    if (ret < 0) {
        goto out_put;
    }

    __recycle_connection(current_cap_group, conn, false);

    client_vmspace = current_thread->vmspace;

    vmspace_unmap_range(
        client_vmspace, conn->shm.client_shm_uaddr, conn->shm.shm_size);
    cap_free(current_cap_group, conn->shm.shm_cap_in_client);

    cap_free(current_cap_group, connection_cap);
out_put:
    obj_put(conn);
out:
    return ret;
}

/* Wait for ongoing IPC registrations to finish and stop newly coming ones */
static void stop_ipc_registration(struct cap_group *cap_group,
                                  struct object_slot *slot, int *ret)
{
    struct thread *thread;
    struct ipc_server_register_cb_config *config;

    thread = (struct thread *)slot->object->opaque;
    if (thread->cap_group != cap_group)
        return;

    if (thread->thread_ctx->type != TYPE_REGISTER)
        return;

    /* Avoid deadlock when this function is retried */
    if (thread->thread_ctx->thread_exit_state == TE_EXITED)
        return;

    config = (struct ipc_server_register_cb_config *)thread->general_ipc_config;
    if (try_lock(&config->register_lock) != 0) {
        /* Lock fails: registration is ongoing. So, try next time. */
        *ret = -EAGAIN;
        return;
    }

    /*
     * Do not release the register_lock, so the register_cb_thread will
     * never execute again.
     */
    thread->thread_ctx->thread_exit_state = TE_EXITED;
    thread->thread_ctx->state = TS_EXIT;
}

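/*
 * Mark the notification object invalid under its lock; subsequent
 * operations on it are expected to observe OBJECT_STATE_INVALID and fail.
 */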
static void stop_notification(struct object_slot *slot)
{
    struct notification *notific;

    notific = (struct notification *)slot->object->opaque;
    lock(&notific->notifc_lock);
    notific->state = OBJECT_STATE_INVALID;
    unlock(&notific->notifc_lock);
}

#ifdef CHCORE_OH_TEE
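/* OH-TEE only: close the channel referenced by the cap_group being recycled. */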
static void stop_channel(struct object_slot *slot)
{
    struct channel *channel;
    channel = (struct channel *)slot->object->opaque;
    close_channel(channel, slot->cap_group);
}
#endif /* CHCORE_OH_TEE */

/*
 * Convention: sys_exit_group is executed beforehand to notify the recycle
 * thread, which then executes sys_cap_group_recycle.
 *
 * If a thread invoked this to recycle its own resources, the kernel would
 * run on that thread's kernel stack, which makes things complex.
 * So, only the user-level recycler in the process manager
 * can invoke cap_group_exit on some cap_group.
 *
 * Case-1: a thread invokes exit; it directly tells the process manager,
 *         and then the process manager invokes this function.
 * Case-2: if a thread triggers a fault (e.g., segfault), the kernel notifies
 *         the process manager to exit the corresponding process (cap_group).
 */
int sys_cap_group_recycle(cap_t cap_group_cap)
{
    struct cap_group *cap_group;
    struct thread *thread;
    int ret;
    struct slot_table *slot_table;
    cap_t slot_id;
    struct vmspace *vmspace = NULL;

    if ((ret = hook_sys_cap_group_recycle(cap_group_cap)) != 0)
        return ret;

    cap_group = obj_get(current_cap_group, cap_group_cap, TYPE_CAP_GROUP);
    if (!cap_group)
        return -ECAPBILITY;

    ret = 0;
    /* Phase-1: Stop all the threads in this cap_group */

    /* IPC recycle begin */
    slot_table = &cap_group->slot_table;
    write_lock(&slot_table->table_guard);

    /* Handle all the connection caps and the register_cb_thread caps */
    for_each_set_bit (slot_id, slot_table->slots_bmp, slot_table->slots_size) {
        struct object_slot *slot;

        slot = get_slot(cap_group, slot_id);
        BUG_ON(slot == NULL);

        if (slot->object->type == TYPE_CONNECTION) {
            stop_connection(slot, &ret);
        } else if (slot->object->type == TYPE_THREAD) {
            stop_ipc_registration(cap_group, slot, &ret);
        } else if (slot->object->type == TYPE_NOTIFICATION) {
            stop_notification(slot);
        }
#ifdef CHCORE_OH_TEE
        if (slot->object->type == TYPE_CHANNEL) {
            stop_channel(slot);
        }
#endif /* CHCORE_OH_TEE */
    }

    write_unlock(&slot_table->table_guard);
    /* IPC recycle end */

    if (ret == -EAGAIN) {
        kdebug("%s: Line: %d\n", __func__, __LINE__);
        goto out;
    }

    /*
     * As `sys_exit_group` has been executed beforehand:
     * - no new thread will be created
     * - each thread was set to TE_EXITING in that function
     */
    for_each_in_list (thread, struct thread, node, &(cap_group->thread_list)) {
        /* If some thread is not TE_EXITED, then return -EAGAIN. */
        if (thread->thread_ctx->thread_exit_state != TE_EXITED) {
            /*
             * As all the connections are set to INVALID in the previous
             * step, all the shadow threads (IPC server threads)
             * will not execute any more.
             * Thus, we directly set them to exited here.
             */
            if (thread->thread_ctx->type == TYPE_SHADOW) {
                thread->thread_ctx->thread_exit_state = TE_EXITED;
                continue;
            }

            if (thread->thread_ctx->state == TS_WAITING) {
                try_remove_timeout(thread);
                thread->thread_ctx->thread_exit_state = TE_EXITED;
                continue;
            }

            ret = -EAGAIN;
        }
    }

    if (ret == -EAGAIN) {
        kdebug("%s: Line: %d\n", __func__, __LINE__);
        goto out;
    }

    /* All the threads are TE_EXITED now; wait until their kernel stacks are
     * free */
    for_each_in_list (thread, struct thread, node, &(cap_group->thread_list)) {
        wait_for_kernel_stack(thread);
        BUG_ON(thread->thread_ctx->thread_exit_state != TE_EXITED);
        if (thread->thread_ctx->state != TS_EXIT)
            kwarn("thread ctx->state is %d\n", thread->thread_ctx->state);
    }

    /*
     * Phase-2:
     * Iterate over the whole capability table and free the corresponding
     * resources.
     */

    slot_table = &cap_group->slot_table;
    write_lock(&slot_table->table_guard);

    for_each_set_bit (slot_id, slot_table->slots_bmp, slot_table->slots_size) {
        struct object_slot *slot;
        struct object *object;

        slot = get_slot(cap_group, slot_id);
        BUG_ON(!slot || slot->isvalid == false);
        object = slot->object;

        if (slot_id == VMSPACE_OBJ_ID) {
            vmspace = (struct vmspace *)(object->opaque);
            flush_tlb_by_vmspace(vmspace);
        }

        if (object->type == TYPE_CONNECTION) {
            recycle_connection(cap_group, slot);
            struct ipc_connection *conn =
                (struct ipc_connection *)slot->object->opaque;

            __cap_free(cap_group, conn->shm.shm_cap_in_client, true, false);
            __cap_free(cap_group, slot_id, true, false);
        } else if ((object->type == TYPE_THREAD)
                   && (((struct thread *)(object->opaque))->cap_group
                       == cap_group)) {
            /*
             * Use cap_free_all to free the threads belonging to
             * the exited cap_group.
             */
            kdebug("recycle one local thread.\n");

            /*
             * Like cap_free_all, but without locks.
             * Directly using cap_free_all leads to deadlock.
             */
            struct object_slot *slot_iter = NULL, *slot_iter_tmp = NULL;
            int r;

            /* Not using obj_get or get_opaque is also to avoid
             * deadlock. */
            atomic_fetch_add_long(&object->refcount, 1);

            /* free all copied slots */
            lock(&object->copies_lock);
            for_each_in_list_safe (
                slot_iter, slot_iter_tmp, copies, &object->copies_head) {
                int iter_slot_id = slot_iter->slot_id;
                struct cap_group *iter_cap_group = slot_iter->cap_group;

                r = __cap_free(iter_cap_group,
                               iter_slot_id,
                               iter_cap_group == cap_group,
                               true);
                BUG_ON(r != 0);
            }
            unlock(&object->copies_lock);

            obj_put(object->opaque);
        } else {
            __cap_free(cap_group, slot_id, true, false);
        }
    }
    write_unlock(&slot_table->table_guard);

    /* The cap_group will be freed in the following cap_free_all. */
    obj_put(cap_group);
    cap_free_all(current_cap_group, cap_group_cap);

    kdebug("%s is done\n", __func__);

    return ret;
out:
    obj_put(cap_group);
    return ret;
}