/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include "object/memory.h"
#include <common/kprint.h>
#include <common/macro.h>
#include <common/types.h>
#include <common/util.h>
#include <mm/kmalloc.h>
#include <mm/mm.h>
#include <mm/uaccess.h>
#include <object/thread.h>
#include <object/recycle.h>
#include <sched/context.h>
#include <arch/machine/registers.h>
#include <arch/machine/smp.h>
#include <arch/time.h>
#include <irq/ipi.h>
#include <common/endianness.h>

#include "thread_env.h"

/*
 * local functions
 */
#ifdef CHCORE
static int thread_init(struct thread *thread, struct cap_group *cap_group,
#else /* For unit test */
int thread_init(struct thread *thread, struct cap_group *cap_group,
#endif
                       vaddr_t stack, vaddr_t pc, u32 prio, u32 type, s32 aff)
{
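        /*
         * Record the owning cap_group and vmspace. The references taken by
         * obj_get are dropped immediately by obj_put, so only the raw
         * pointers are kept here.
         */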
        thread->cap_group = obj_get(cap_group, CAP_GROUP_OBJ_ID, TYPE_CAP_GROUP);
        thread->vmspace = obj_get(cap_group, VMSPACE_OBJ_ID, TYPE_VMSPACE);
        obj_put(thread->cap_group);
        obj_put(thread->vmspace);

        /* Thread context is used as the kernel stack for that thread */
        thread->thread_ctx = create_thread_ctx(type);
        if (!thread->thread_ctx)
                return -ENOMEM;
        init_thread_ctx(thread, stack, pc, prio, type, aff);

        /*
         * Field prev_thread records the thread that ran
         * just before this thread. Obviously, it is NULL at the beginning.
         */
        thread->prev_thread = NULL;

        /* The ipc_config will be allocated on demand */
        thread->general_ipc_config = NULL;

        thread->sleep_state.cb = NULL;

        lock_init(&thread->sleep_state.queue_lock);

        return 0;
}

void thread_deinit(void *thread_ptr)
{
        struct thread *thread;
        struct cap_group *cap_group;

        thread = (struct thread *)thread_ptr;

        BUG_ON(thread->thread_ctx->thread_exit_state != TE_EXITED);
        if (thread->thread_ctx->state != TS_EXIT)
                kwarn("thread ctx->state is %d\n", thread->thread_ctx->state);

        cap_group = thread->cap_group;
        lock(&cap_group->threads_lock);
        list_del(&thread->node);
        unlock(&cap_group->threads_lock);

        if (thread->general_ipc_config)
                kfree(thread->general_ipc_config);

        destroy_thread_ctx(thread);

        /* The thread struct itself will be freed in __free_object */
}

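/* Translate ELF program header flags (PF_R/W/X) into VMR_* permission bits. */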
#define PFLAGS2VMRFLAGS(PF)                                       \
        (((PF)&PF_X ? VMR_EXEC : 0) | ((PF)&PF_W ? VMR_WRITE : 0) \
         | ((PF)&PF_R ? VMR_READ : 0))

#define OFFSET_MASK (0xFFF)

/* Required by LibC */
void prepare_env(char *env, vaddr_t top_vaddr, char *name,
                 struct process_metadata *meta);

/*
 * exported functions
 */
void switch_thread_vmspace_to(struct thread *thread)
{
        switch_vmspace_to(thread->vmspace);
}

/* Arguments for the initial thread */
#if __SIZEOF_POINTER__ == 4
#define ROOT_THREAD_STACK_BASE (0x50000000UL)
#define ROOT_THREAD_STACK_SIZE (0x200000UL)
#else
#define ROOT_THREAD_STACK_BASE (0x500000000000UL)
#define ROOT_THREAD_STACK_SIZE (0x800000UL)
#endif
#define ROOT_THREAD_PRIO       DEFAULT_PRIO

#define ROOT_THREAD_VADDR      0x400000

char ROOT_NAME[] = "/procmgr.srv";

/*
 * The root_thread is actually the first user thread,
 * which is no different from other user threads.
 */
void create_root_thread(void)
{
        struct cap_group *root_cap_group;
        cap_t thread_cap;
        struct thread *root_thread;
        char data[8];
        int ret;
        cap_t stack_pmo_cap;
        struct thread *thread;
        struct pmobject *stack_pmo;
        struct vmspace *init_vmspace;
        vaddr_t stack;
        vaddr_t kva;
        struct process_metadata meta;

        /*
         * Read from binary.
         * The msg and the binary of the init process (procmgr) are linked
         * behind the kernel image via the incbin instruction.
         * binary_procmgr_bin_start points to the first piece of info:
         * the entry point of the init process, followed by further
         * eight-byte fields holding the rest of the process metadata.
         */

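        /*
         * Each metadata field below is an 8-byte big-endian value at one of
         * the ROOT_*_OFF offsets (see thread_env.h). Copying it into the
         * local buffer first avoids unaligned 8-byte loads from the embedded
         * blob before converting with be64_to_cpu.
         */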
        memcpy(data,
               (void *)((unsigned long)&binary_procmgr_bin_start
                        + ROOT_ENTRY_OFF),
               sizeof(data));
        meta.entry = (unsigned long)be64_to_cpu(*(u64 *)data);

        memcpy(data,
               (void *)((unsigned long)&binary_procmgr_bin_start
                        + ROOT_FLAGS_OFF),
               sizeof(data));
        meta.flags = (unsigned long)be64_to_cpu(*(u64 *)data);

        memcpy(data,
               (void *)((unsigned long)&binary_procmgr_bin_start
                        + ROOT_PHENT_SIZE_OFF),
               sizeof(data));
        meta.phentsize = (unsigned long)be64_to_cpu(*(u64 *)data);

        memcpy(data,
               (void *)((unsigned long)&binary_procmgr_bin_start
                        + ROOT_PHNUM_OFF),
               sizeof(data));
        meta.phnum = (unsigned long)be64_to_cpu(*(u64 *)data);

        memcpy(data,
               (void *)((unsigned long)&binary_procmgr_bin_start
                        + ROOT_PHDR_ADDR_OFF),
               sizeof(data));
        meta.phdr_addr = (unsigned long)be64_to_cpu(*(u64 *)data);

        root_cap_group = create_root_cap_group(ROOT_NAME, strlen(ROOT_NAME));

        init_vmspace = obj_get(root_cap_group, VMSPACE_OBJ_ID, TYPE_VMSPACE);

        /* Allocate and setup a user stack for the init thread */
        stack_pmo_cap = create_pmo(ROOT_THREAD_STACK_SIZE,
                                   PMO_ANONYM,
                                   root_cap_group,
                                   0,
                                   &stack_pmo);
        BUG_ON(stack_pmo_cap < 0);

        ret = vmspace_map_range(init_vmspace,
                                ROOT_THREAD_STACK_BASE,
                                ROOT_THREAD_STACK_SIZE,
                                VMR_READ | VMR_WRITE,
                                stack_pmo);
        BUG_ON(ret != 0);

        /* Allocate the init thread */
        thread = obj_alloc(TYPE_THREAD, sizeof(*thread));
        BUG_ON(thread == NULL);

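        /*
         * Map each segment described by procmgr's program headers: read the
         * header fields (flags, offset, vaddr, filesz, memsz) from the
         * embedded image, back the segment with a PMO that reuses the pages
         * already in the kernel image, and map it with permissions derived
         * from the ELF flags.
         */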
        for (int i = 0; i < meta.phnum; i++) {
                unsigned int flags;
                unsigned long offset, vaddr, filesz, memsz;

                memcpy(data,
                       (void *)((unsigned long)&binary_procmgr_bin_start
                                + ROOT_PHDR_OFF + i * ROOT_PHENT_SIZE
                                + PHDR_FLAGS_OFF),
                       sizeof(data));
                flags = (unsigned int)le32_to_cpu(*(u32 *)data);

                memcpy(data,
                       (void *)((unsigned long)&binary_procmgr_bin_start
                                + ROOT_PHDR_OFF + i * ROOT_PHENT_SIZE
                                + PHDR_OFFSET_OFF),
                       sizeof(data));
                offset = (unsigned long)le64_to_cpu(*(u64 *)data);

                memcpy(data,
                       (void *)((unsigned long)&binary_procmgr_bin_start
                                + ROOT_PHDR_OFF + i * ROOT_PHENT_SIZE
                                + PHDR_VADDR_OFF),
                       sizeof(data));
                vaddr = (unsigned long)le64_to_cpu(*(u64 *)data);

                memcpy(data,
                       (void *)((unsigned long)&binary_procmgr_bin_start
                                + ROOT_PHDR_OFF + i * ROOT_PHENT_SIZE
                                + PHDR_FILESZ_OFF),
                       sizeof(data));
                filesz = (unsigned long)le64_to_cpu(*(u64 *)data);

                memcpy(data,
                       (void *)((unsigned long)&binary_procmgr_bin_start
                                + ROOT_PHDR_OFF + i * ROOT_PHENT_SIZE
                                + PHDR_MEMSZ_OFF),
                       sizeof(data));
                memsz = (unsigned long)le64_to_cpu(*(u64 *)data);

                struct pmobject *segment_pmo;
                size_t pmo_size = ROUND_UP(memsz, PAGE_SIZE);
                ret = create_pmo(PAGE_SIZE,
                                 PMO_DATA,
                                 root_cap_group,
                                 0,
                                 &segment_pmo);
                BUG_ON(ret < 0);
                vaddr_t segment_content_kvaddr = ((unsigned long)&binary_procmgr_bin_start) + offset;

                BUG_ON(filesz != memsz);
                // No additional memory is needed for .bss (filesz == memsz),
                // so we can directly reuse the content in the kernel image
                // as the segment's physical pages.
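                // create_pmo(PMO_DATA) already kmalloc'ed a page to back the
                // PMO; free that page and point the PMO at the segment bytes
                // resident in the kernel image instead.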
                kfree((void *)phys_to_virt(segment_pmo->start));
                segment_pmo->start = virt_to_phys(segment_content_kvaddr);
                segment_pmo->size = pmo_size;
                unsigned vmr_flags = 0;
                if (flags & PHDR_FLAGS_R)
                        vmr_flags |= VMR_READ;
                if (flags & PHDR_FLAGS_W)
                        vmr_flags |= VMR_WRITE;
                if (flags & PHDR_FLAGS_X)
                        vmr_flags |= VMR_EXEC;

                ret = vmspace_map_range(init_vmspace,
                                        vaddr,
                                        segment_pmo->size,
                                        vmr_flags,
                                        segment_pmo);
                BUG_ON(ret < 0);
        }
        obj_put(init_vmspace);

        stack = ROOT_THREAD_STACK_BASE + ROOT_THREAD_STACK_SIZE;

        /* Allocate a physical page for the main stack for prepare_env */
        kva = (vaddr_t)get_pages(0);
        BUG_ON(kva == 0);
        commit_page_to_pmo(stack_pmo,
                           ROOT_THREAD_STACK_SIZE / PAGE_SIZE - 1,
                           virt_to_phys((void *)kva));

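        /*
         * prepare_env() fills this page through its kernel mapping (kva),
         * while the user thread will see it at the very top of its stack;
         * the initial user stack pointer is then moved just below the env
         * area.
         */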
        prepare_env((char *)kva, stack, ROOT_NAME, &meta);
        stack -= ENV_SIZE_ON_STACK;

        ret = thread_init(thread,
                          root_cap_group,
                          stack,
                          meta.entry,
                          ROOT_THREAD_PRIO,
                          TYPE_USER,
                          smp_get_cpu_id());
        BUG_ON(ret != 0);

        /* Add the thread into the thread_list of the cap_group */
        lock(&root_cap_group->threads_lock);
        list_add(&thread->node, &root_cap_group->thread_list);
        root_cap_group->thread_cnt += 1;
        unlock(&root_cap_group->threads_lock);

        /* Allocate the cap for the init thread */
        thread_cap = cap_alloc(root_cap_group, thread);
        BUG_ON(thread_cap < 0);

        /* L1 icache & dcache have no coherence on aarch64 */
        flush_idcache();

        root_thread = obj_get(root_cap_group, thread_cap, TYPE_THREAD);
        /* Enqueue: put init thread into the ready queue */
        BUG_ON(sched_enqueue(root_thread));
        obj_put(root_thread);
}

static cap_t create_thread(struct cap_group *cap_group, vaddr_t stack,
                           vaddr_t pc, unsigned long arg, u32 prio, u32 type,
                           u64 tls)
{
        struct thread *thread;
        cap_t cap, ret = 0;

        thread = obj_alloc(TYPE_THREAD, sizeof(*thread));
        if (!thread) {
                ret = -ENOMEM;
                goto out_fail;
        }
        ret = thread_init(thread, cap_group, stack, pc, prio, type, NO_AFF);
        if (ret != 0)
                goto out_free_obj;

        lock(&cap_group->threads_lock);

        /*
         * Check the exiting state: do not create new threads if exiting
         * (e.g., after sys_exit_group is executed).
         */
        if (current_thread->thread_ctx->thread_exit_state == TE_EXITING) {
                unlock(&cap_group->threads_lock);
                obj_free(thread);
                obj_put(cap_group);
                sched();
                eret_to_thread(switch_context());
                /* No return */
        }

        list_add(&thread->node, &cap_group->thread_list);
        cap_group->thread_cnt += 1;
        unlock(&cap_group->threads_lock);

        arch_set_thread_arg0(thread, arg);

        /* set thread tls */
        arch_set_thread_tls(thread, tls);

        /* set arch-specific thread state */
        set_thread_arch_spec_state(thread);

        /* cap is thread_cap in the target cap_group */
        cap = cap_alloc(cap_group, thread);
        if (cap < 0) {
                ret = cap;
                goto out_free_obj;
        }
        thread->cap = cap;

        /* ret is thread_cap in the current_cap_group */
        if (cap_group != current_cap_group)
                cap = cap_copy(cap_group, current_cap_group, cap);
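        /*
         * A plain user thread becomes runnable immediately, while shadow and
         * registered threads stay TS_WAITING until they are activated later
         * (e.g., by IPC).
         */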
        if (type == TYPE_USER) {
                thread->thread_ctx->state = TS_INTER;
                BUG_ON(sched_enqueue(thread));
        } else if ((type == TYPE_SHADOW) || (type == TYPE_REGISTER)) {
                thread->thread_ctx->state = TS_WAITING;
        }
        return cap;

out_free_obj:
        obj_free(thread);
out_fail:
        return ret;
}

struct thread_args {
        /* specify the cap_group in which the new thread will be created */
        cap_t cap_group_cap;
        vaddr_t stack;
        vaddr_t pc;
        unsigned long arg;
        vaddr_t tls;
        unsigned int prio;
        unsigned int type;
};

/* Create one thread in a specified cap_group and return the thread cap in it. */
cap_t sys_create_thread(unsigned long thread_args_p)
{
        struct thread_args args = {0};
        struct cap_group *cap_group;
        cap_t thread_cap;
        u32 type;

        if (check_user_addr_range(thread_args_p, sizeof(args)) != 0)
                return -EINVAL;

        int r = copy_from_user(&args, (void *)thread_args_p, sizeof(args));
        if (r) {
                return -EINVAL;
        }
        type = args.type;

        if ((type != TYPE_USER) && (type != TYPE_SHADOW) && (type != TYPE_REGISTER))
                return -EINVAL;

        if (args.prio >= PRIO_NUM)
                return -EINVAL;

        cap_group = obj_get(current_cap_group, args.cap_group_cap, TYPE_CAP_GROUP);
        if (cap_group == NULL)
                return -ECAPBILITY;

        thread_cap = create_thread(
                cap_group, args.stack, args.pc, args.arg, args.prio, type, args.tls);

        obj_put(cap_group);
        return thread_cap;
}

/* Exit the currently running thread */
void sys_thread_exit(void)
{
        int cnt;
#ifdef CHCORE_OH_TEE
        u32 old_exit_state;
#endif /* CHCORE_OH_TEE */

        /* As a normal application, the main thread will eventually invoke
         * sys_exit_group or trigger an unrecoverable fault (e.g., segfault).
         *
         * However, in a malicious application, all of its threads may invoke
         * sys_thread_exit. So, we monitor the number of non-shadow threads
         * in a cap_group (which represents a user process).
         */

        kdebug("%s is invoked\n", __func__);

#ifdef CHCORE_OH_TEE
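        /*
         * Compare-and-swap the exit state so that, if a concurrent
         * sys_terminate_thread races with this exit, only the one that
         * actually flips TE_RUNNING to TE_EXITING decrements thread_cnt.
         */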
        old_exit_state = atomic_cmpxchg_32(
                (s32 *)(&current_thread->thread_ctx->thread_exit_state),
                TE_RUNNING,
                TE_EXITING);

        if (old_exit_state == TE_RUNNING) {
                lock(&(current_cap_group->threads_lock));
                cnt = --current_cap_group->thread_cnt;
                unlock(&(current_cap_group->threads_lock));

                if (cnt == 0) {
                        /*
                         * Current thread is the last thread in this cap_group,
                         * so we invoke sys_exit_group.
                         */
                        kdebug("%s invokes sys_exit_group\n", __func__);
                        sys_exit_group(0);
                        /* The control flow will not reach here */
                }
        }
#else /* CHCORE_OH_TEE */
        /* Set the exit state; the thread will be recycled afterwards */
        current_thread->thread_ctx->thread_exit_state = TE_EXITING;

        lock(&(current_cap_group->threads_lock));
        cnt = --current_cap_group->thread_cnt;
        unlock(&(current_cap_group->threads_lock));

        if (cnt == 0) {
                /*
                 * Current thread is the last thread in this cap_group,
                 * so we invoke sys_exit_group.
                 */
                kdebug("%s invokes sys_exit_group\n", __func__);
                sys_exit_group(0);
                /* The control flow will not reach here */
        }
#endif /* CHCORE_OH_TEE */

        kdebug("%s invokes sched\n", __func__);
        /* Reschedule */
        sched();
        eret_to_thread(switch_context());
}

int sys_set_affinity(cap_t thread_cap, int aff)
{
        struct thread *thread;

        if (aff >= PLAT_CPU_NUM)
                return -EINVAL;

        if (thread_cap == 0)
                /* 0 represents current thread */
                thread = current_thread;
        else
                thread = obj_get(current_cap_group, thread_cap, TYPE_THREAD);

        if (thread == NULL)
                return -ECAPBILITY;

        thread->thread_ctx->affinity = aff;

        if (thread_cap != 0)
                obj_put(thread);

        return 0;
}

int sys_get_affinity(cap_t thread_cap)
{
        struct thread *thread;
        int aff;

        if (thread_cap == 0)
                /* 0 represents current thread */
                thread = current_thread;
        else
                thread = obj_get(current_cap_group, thread_cap, TYPE_THREAD);

        if (thread == NULL)
                return -ECAPBILITY;

        aff = thread->thread_ctx->affinity;

        if (thread_cap != 0)
                obj_put(thread);

        return aff;
}

#ifdef CHCORE_OH_TEE
cap_t sys_get_thread_id(cap_t thread_cap)
{
        struct thread *thread;
        int tid;

        if (thread_cap == 0)
                /* 0 represents current thread */
                thread = current_thread;
        else
                thread = obj_get(current_cap_group, thread_cap, TYPE_THREAD);

        if (thread == NULL)
                return -ECAPBILITY;

        tid = thread->cap;

        if (thread_cap != 0)
                obj_put(thread);

        return tid;
}

int sys_terminate_thread(cap_t thread_cap)
{
        struct thread *thread;
        int ret = 0;
        int cpu;
        s32 old_exit_state;

        if (thread_cap == 0) {
                sys_thread_exit();
                BUG_ON(1);
        } else {
                thread = obj_get(current_cap_group, thread_cap, TYPE_THREAD);
        }

        if (thread == NULL) {
                ret = -ECAPBILITY;
                goto out;
        }

        old_exit_state =
                atomic_cmpxchg_32((s32 *)(&thread->thread_ctx->thread_exit_state),
                                  TE_RUNNING,
                                  TE_EXITING);

        if (old_exit_state == TE_RUNNING) {
                lock(&(current_cap_group->threads_lock));
                current_cap_group->thread_cnt--;
                unlock(&(current_cap_group->threads_lock));
        }

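        /*
         * Notify all other cores to reschedule, so that the target thread,
         * if it is currently running elsewhere, traps into the kernel and
         * observes the TE_EXITING state.
         */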
        for (cpu = 0; cpu < PLAT_CPU_NUM; cpu++) {
                if (cpu != (int)smp_get_cpu_id()) {
                        send_ipi(cpu, IPI_RESCHED);
                }
        }

        obj_put(thread);

out:
        return ret;
}
#endif /* CHCORE_OH_TEE */

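/*
 * These calls only toggle the FIQ mask bit in the thread's saved SPSR_EL1,
 * so the change takes effect when the thread is resumed via eret; they do
 * not mask interrupts while the kernel itself is running.
 */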
void sys_disable_local_irq(void)
{
        current_thread->thread_ctx->ec.reg[SPSR_EL1] |= (SPSR_EL1_FIQ);
}

void sys_enable_local_irq(void)
{
        current_thread->thread_ctx->ec.reg[SPSR_EL1] &= (~(SPSR_EL1_FIQ));
}

int sys_set_prio(cap_t thread_cap, int prio)
{
        /* Only the thread itself (thread_cap == 0) is supported */
        if (thread_cap != 0)
                return -EINVAL;
        /* Limit setting arbitrary priorities */
        if (prio <= 0 || prio > MAX_PRIO)
                return -EINVAL;

        current_thread->thread_ctx->sc->prio = prio;

        return 0;
}

int sys_get_prio(cap_t thread_cap)
{
        /* Only the thread itself (thread_cap == 0) is supported */
        if (thread_cap != 0)
                return -EINVAL;

        return current_thread->thread_ctx->sc->prio;
}