/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <object/thread.h>
#include <sched/sched.h>
#include <arch/machine/registers.h>
#include <arch/machine/smp.h>
#include <mm/kmalloc.h>
17
/*
 * Initialize the execution context of a freshly created thread.
 *
 * @thread: thread whose context is being filled (thread_ctx must be valid)
 * @stack:  initial user stack pointer (stored in the saved SP_EL0 slot)
 * @func:   user entry point (stored in the saved ELR_EL1 slot)
 * @prio:   scheduling priority, applied only when the thread owns a
 *          scheduling context (sc != NULL)
 * @type:   thread type tag
 * @aff:    CPU affinity
 */
void init_thread_ctx(struct thread *thread, vaddr_t stack, vaddr_t func,
                     u32 prio, u32 type, s32 aff)
{
        struct thread_ctx *ctx = thread->thread_ctx;

        /* Saved register frame: resume at @func on @stack, in EL0t. */
        ctx->ec.reg[SP_EL0] = stack;
        ctx->ec.reg[ELR_EL1] = func;
        ctx->ec.reg[SPSR_EL1] = SPSR_EL1_EL0t;

        /* Scheduler-visible metadata. */
        ctx->state = TS_INIT;
        ctx->type = type;
        ctx->affinity = aff;

        /* A scheduling context may be absent (e.g. shared); fill it only
         * when this thread has one. */
        if (ctx->sc != NULL) {
                ctx->sc->prio = prio;
                ctx->sc->budget = DEFAULT_BUDGET;
        }

        ctx->kernel_stack_state = KS_FREE;
        /* The thread starts out running, i.e. not in the exit path. */
        ctx->thread_exit_state = TE_RUNNING;
}
46
arch_get_thread_stack(struct thread * thread)47 vaddr_t arch_get_thread_stack(struct thread *thread)
48 {
49 return thread->thread_ctx->ec.reg[SP_EL0];
50 }
51
/* Overwrite the saved user stack pointer (SP_EL0 slot) of @thread. */
void arch_set_thread_stack(struct thread *thread, vaddr_t stack)
{
        struct thread_ctx *ctx = thread->thread_ctx;

        ctx->ec.reg[SP_EL0] = stack;
}
56
arch_set_thread_return(struct thread * thread,unsigned long ret)57 void arch_set_thread_return(struct thread *thread, unsigned long ret)
58 {
59 thread->thread_ctx->ec.reg[X0] = ret;
60 }
61
/*
 * Set the instruction pointer @thread will resume at.
 * The saved ELR_EL1 slot holds the resume PC; it is restored into
 * ELR_EL1 on the return-to-user path, so updating the slot suffices.
 */
void arch_set_thread_next_ip(struct thread *thread, vaddr_t ip)
{
        struct thread_ctx *ctx = thread->thread_ctx;

        ctx->ec.reg[ELR_EL1] = ip;
}
69
arch_get_thread_next_ip(struct thread * thread)70 u64 arch_get_thread_next_ip(struct thread *thread)
71 {
72 return thread->thread_ctx->ec.reg[ELR_EL1];
73 }
74
75 /* First argument in X0 */
arch_set_thread_arg0(struct thread * thread,unsigned long arg)76 void arch_set_thread_arg0(struct thread *thread, unsigned long arg)
77 {
78 thread->thread_ctx->ec.reg[X0] = arg;
79 }
80
81 /* Second argument in X1 */
arch_set_thread_arg1(struct thread * thread,unsigned long arg)82 void arch_set_thread_arg1(struct thread *thread, unsigned long arg)
83 {
84 thread->thread_ctx->ec.reg[X1] = arg;
85 }
86
arch_set_thread_arg2(struct thread * thread,unsigned long arg)87 void arch_set_thread_arg2(struct thread *thread, unsigned long arg)
88 {
89 thread->thread_ctx->ec.reg[X2] = arg;
90 }
91
arch_set_thread_arg3(struct thread * thread,unsigned long arg)92 void arch_set_thread_arg3(struct thread *thread, unsigned long arg)
93 {
94 thread->thread_ctx->ec.reg[X3] = arg;
95 }
96
arch_set_thread_tls(struct thread * thread,unsigned long tls)97 void arch_set_thread_tls(struct thread *thread, unsigned long tls)
98 {
99 thread->thread_ctx->tls_base_reg[0] = tls;
100 }
101
102 /* set arch-specific thread state */
/*
 * Set arch-specific thread state.
 * Intentionally a no-op on aarch64; kept so arch-independent code has a
 * uniform hook across architectures.
 */
void set_thread_arch_spec_state(struct thread *thread)
{
        /* Nothing to do on aarch64. */
}
107
108 /* set arch-specific thread state for ipc server thread */
/*
 * Set arch-specific thread state for an IPC server thread.
 * Intentionally a no-op on aarch64; kept so arch-independent code has a
 * uniform hook across architectures.
 */
void set_thread_arch_spec_state_ipc(struct thread *thread)
{
        /* Nothing to do on aarch64. */
}
113
114 /*
115 * Saving registers related to thread local storage.
116 * On aarch64, TPIDR_EL0 is used by convention.
117 */
switch_tls_info(struct thread * from,struct thread * to)118 void switch_tls_info(struct thread *from, struct thread *to)
119 {
120 u64 tpidr_el0;
121
122 if (likely((from) && (from->thread_ctx->type > TYPE_KERNEL))) {
123 /* Save TPIDR_EL0 for thread from */
124 asm volatile("mrs %0, tpidr_el0\n" : "=r"(tpidr_el0));
125 from->thread_ctx->tls_base_reg[0] = tpidr_el0;
126 }
127
128 if (likely((to) && (to->thread_ctx->type > TYPE_KERNEL))) {
129 /* Restore TPIDR_EL0 for thread to */
130 tpidr_el0 = to->thread_ctx->tls_base_reg[0];
131 asm volatile("msr tpidr_el0, %0\n" ::"r"(tpidr_el0));
132 }
133 }
134