/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <mm/mm.h>
#include <machine.h>
#include <sched/sched.h>
#include <object/thread.h>
#include <mm/uaccess.h>
#include <arch/machine/smp.h>
#include <arch/trustzone/smc.h>
#include <arch/trustzone/tlogger.h>

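/*
 * Per-CPU state for yield SMCs from the normal world: whether a request is
 * still pending, which thread (if any) is blocked waiting for one, a kernel
 * copy of the SMC registers, and the user buffer to copy them into.
 */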
struct smc_percpu_struct {
        bool pending_req;
        struct thread *waiting_thread;
        struct smc_registers regs_k;
        struct smc_registers *regs_u;
} smc_percpu_structs[PLAT_CPU_NUM];

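/* TTBR0_EL1 value used for the SMC path; filled in by init_smc_page_table(). */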
paddr_t smc_ttbr0_el1;

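/*
 * The SMC path reuses the boot-stage low-memory page table; a dedicated ASID
 * (bits 63:48 of the TTBR) is presumably used to keep its TLB entries
 * separate from those of user address spaces.
 */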
#define SMC_ASID 1000UL
static void init_smc_page_table(void)
{
        extern ptp_t boot_ttbr0_l0;

        /* Reuse the boot stage low memory page table */
        smc_ttbr0_el1 = (paddr_t)&boot_ttbr0_l0;
        smc_ttbr0_el1 |= SMC_ASID << 48;
}

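/* Reset the per-CPU SMC state on every core and set up the SMC page table. */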
void smc_init(void)
{
        u32 cpuid;
        struct smc_percpu_struct *percpu;

        for (cpuid = 0; cpuid < PLAT_CPU_NUM; cpuid++) {
                percpu = &smc_percpu_structs[cpuid];
                percpu->pending_req = false;
                percpu->waiting_thread = NULL;
        }

        init_smc_page_table();
}

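/*
 * Kernel shared variables handed over by the normal world on its first yield
 * SMC; cached here until user space pulls them via sys_tee_pull_kernel_var().
 */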
static bool kernel_shared_var_recved = false;
static kernel_shared_varibles_t kernel_var;

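/*
 * Entry point for a yield SMC from the normal world. Records the SMC
 * arguments and either wakes the thread blocked in sys_tee_wait_switch_req()
 * or marks the request as pending, then reschedules.
 */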
void handle_yield_smc(unsigned long x0, unsigned long x1, unsigned long x2,
                      unsigned long x3, unsigned long x4)
{
        int ret;
        struct smc_percpu_struct *percpu;

        enable_tlogger();

        kdebug("%s x: [%lx %lx %lx %lx %lx]\n", __func__, x0, x1, x2, x3, x4);

        /* Switch from SMC page table to process page table */
        switch_vmspace_to(current_thread->vmspace);

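        /*
         * The first yield SMC with x2 == 0xf appears to carry the kernel
         * shared variables; stash them for sys_tee_pull_kernel_var().
         */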
        if (!kernel_shared_var_recved && x2 == 0xf) {
                kernel_shared_var_recved = true;
                kernel_var.params_stack[0] = x0;
                kernel_var.params_stack[1] = x1;
                kernel_var.params_stack[2] = x2;
                kernel_var.params_stack[3] = x3;
                kernel_var.params_stack[4] = x4;
        }

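        /*
         * Locate this CPU's SMC state. If a previous request has not been
         * consumed yet, reschedule without recording this one.
         */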
        percpu = &smc_percpu_structs[smp_get_cpu_id()];
        if (percpu->pending_req) {
                sched();
                eret_to_thread(switch_context());
        }
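        /* Record the yielding SMC's arguments as a standard switch request. */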
        percpu->regs_k.x0 = TZ_SWITCH_REQ_STD_REQUEST;
        percpu->regs_k.x1 = x1;
        percpu->regs_k.x2 = x2;
        percpu->regs_k.x3 = x3;
        percpu->regs_k.x4 = x4;

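        /*
         * If a thread is already blocked in sys_tee_wait_switch_req(), copy
         * the registers into its user buffer (temporarily switching to its
         * vmspace), set its return value to the copy result, and enqueue it
         * back on the scheduler. Otherwise leave the request pending.
         */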
        if (percpu->waiting_thread) {
                struct thread *current = current_thread;
                switch_thread_vmspace_to(percpu->waiting_thread);
                ret = copy_to_user((char *)percpu->regs_u,
                                   (char *)&percpu->regs_k,
                                   sizeof(percpu->regs_k));
                switch_thread_vmspace_to(current);
                arch_set_thread_return(percpu->waiting_thread, ret);
                percpu->waiting_thread->thread_ctx->state = TS_INTER;
                BUG_ON(sched_enqueue(percpu->waiting_thread));
                percpu->waiting_thread = NULL;
        } else {
                percpu->pending_req = true;
        }

        sched();
        eret_to_thread(switch_context());
}

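/*
 * Syscall: wait for the next switch request from the normal world on this
 * CPU. Returns immediately if a request is already pending; otherwise the
 * calling thread blocks until handle_yield_smc() wakes it.
 */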
int sys_tee_wait_switch_req(struct smc_registers *regs_u)
{
        int ret;
        struct smc_percpu_struct *percpu;

        percpu = &smc_percpu_structs[smp_get_cpu_id()];

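        /* A request arrived before we started waiting: consume it right away. */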
        if (percpu->pending_req) {
                percpu->pending_req = false;
                ret = copy_to_user(
                        (char *)regs_u, (char *)&percpu->regs_k, sizeof(percpu->regs_k));
                return ret;
        }

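        /* At most one thread may wait for a switch request on each CPU. */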
        if (percpu->waiting_thread) {
                return -EINVAL;
        }

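        /*
         * Block until the next yield SMC arrives; handle_yield_smc() fills in
         * the user buffer and sets this thread's return value before waking it.
         */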
        percpu->waiting_thread = current_thread;
        percpu->regs_u = regs_u;

        current_thread->thread_ctx->state = TS_WAITING;

        sched();
        eret_to_thread(switch_context());
        BUG("Should not reach here.\n");
}

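/*
 * Syscall: switch back to the normal world, either reporting that TEE entry
 * is done (publishing the address of tz_vectors) or delivering a standard
 * response. The SMC issued here does not return through this path.
 */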
int sys_tee_switch_req(struct smc_registers *regs_u)
{
        int ret;
        struct smc_registers regs_k;

        ret = copy_from_user((char *)&regs_k, (char *)regs_u, sizeof(regs_k));
        if (ret < 0) {
                return ret;
        }

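        /* Translate the user-level request code into SMC call arguments. */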
        if (regs_k.x0 == TZ_SWITCH_REQ_ENTRY_DONE) {
                regs_k.x0 = SMC_ENTRY_DONE;
                regs_k.x1 = (vaddr_t)&tz_vectors;
        } else if (regs_k.x0 == TZ_SWITCH_REQ_STD_RESPONSE) {
                regs_k.x0 = SMC_STD_RESPONSE;
                regs_k.x1 = SMC_EXIT_NORMAL;
        } else {
                return -1;
        }

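        /*
         * Save the FPU state, arrange for this thread to see a return value
         * of 0 when it is scheduled again, and issue the SMC; smc_call()
         * does not return here.
         */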
        save_and_release_fpu_owner();
        arch_set_thread_return(current_thread, 0);
        smc_call(regs_k.x0, regs_k.x1, regs_k.x2, regs_k.x3, regs_k.x4);
        BUG("Should not reach here.\n");
}

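/*
 * Syscall: copy the kernel shared variables cached by handle_yield_smc() into
 * a user-supplied buffer, after validating the destination range.
 */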
int sys_tee_pull_kernel_var(kernel_shared_varibles_t *kernel_var_ubuf)
{
        int ret;

        kinfo("%s\n", __func__);

        if (check_user_addr_range((vaddr_t)kernel_var_ubuf,
                                   sizeof(kernel_shared_varibles_t))) {
                return -EINVAL;
        }

        ret = copy_to_user(
                kernel_var_ubuf, &kernel_var, sizeof(kernel_shared_varibles_t));

        return ret;
}