/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <mm/mm.h>
#include <machine.h>
#include <sched/sched.h>
#include <object/thread.h>
#include <mm/uaccess.h>
#include <arch/machine/smp.h>
#include <arch/trustzone/smc.h>
#include <arch/trustzone/tlogger.h>

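/*
 * Per-CPU state for yield SMCs coming from the normal world.
 * pending_req:    a request arrived before any TEE thread was waiting.
 * waiting_thread: the thread blocked in sys_tee_wait_switch_req, if any.
 * regs_k:         kernel-side copy of the incoming SMC registers.
 * regs_u:         the waiting thread's user buffer to copy regs_k into.
 */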
struct smc_percpu_struct {
        bool pending_req;
        struct thread *waiting_thread;
        struct smc_registers regs_k;
        struct smc_registers *regs_u;
} smc_percpu_structs[PLAT_CPU_NUM];

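/* Clear all per-CPU SMC bookkeeping before any yield SMC can arrive. */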
void smc_init(void)
{
        u32 cpuid;
        struct smc_percpu_struct *percpu;

        for (cpuid = 0; cpuid < PLAT_CPU_NUM; cpuid++) {
                percpu = &smc_percpu_structs[cpuid];
                percpu->pending_req = false;
                percpu->waiting_thread = NULL;
        }
}

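/*
 * Kernel shared variables pushed by the normal world. They are carried
 * by a yield SMC and cached here until a TEE task pulls them via
 * sys_tee_pull_kernel_var.
 */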
static bool kernel_shared_var_recved = false;
static kernel_shared_varibles_t kernel_var;

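/*
 * Entry point for a yield SMC from the normal world: record the SMC
 * arguments, hand them to the TEE thread waiting on this CPU (or mark
 * the request pending if none is waiting), then reschedule. This
 * function never returns to its caller; it always erets to a thread.
 */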
void handle_yield_smc(unsigned long x0, unsigned long x1, unsigned long x2,
                      unsigned long x3, unsigned long x4)
{
        int ret;
        struct smc_percpu_struct *percpu;

        enable_tlogger();

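        /* The first yield SMC whose x2 is 0xf is treated as the call that
         * carries the kernel shared variables; cache them exactly once. */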
        if (!kernel_shared_var_recved && x2 == 0xf) {
                kernel_shared_var_recved = true;
                kernel_var.params_stack[0] = x0;
                kernel_var.params_stack[1] = x1;
                kernel_var.params_stack[2] = x2;
                kernel_var.params_stack[3] = x3;
                kernel_var.params_stack[4] = x4;
        }

        percpu = &smc_percpu_structs[smp_get_cpu_id()];
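        /* A previous request on this CPU has not been consumed yet; do
         * not overwrite it, just pick another thread to run. */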
        if (percpu->pending_req) {
                sched();
                eret_to_thread(switch_context());
        }
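        /* Stash the incoming registers as a standard switch request. */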
        percpu->regs_k.x0 = TZ_SWITCH_REQ_STD_REQUEST;
        percpu->regs_k.x1 = x1;
        percpu->regs_k.x2 = x2;
        percpu->regs_k.x3 = x3;
        percpu->regs_k.x4 = x4;

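        /*
         * If a TEE thread is already blocked in sys_tee_wait_switch_req,
         * deliver the registers to its user buffer. copy_to_user resolves
         * addresses in the current vmspace, so temporarily switch to the
         * waiter's vmspace for the copy, then make the waiter runnable.
         * Otherwise leave the request pending for the next waiter.
         */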
        if (percpu->waiting_thread) {
                struct thread *current = current_thread;
                switch_thread_vmspace_to(percpu->waiting_thread);
                ret = copy_to_user((char *)percpu->regs_u,
                                   (char *)&percpu->regs_k,
                                   sizeof(percpu->regs_k));
                switch_thread_vmspace_to(current);
                arch_set_thread_return(percpu->waiting_thread, ret);
                percpu->waiting_thread->thread_ctx->state = TS_INTER;
                BUG_ON(sched_enqueue(percpu->waiting_thread));
                percpu->waiting_thread = NULL;
        } else {
                percpu->pending_req = true;
        }

        sched();
        eret_to_thread(switch_context());
}

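/*
 * Block the calling TEE thread until a yield SMC arrives on this CPU,
 * then copy the SMC registers into regs_u. Returns immediately if a
 * request is already pending; at most one thread may wait per CPU.
 */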
int sys_tee_wait_switch_req(struct smc_registers *regs_u)
{
        int ret;
        struct smc_percpu_struct *percpu;

        percpu = &smc_percpu_structs[smp_get_cpu_id()];

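        /* Fast path: a request arrived before anyone was waiting. */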
        if (percpu->pending_req) {
                percpu->pending_req = false;
                ret = copy_to_user(
                        (char *)regs_u, (char *)&percpu->regs_k, sizeof(percpu->regs_k));
                return ret;
        }

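        /* Only one waiter is allowed per CPU. */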
        if (percpu->waiting_thread) {
                return -EINVAL;
        }

        percpu->waiting_thread = current_thread;
        percpu->regs_u = regs_u;

        current_thread->thread_ctx->state = TS_WAITING;

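        /* Block; handle_yield_smc re-enqueues this thread and sets its
         * return value once a request arrives. */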
        sched();
        eret_to_thread(switch_context());
        BUG("Should not reach here.\n");
}

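/*
 * Switch back to the normal world. regs_u describes the request: either
 * the initial "entry done" handshake (which passes the address of
 * tz_vectors out via x1) or a standard response to a previous yield
 * SMC. smc_call does not return here; the calling thread resumes with
 * return value 0 the next time it is scheduled.
 */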
int sys_tee_switch_req(struct smc_registers *regs_u)
{
        int ret;
        struct smc_registers regs_k;

        ret = copy_from_user((char *)&regs_k, (char *)regs_u, sizeof(regs_k));
        kinfo("%s %d\n", __func__, __LINE__);
        if (ret < 0) {
                return ret;
        }

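        /* Translate the user-level request into SMC arguments. */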
        if (regs_k.x0 == TZ_SWITCH_REQ_ENTRY_DONE) {
                regs_k.x0 = SMC_ENTRY_DONE;
                regs_k.x1 = (vaddr_t)&tz_vectors;
        } else if (regs_k.x0 == TZ_SWITCH_REQ_STD_RESPONSE) {
                regs_k.x0 = SMC_STD_RESPONSE;
                regs_k.x1 = SMC_EXIT_NORMAL;
        } else {
                return -1;
        }

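        /* Arrange for this thread to observe a 0 return value when it is
         * scheduled again, then leave to the normal world via SMC. */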
        arch_set_thread_return(current_thread, 0);
        smc_call(regs_k.x0, regs_k.x1, regs_k.x2, regs_k.x3, regs_k.x4);
        BUG("Should not reach here.\n");
}

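/*
 * Copy the cached kernel shared variables (captured from the bootstrap
 * yield SMC) into the caller's buffer after validating its range.
 */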
int sys_tee_pull_kernel_var(kernel_shared_varibles_t *kernel_var_ubuf)
{
        int ret;

        kinfo("%s\n", __func__);

        if (check_user_addr_range((vaddr_t)kernel_var_ubuf,
                                  sizeof(kernel_shared_varibles_t))) {
                return -EINVAL;
        }

        ret = copy_to_user((char *)kernel_var_ubuf,
                           (char *)&kernel_var,
                           sizeof(kernel_shared_varibles_t));

        return ret;
}