1 /*
2 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
3 * Licensed under the Mulan PSL v2.
4 * You can use this software according to the terms and conditions of the Mulan PSL v2.
5 * You may obtain a copy of Mulan PSL v2 at:
6 * http://license.coscl.org.cn/MulanPSL2
7 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
8 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
9 * PURPOSE.
10 * See the Mulan PSL v2 for more details.
11 */
12 #include <oh_mem_ops.h>
13 #include <chcore/syscall.h>
14 #include <proc_node.h>
15 #include <chcore/container/hashtable.h>
16 #include <string.h>
17 #include <pthread.h>
18
19 #include <tee_uuid.h>
20 #include <ipclib.h>
21
22 #define DEFAULT_HTABLE_SIZE 1024
23
/*
 * Bookkeeping record for one TEE shared-memory region created via
 * handle_tee_alloc_sharemem(). Entries are linked into two hash tables:
 * src2ent (keyed by owner pid, matched on (vaddr, pid)) and badge2ent
 * (keyed by owner badge, used for cleanup when the owner exits).
 */
struct shm_entry {
    vaddr_t vaddr;                  /* owner-side virtual address; lookup key together with pid */
    pid_t pid;                      /* pid of the process that allocated the region */
    cap_t pmo;                      /* manager-side PMO cap (self_pmo); revoked on destroy */
    badge_t badge;                  /* IPC badge of the owner; key into badge2ent */
    struct tee_uuid uuid;           /* UUID of the TEE peer allowed to map this region */
    struct hlist_node src2ent_node; /* linkage in src2ent bucket */
    struct hlist_node badge2ent_node; /* linkage in badge2ent bucket */
};
33
/* (vaddr, pid) -> struct shm_entry, bucketed by pid. */
static struct htable src2ent;
/* Serializes all access to src2ent / badge2ent and their entries. */
static pthread_mutex_t oh_shmmgr_lock;
/* badge -> struct shm_entry, bucketed by owner badge (for clean_sharemem). */
static struct htable badge2ent;
37
oh_mem_ops_init(void)38 void oh_mem_ops_init(void)
39 {
40 pthread_mutex_init(&oh_shmmgr_lock, NULL);
41 init_htable(&src2ent, DEFAULT_HTABLE_SIZE);
42 init_htable(&badge2ent, DEFAULT_HTABLE_SIZE);
43 }
44
/*
 * Look up the shm_entry registered for (vaddr, pid), or NULL if none.
 * Caller must hold oh_shmmgr_lock.
 */
static struct shm_entry *__get_entry_by_src(vaddr_t vaddr, pid_t pid)
{
    struct hlist_head *bucket;
    struct shm_entry *cur;

    /* src2ent is bucketed by pid; scan the bucket for an exact match. */
    bucket = htable_get_bucket(&src2ent, pid);
    for_each_in_hlist (cur, src2ent_node, bucket) {
        if (cur->pid != pid) {
            continue;
        }
        if (cur->vaddr == vaddr) {
            return cur;
        }
    }
    return NULL;
}
56
/*
 * IPC handler: create a non-secure (NS) memory PMO for the task named by
 * req->task_map_ns.task_id, covering [phy_addr, phy_addr + size).
 *
 * Replies (via ipc_return) with the new PMO cap on success, or a negative
 * error code (-ENOENT if the task id maps to no known process, otherwise
 * whatever usys_create_ns_pmo() returns).
 */
void handle_task_map_ns(ipc_msg_t *ipc_msg, badge_t badge)
{
    struct proc_node *proc_node;
    struct proc_request *req;
    cap_t pmo;
    pid_t pid;

    req = (struct proc_request *)ipc_get_msg_data(ipc_msg);
    pid = taskid_to_pid(req->task_map_ns.task_id);

    proc_node = get_proc_node_by_pid(pid);
    if (proc_node == NULL) {
        /* Unknown task id: fail cleanly instead of dereferencing NULL. */
        ipc_return(ipc_msg, -ENOENT);
        return;
    }
    pmo = usys_create_ns_pmo(
        proc_node->proc_cap, req->task_map_ns.phy_addr, req->task_map_ns.size);

    ipc_return(ipc_msg, pmo);
}
73
/*
 * IPC handler: destroy the NS memory PMO previously created by
 * handle_task_map_ns() for the task named by req->task_unmap_ns.task_id.
 *
 * Replies (via ipc_return) with 0 on success, or a negative error code
 * (-ENOENT if the task id maps to no known process, otherwise the result
 * of usys_destroy_ns_pmo()).
 */
void handle_task_unmap_ns(ipc_msg_t *ipc_msg, badge_t badge)
{
    int ret;
    struct proc_node *proc_node;
    struct proc_request *req;
    pid_t pid;

    req = (struct proc_request *)ipc_get_msg_data(ipc_msg);
    pid = taskid_to_pid(req->task_unmap_ns.task_id);

    proc_node = get_proc_node_by_pid(pid);
    if (proc_node == NULL) {
        /* Unknown task id: fail cleanly instead of dereferencing NULL. */
        ipc_return(ipc_msg, -ENOENT);
        return;
    }
    ret = usys_destroy_ns_pmo(proc_node->proc_cap, req->task_unmap_ns.pmo);

    ipc_return(ipc_msg, ret);
}
89
/*
 * IPC handler: allocate a TEE shared-memory region for the calling process.
 *
 * Creates a shared PMO pair (one cap granted to the caller's process, one
 * kept by this manager), then records the region in src2ent keyed by
 * (vaddr, pid) and in badge2ent keyed by the owner's badge so that
 * clean_sharemem() can reclaim it when the owner exits.
 *
 * Replies (via ipc_return) with the target-side PMO cap on success, or a
 * negative error code: -ENOENT (unknown badge), -EEXIST (the (vaddr, pid)
 * pair is already registered), -ENOMEM, or the error reported by
 * usys_create_tee_shared_pmo().
 */
void handle_tee_alloc_sharemem(ipc_msg_t *ipc_msg, badge_t badge)
{
    int ret;
    cap_t target_pmo, self_pmo;
    struct proc_node *proc_node;
    struct proc_request *req;
    struct shm_entry *entry;

    req = (struct proc_request *)ipc_get_msg_data(ipc_msg);
    proc_node = get_proc_node(badge);
    if (proc_node == NULL) {
        /* Unknown sender: fail cleanly instead of dereferencing NULL. */
        ipc_return(ipc_msg, -ENOENT);
        return;
    }

    pthread_mutex_lock(&oh_shmmgr_lock);

    /* Each (vaddr, pid) pair may be registered at most once. */
    entry = __get_entry_by_src(req->tee_alloc_shm.vaddr, proc_node->pid);
    if (entry != NULL) {
        ret = -EEXIST;
        goto out;
    }

    entry = malloc(sizeof(*entry));
    if (entry == NULL) {
        ret = -ENOMEM;
        goto out;
    }

    target_pmo = usys_create_tee_shared_pmo(proc_node->proc_cap,
                                            &req->tee_alloc_shm.uuid,
                                            req->tee_alloc_shm.size,
                                            &self_pmo);
    if (target_pmo < 0) {
        ret = target_pmo;
        goto out_free_entry;
    }

    entry->pid = proc_node->pid;
    entry->pmo = self_pmo;
    memcpy(&entry->uuid, &req->tee_alloc_shm.uuid, sizeof(struct tee_uuid));
    entry->vaddr = req->tee_alloc_shm.vaddr;
    entry->badge = badge;
    init_hlist_node(&entry->src2ent_node);
    init_hlist_node(&entry->badge2ent_node);
    htable_add(&src2ent, proc_node->pid, &entry->src2ent_node);
    htable_add(&badge2ent, badge, &entry->badge2ent_node);

    pthread_mutex_unlock(&oh_shmmgr_lock);
    ipc_return(ipc_msg, target_pmo);
    /* Defensive: never fall through into the error labels below. */
    return;

out_free_entry:
    free(entry);
out:
    pthread_mutex_unlock(&oh_shmmgr_lock);
    ipc_return(ipc_msg, ret);
}
143
/*
 * IPC handler: hand out the PMO cap of a shared-memory region previously
 * registered by its owner with handle_tee_alloc_sharemem().
 *
 * The region is located by (req->tee_get_shm.vaddr, req->tee_get_shm.pid);
 * the cap is released only if the caller's UUID matches the UUID the owner
 * bound the region to.
 *
 * Replies with the PMO cap attached (ipc_return_with_cap) on success, or a
 * negative error code via ipc_return: -ENOENT (unknown badge or region not
 * found), -EINVAL (caller UUID does not match).
 */
void handle_tee_get_sharemem(ipc_msg_t *ipc_msg, badge_t badge)
{
    int ret;
    struct proc_node *proc_node;
    struct proc_request *req;
    struct shm_entry *entry;

    req = (struct proc_request *)ipc_get_msg_data(ipc_msg);
    proc_node = get_proc_node(badge);
    if (proc_node == NULL) {
        /* Unknown sender: fail cleanly instead of dereferencing NULL. */
        ipc_return(ipc_msg, -ENOENT);
        return;
    }

    pthread_mutex_lock(&oh_shmmgr_lock);

    entry = __get_entry_by_src(req->tee_get_shm.vaddr, req->tee_get_shm.pid);
    if (entry == NULL) {
        ret = -ENOENT;
        goto out;
    }

    /* Only the TEE peer the owner named at alloc time may map the region. */
    if (memcmp(&proc_node->puuid.uuid, &entry->uuid, sizeof(struct tee_uuid))
        != 0) {
        ret = -EINVAL;
        goto out;
    }

    ipc_msg->cap_slot_number = 1;
    ipc_set_msg_cap(ipc_msg, 0, entry->pmo);

    pthread_mutex_unlock(&oh_shmmgr_lock);
    ipc_return_with_cap(ipc_msg, 0);
    /* Defensive: never fall through into the error label below. */
    return;

out:
    pthread_mutex_unlock(&oh_shmmgr_lock);
    ipc_return(ipc_msg, ret);
}
178
__destroy_entry(struct shm_entry * entry)179 static void __destroy_entry(struct shm_entry *entry)
180 {
181 htable_del(&entry->src2ent_node);
182 htable_del(&entry->badge2ent_node);
183 usys_revoke_cap(entry->pmo, false);
184 free(entry);
185 }
186
/*
 * IPC handler: free the shared-memory region the calling process registered
 * at req->tee_free_shm.vaddr.
 *
 * Freeing an address that is not registered is treated as success (the call
 * is idempotent). Replies (via ipc_return) with 0 on success or -ENOENT if
 * the badge maps to no known process.
 */
void handle_tee_free_sharemem(ipc_msg_t *ipc_msg, badge_t badge)
{
    int ret = 0;
    struct proc_node *proc_node;
    struct proc_request *req;
    struct shm_entry *entry;

    req = (struct proc_request *)ipc_get_msg_data(ipc_msg);
    proc_node = get_proc_node(badge);
    if (proc_node == NULL) {
        /* Unknown sender: fail cleanly instead of dereferencing NULL. */
        ipc_return(ipc_msg, -ENOENT);
        return;
    }

    pthread_mutex_lock(&oh_shmmgr_lock);

    entry = __get_entry_by_src(req->tee_free_shm.vaddr, proc_node->pid);
    if (entry == NULL) {
        /* Nothing registered at this vaddr: report success (idempotent). */
        goto out;
    }
    __destroy_entry(entry);

out:
    pthread_mutex_unlock(&oh_shmmgr_lock);
    ipc_return(ipc_msg, ret);
}
209
/*
 * Reclaim every shared-memory region owned by @badge. Called when the
 * owning process goes away; walks the badge2ent bucket and destroys each
 * matching entry (safe iteration, since entries are removed mid-walk).
 */
void clean_sharemem(badge_t badge)
{
    struct shm_entry *cur, *next;
    struct hlist_head *bucket;

    pthread_mutex_lock(&oh_shmmgr_lock);
    bucket = htable_get_bucket(&badge2ent, badge);
    for_each_in_hlist_safe (cur, next, badge2ent_node, bucket) {
        if (cur->badge != badge) {
            continue;
        }
        __destroy_entry(cur);
    }
    pthread_mutex_unlock(&oh_shmmgr_lock);
}
223