1 /*
2 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
3 * Licensed under the Mulan PSL v2.
4 * You can use this software according to the terms and conditions of the Mulan PSL v2.
5 * You may obtain a copy of Mulan PSL v2 at:
6 * http://license.coscl.org.cn/MulanPSL2
7 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
8 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
9 * PURPOSE.
10 * See the Mulan PSL v2 for more details.
11 */
12 #include <chcore/container/hashtable.h>
13 #include <chcore/memory.h>
14 #include <chcore/ipc.h>
15 #include <chcore-internal/procmgr_defs.h>
16 #include <chcore/syscall.h>
17
18 #include <string.h>
19 #include <pthread.h>
20
21 #include <mem_ops.h>
22 #include <ipclib.h>
23 #include <assert.h>
24
/* Hard-coded PID of the global task (gtask) inside the TEE. */
#define GTASK_PID (4)
/* Hard-coded TID of the global task. */
#define GTASK_TID (0xa)
/*
 * Combined task id of gtask.
 * NOTE(review): arguments are passed as (tid, pid) — confirm this matches
 * the declared parameter order of pid_to_taskid().
 */
#define GTASK_TASKID (pid_to_taskid(GTASK_TID, GTASK_PID))

/* Debug print prefixed with the current function name and line number. */
#define mem_info(fmt, ...) \
        printf("<%s:%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__)
31
__task_map_ns(uint32_t task_id,uint64_t phy_addr,uint32_t size)32 static cap_t __task_map_ns(uint32_t task_id, uint64_t phy_addr, uint32_t size)
33 {
34 ipc_msg_t *ipc_msg;
35 struct proc_request *req;
36 cap_t pmo;
37
38 ipc_msg = ipc_create_msg(procmgr_ipc_struct, sizeof(*req));
39 req = (struct proc_request *)ipc_get_msg_data(ipc_msg);
40
41 req->req = PROC_REQ_TASK_MAP_NS;
42 req->task_map_ns.task_id = task_id;
43 req->task_map_ns.phy_addr = phy_addr;
44 req->task_map_ns.size = size;
45
46 pmo = ipc_call(procmgr_ipc_struct, ipc_msg);
47
48 ipc_destroy_msg(ipc_msg);
49
50 return pmo;
51 }
52
/*
 * Ask procmgr to undo a previous __task_map_ns() for task_id, identified
 * by the pmo cap it returned. Returns procmgr's status code.
 */
static int __task_unmap_ns(uint32_t task_id, cap_t pmo)
{
        struct proc_request *req;
        ipc_msg_t *msg;
        int rc;

        msg = ipc_create_msg(procmgr_ipc_struct, sizeof(*req));
        req = (struct proc_request *)ipc_get_msg_data(msg);

        req->req = PROC_REQ_TASK_UNMAP_NS;
        req->task_unmap_ns.pmo = pmo;
        req->task_unmap_ns.task_id = task_id;

        rc = ipc_call(procmgr_ipc_struct, msg);
        ipc_destroy_msg(msg);

        return rc;
}
72
__alloc_sharemem(const struct tee_uuid * uuid,size_t size,vaddr_t vaddr)73 static cap_t __alloc_sharemem(const struct tee_uuid *uuid, size_t size,
74 vaddr_t vaddr)
75 {
76 ipc_msg_t *ipc_msg;
77 struct proc_request *req;
78 int ret;
79
80 ipc_msg = ipc_create_msg(procmgr_ipc_struct, sizeof(*req));
81 req = (struct proc_request *)ipc_get_msg_data(ipc_msg);
82
83 req->req = PROC_REQ_TEE_ALLOC_SHM;
84 req->tee_alloc_shm.vaddr = vaddr;
85 memcpy(&req->tee_alloc_shm.uuid, uuid, sizeof(*uuid));
86 req->tee_alloc_shm.size = size;
87
88 ret = ipc_call(procmgr_ipc_struct, ipc_msg);
89
90 ipc_destroy_msg(ipc_msg);
91 return ret;
92 }
93
__get_sharemem(uint32_t task_id,vaddr_t vaddr)94 static cap_t __get_sharemem(uint32_t task_id, vaddr_t vaddr)
95 {
96 ipc_msg_t *ipc_msg;
97 struct proc_request *req;
98 int ret;
99 cap_t pmo;
100
101 ipc_msg = ipc_create_msg(procmgr_ipc_struct, sizeof(*req));
102 req = (struct proc_request *)ipc_get_msg_data(ipc_msg);
103
104 req->req = PROC_REQ_TEE_GET_SHM;
105 req->tee_get_shm.pid = taskid_to_pid(task_id);
106 req->tee_get_shm.vaddr = vaddr;
107
108 ret = ipc_call(procmgr_ipc_struct, ipc_msg);
109 if (ret < 0) {
110 goto out;
111 }
112
113 pmo = ipc_get_msg_cap(ipc_msg, 0);
114 if (pmo < 0) {
115 ret = -EINVAL;
116 goto out;
117 }
118 ret = pmo;
119
120 out:
121 ipc_destroy_msg(ipc_msg);
122 return ret;
123 }
124
__free_sharemem(vaddr_t vaddr)125 static cap_t __free_sharemem(vaddr_t vaddr)
126 {
127 ipc_msg_t *ipc_msg;
128 struct proc_request *req;
129 int ret;
130
131 ipc_msg = ipc_create_msg(procmgr_ipc_struct, sizeof(*req));
132 req = (struct proc_request *)ipc_get_msg_data(ipc_msg);
133
134 req->req = PROC_REQ_TEE_FREE_SHM;
135 req->tee_free_shm.vaddr = vaddr;
136
137 ret = ipc_call(procmgr_ipc_struct, ipc_msg);
138
139 ipc_destroy_msg(ipc_msg);
140 return ret;
141 }
142
/*
 * Bookkeeping record for one memory region mapped by this task.
 * Entries are indexed by pmo cap (pmo2ent) and/or by mapped virtual
 * address (vaddr2ent); note that not every code path inserts an entry
 * into both tables — see the individual map/unmap functions.
 */
struct mem_entry {
        cap_t pmo;      /* capability of the backing physical memory object */
        vaddr_t vaddr;  /* virtual address the pmo is mapped at */
        uint32_t size;  /* mapped size in bytes (page-rounded by callers) */
        struct hlist_node pmo2ent_node;   /* linkage in the pmo2ent table */
        struct hlist_node vaddr2ent_node; /* linkage in the vaddr2ent table */
};

/* Number of buckets in each lookup table. */
#define DEFAULT_HASHTABLE_SIZE 1024

/* pmo cap -> mem_entry and vaddr -> mem_entry lookup tables. */
static struct htable pmo2ent;
static struct htable vaddr2ent;
/* Single lock guarding both tables and their entries. */
static pthread_mutex_t lock;
156
mem_ops_init(void)157 int mem_ops_init(void)
158 {
159 pthread_mutex_init(&lock, NULL);
160 init_htable(&pmo2ent, DEFAULT_HASHTABLE_SIZE);
161 init_htable(&vaddr2ent, DEFAULT_HASHTABLE_SIZE);
162 return 0;
163 }
164
/*
 * Look up the entry for `pmo` in the pmo2ent table.
 * Caller must hold `lock`. Returns NULL when absent.
 */
static struct mem_entry *__get_entry_by_pmo(cap_t pmo)
{
        struct mem_entry *ent;
        struct hlist_head *bucket;

        bucket = htable_get_bucket(&pmo2ent, pmo);
        for_each_in_hlist (ent, pmo2ent_node, bucket) {
                if (ent->pmo == pmo)
                        return ent;
        }

        return NULL;
}
176
/*
 * Look up the entry for `vaddr` in the vaddr2ent table.
 * Caller must hold `lock`. Returns NULL when absent.
 */
static struct mem_entry *__get_entry_by_vaddr(vaddr_t vaddr)
{
        struct mem_entry *ent;
        struct hlist_head *bucket;

        bucket = htable_get_bucket(&vaddr2ent, vaddr);
        for_each_in_hlist (ent, vaddr2ent_node, bucket) {
                if (ent->vaddr == vaddr)
                        return ent;
        }

        return NULL;
}
188
/*
 * Map a non-secure physical range into our own address space.
 * The range is expanded to page granularity, mapped RW, and recorded in
 * vaddr2ent so __unmap_ns_self() can find it later. On success *out_vaddr
 * receives the virtual address corresponding to phy_addr (including its
 * sub-page offset) and 0 is returned; otherwise a negative errno value.
 */
static int __map_ns_self(uint64_t phy_addr, uint32_t size, vaddr_t *out_vaddr)
{
        int ret;
        cap_t pmo;
        void *vaddr;
        struct mem_entry *entry;
        uint64_t phy_addr_begin, phy_addr_end;

        pthread_mutex_lock(&lock);

        /* Expand [phy_addr, phy_addr + size) to whole pages. */
        phy_addr_end = ROUND_UP(phy_addr + size, PAGE_SIZE);
        phy_addr_begin = ROUND_DOWN(phy_addr, PAGE_SIZE);
        size = phy_addr_end - phy_addr_begin;

        entry = malloc(sizeof(*entry));
        if (entry == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        pmo = usys_create_ns_pmo(0, phy_addr_begin, size);
        if (pmo < 0) {
                ret = pmo;
                goto out_free_entry;
        }

        vaddr = chcore_auto_map_pmo(pmo, size, VM_READ | VM_WRITE);
        if (vaddr == NULL) {
                ret = -errno;
                goto out_revoke_pmo;
        }
        /* Re-apply the sub-page offset so the caller lands on phy_addr. */
        vaddr = (void *)((uint64_t)vaddr + (phy_addr & (PAGE_SIZE - 1)));
        *out_vaddr = (vaddr_t)vaddr;

        entry->pmo = pmo;
        entry->vaddr = (vaddr_t)vaddr;
        entry->size = size;
        init_hlist_node(&entry->vaddr2ent_node);
        htable_add(&vaddr2ent, (vaddr_t)vaddr, &entry->vaddr2ent_node);
        /*
         * Fix: dropped the dead re-lookup of the entry just inserted
         * (its result was discarded and served no purpose).
         */

        pthread_mutex_unlock(&lock);
        return 0;

out_revoke_pmo:
        usys_revoke_cap(pmo, false);
out_free_entry:
        free(entry);
out:
        pthread_mutex_unlock(&lock);
        return ret;
}
242
/*
 * Undo a __map_ns_self(): unmap the pmo, drop the bookkeeping entry and
 * release the cap. Returns 0 on success, -ENOENT when vaddr is unknown.
 */
static int __unmap_ns_self(vaddr_t vaddr)
{
        struct mem_entry *ent;
        int rc = 0;

        pthread_mutex_lock(&lock);

        ent = __get_entry_by_vaddr(vaddr);
        if (ent == NULL) {
                rc = -ENOENT;
        } else {
                /* vaddr recorded with sub-page offset; unmap page-aligned. */
                usys_unmap_pmo(0, ent->pmo, ROUND_DOWN(ent->vaddr, PAGE_SIZE));
                htable_del(&ent->vaddr2ent_node);
                usys_revoke_cap(ent->pmo, false);
                free(ent);
        }

        pthread_mutex_unlock(&lock);
        return rc;
}
265
task_map_ns_phy_mem(uint32_t task_id,uint64_t phy_addr,uint32_t size,uint64_t * info)266 int32_t task_map_ns_phy_mem(uint32_t task_id, uint64_t phy_addr, uint32_t size,
267 uint64_t *info)
268 {
269 cap_t pmo;
270
271 mem_info("%x %lx %x %p\n", task_id, phy_addr, size, info);
272
273 if (task_id == 0) {
274 task_id = GTASK_TASKID;
275 }
276
277 if (task_id == get_self_taskid()) {
278 int ret;
279 vaddr_t vaddr;
280
281 ret = __map_ns_self(phy_addr, size, &vaddr);
282 if (ret == 0) {
283 *info = (uint64_t)vaddr;
284 }
285 return ret;
286 } else {
287 pmo = __task_map_ns(task_id, phy_addr, size);
288 if (pmo < 0) {
289 return pmo;
290 } else {
291 *info = pmo;
292 return 0;
293 }
294 }
295 }
296
self_map_ns_phy_mem(uint64_t info,uint32_t size,uint64_t * virt_addr)297 int32_t self_map_ns_phy_mem(uint64_t info, uint32_t size, uint64_t *virt_addr)
298 {
299 int ret;
300 void *addr;
301 cap_t pmo = (cap_t)info;
302 struct mem_entry *entry;
303
304 pthread_mutex_lock(&lock);
305
306 entry = __get_entry_by_pmo(pmo);
307 if (entry != NULL) {
308 ret = -EEXIST;
309 goto out;
310 }
311
312 /* VM_EXEC? */
313 addr = chcore_auto_map_pmo(pmo, size, VM_READ | VM_WRITE);
314 if (addr == NULL) {
315 ret = -errno;
316 goto out;
317 }
318
319 entry = malloc(sizeof(*entry));
320 if (entry == NULL) {
321 ret = -ENOMEM;
322 goto out_unmap;
323 }
324 entry->pmo = pmo;
325 entry->vaddr = (vaddr_t)addr;
326 entry->size = size;
327 init_hlist_node(&entry->pmo2ent_node);
328 init_hlist_node(&entry->vaddr2ent_node);
329 htable_add(&pmo2ent, pmo, &entry->pmo2ent_node);
330 htable_add(&vaddr2ent, (u32)(unsigned long)addr, &entry->vaddr2ent_node);
331
332 *virt_addr = (uint64_t)addr;
333 pthread_mutex_unlock(&lock);
334 return 0;
335
336 out_unmap:
337 chcore_auto_unmap_pmo(pmo, (unsigned long)addr, size);
338 out:
339 pthread_mutex_unlock(&lock);
340 return ret;
341 }
342
task_unmap(uint32_t task_id,uint64_t info,uint32_t size)343 int32_t task_unmap(uint32_t task_id, uint64_t info, uint32_t size)
344 {
345 int ret;
346 cap_t pmo;
347
348 if (task_id == 0) {
349 task_id = GTASK_TASKID;
350 }
351
352 if (task_id == get_self_taskid()) {
353 ret = __unmap_ns_self(info);
354 return ret;
355 } else {
356 pmo = (cap_t)info;
357 ret = __task_unmap_ns(task_id, pmo);
358 return ret;
359 }
360 }
361
self_unmap(uint64_t info)362 int32_t self_unmap(uint64_t info)
363 {
364 struct mem_entry *entry;
365 cap_t pmo = (cap_t)info;
366 int ret = 0;
367
368 pthread_mutex_lock(&lock);
369
370 entry = __get_entry_by_pmo(pmo);
371 if (entry == NULL) {
372 ret = -ENOENT;
373 goto out;
374 }
375
376 chcore_auto_unmap_pmo(entry->pmo, entry->vaddr, entry->size);
377
378 htable_del(&entry->pmo2ent_node);
379 htable_del(&entry->vaddr2ent_node);
380 usys_revoke_cap(entry->pmo, false);
381 free(entry);
382
383 out:
384 pthread_mutex_unlock(&lock);
385 return ret;
386 }
387
alloc_sharemem_aux(const struct tee_uuid * uuid,uint32_t size)388 void *alloc_sharemem_aux(const struct tee_uuid *uuid, uint32_t size)
389 {
390 vaddr_t vaddr;
391 int ret;
392 struct mem_entry *entry;
393 cap_t pmo;
394
395 size = ROUND_UP(size, PAGE_SIZE);
396 pthread_mutex_lock(&lock);
397
398 vaddr = (vaddr_t)chcore_alloc_vaddr(size);
399 if (vaddr == 0) {
400 goto out_fail;
401 }
402
403 entry = malloc(sizeof(*entry));
404 if (entry == NULL) {
405 goto out_fail_free_vaddr;
406 }
407
408 pmo = __alloc_sharemem(uuid, size, vaddr);
409 if (pmo < 0) {
410 goto out_free_entry;
411 }
412
413 ret = usys_map_pmo(0, pmo, vaddr, VM_READ | VM_WRITE);
414 if (ret != 0) {
415 goto out_free_sharemem;
416 }
417
418 entry->pmo = pmo;
419 entry->vaddr = (vaddr_t)vaddr;
420 entry->size = size;
421 init_hlist_node(&entry->vaddr2ent_node);
422 htable_add(&vaddr2ent, (u32)(vaddr_t)vaddr, &entry->vaddr2ent_node);
423
424 pthread_mutex_unlock(&lock);
425 return (void *)vaddr;
426
427 out_free_sharemem:
428 __free_sharemem((vaddr_t)vaddr);
429 out_free_entry:
430 free(entry);
431 out_fail_free_vaddr:
432 chcore_free_vaddr(vaddr, size);
433 out_fail:
434 pthread_mutex_unlock(&lock);
435 return NULL;
436 }
437
free_sharemem(void * addr,uint32_t size)438 uint32_t free_sharemem(void *addr, uint32_t size)
439 {
440 int ret;
441 struct mem_entry *entry;
442
443 pthread_mutex_lock(&lock);
444
445 entry = __get_entry_by_vaddr((vaddr_t)addr);
446 if (entry == NULL) {
447 ret = -ENOENT;
448 goto out;
449 }
450
451 ret = __free_sharemem((vaddr_t)addr);
452 if (ret < 0) {
453 goto out;
454 }
455
456 chcore_auto_unmap_pmo(entry->pmo, entry->vaddr, entry->size);
457 ret = usys_revoke_cap(entry->pmo, true);
458 htable_del(&entry->vaddr2ent_node);
459 free(entry);
460
461 out:
462 pthread_mutex_unlock(&lock);
463 return ret;
464 }
465
map_sharemem(uint32_t src_task,uint64_t vaddr,uint64_t size,uint64_t * vaddr_out)466 int32_t map_sharemem(uint32_t src_task, uint64_t vaddr, uint64_t size,
467 uint64_t *vaddr_out)
468 {
469 int ret;
470 cap_t pmo;
471 void *out_vaddr;
472 struct mem_entry *entry;
473 vaddr_t vaddr_round, vaddr_offset;
474 size_t real_size;
475
476 vaddr_round = ROUND_DOWN(vaddr, PAGE_SIZE);
477 vaddr_offset = vaddr - vaddr_round;
478 real_size = ROUND_UP(size + vaddr_offset, PAGE_SIZE);
479
480 if (src_task == 0) {
481 src_task = GTASK_TASKID;
482 }
483
484 pthread_mutex_lock(&lock);
485
486 pmo = __get_sharemem(src_task, vaddr_round);
487 if (pmo < 0) {
488 ret = pmo;
489 goto out;
490 }
491
492 entry = malloc(sizeof(*entry));
493 if (entry == NULL) {
494 ret = -ENOMEM;
495 goto out_revoke_pmo;
496 }
497
498 out_vaddr = chcore_auto_map_pmo(pmo, real_size, VM_READ | VM_WRITE);
499 if (out_vaddr == NULL) {
500 ret = -errno;
501 goto out_free_entry;
502 }
503 *vaddr_out = (uint64_t)out_vaddr + (uint64_t)vaddr_offset;
504
505 entry->pmo = pmo;
506 entry->vaddr = (vaddr_t)out_vaddr;
507 entry->size = real_size;
508 init_hlist_node(&entry->vaddr2ent_node);
509 init_hlist_node(&entry->pmo2ent_node);
510 htable_add(&vaddr2ent, (u32)(vaddr_t)out_vaddr, &entry->vaddr2ent_node);
511
512 pthread_mutex_unlock(&lock);
513 return 0;
514
515 out_free_entry:
516 free(entry);
517 out_revoke_pmo:
518 usys_revoke_cap(pmo, false);
519 out:
520 pthread_mutex_unlock(&lock);
521 return ret;
522 }
523
unmap_sharemem(void * addr,uint32_t size)524 uint32_t unmap_sharemem(void *addr, uint32_t size)
525 {
526 int ret;
527 struct mem_entry *entry;
528 vaddr_t vaddr, vaddr_round;
529
530 vaddr = (vaddr_t)addr;
531 vaddr_round = ROUND_DOWN(vaddr, PAGE_SIZE);
532
533 pthread_mutex_lock(&lock);
534
535 entry = __get_entry_by_vaddr((vaddr_t)vaddr_round);
536 if (entry == NULL) {
537 ret = -ENOENT;
538 goto out;
539 }
540
541 chcore_auto_unmap_pmo(entry->pmo, entry->vaddr, entry->size);
542 htable_del(&entry->vaddr2ent_node);
543 free(entry);
544 ret = 0;
545
546 out:
547 pthread_mutex_unlock(&lock);
548 return ret;
549 }
550
/*
 * Translate a virtual address to a physical address via the kernel.
 * Returns the physical address, or 0 when the translation fails.
 */
uint64_t virt_to_phys(uintptr_t vaddr)
{
        unsigned long paddr;

        if (usys_get_phys_addr((void *)vaddr, &paddr) != 0)
                return 0;

        return paddr;
}
564
/* Stub: memory-statistics dumping is not implemented yet; always returns 0. */
int32_t dump_mem_info(struct stat_mem_info *info, int print_history)
{
        (void)info;
        (void)print_history;
        printf("%s not implemented!\n", __func__);
        return 0;
}