/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <object/object.h>
#include <object/thread.h>
#include <object/memory.h>
#include <mm/vmspace.h>
#include <mm/uaccess.h>
#include <mm/mm.h>
#include <mm/kmalloc.h>
#include <common/lock.h>
#include <common/util.h>
#include <arch/mmu.h>
#include <object/user_fault.h>
#include <syscall/syscall_hooks.h>
#include <arch/mm/cache.h>

#include "mmap.h"

static int pmo_init(struct pmobject *pmo, pmo_type_t type, size_t len,
                    paddr_t paddr, struct cap_group *cap_group);
void pmo_deinit(void *pmo_ptr);

/*
 * @paddr is only used when creating a device pmo;
 * @new_pmo is an output argument if it is not NULL.
 */
cap_t create_pmo(size_t size, pmo_type_t type, struct cap_group *cap_group,
                 paddr_t paddr, struct pmobject **new_pmo)
{
    cap_t r, cap;
    struct pmobject *pmo;

    pmo = obj_alloc(TYPE_PMO, sizeof(*pmo));
    if (!pmo) {
        r = -ENOMEM;
        goto out_fail;
    }

    r = pmo_init(pmo, type, size, paddr, cap_group);
    if (r)
        goto out_free_obj;

    cap = cap_alloc(cap_group, pmo);
    if (cap < 0) {
        r = cap;
        goto out_pmo_deinit;
    }

    if (new_pmo != NULL)
        *new_pmo = pmo;

    return cap;

out_pmo_deinit:
    pmo_deinit(pmo);
out_free_obj:
    obj_free(pmo);
out_fail:
    return r;
}

cap_t sys_create_device_pmo(unsigned long paddr, unsigned long size)
{
    cap_t r;

    if (size == 0)
        return -EINVAL;

    // Authorization check on this sensitive syscall
    if ((r = hook_sys_create_device_pmo(paddr, size)) != 0)
        return r;

    r = create_pmo(size, PMO_DEVICE, current_cap_group, paddr, NULL);

    return r;
}

int sys_tee_create_ns_pmo(unsigned long paddr, unsigned long size)
{
    int r;

    if (size == 0)
        return -EINVAL;

    r = create_pmo(size, PMO_TZ_NS, current_cap_group, paddr, NULL);

    return r;
}

cap_t sys_create_pmo(unsigned long size, pmo_type_t type)
{
    if ((size == 0) || (type == PMO_DEVICE))
        return -EINVAL;
#ifdef CHCORE_OH_TEE
    if (type == PMO_TZ_NS)
        return -EINVAL;
#endif /* CHCORE_OH_TEE */
    return create_pmo(size, type, current_cap_group, 0, NULL);
}

#define WRITE 0
#define READ  1
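/*
 * Copy @size bytes between a user buffer and a pmo at @offset, without
 * requiring the pmo to be mapped in the caller's vmspace.
 * - For PMO_DATA / PMO_DATA_NOCACHE, the backing memory is physically
 *   contiguous, so a single copy through the kernel direct mapping suffices.
 * - For PMO_ANONYM, the copy proceeds page by page through the radix of
 *   committed pages; pages that have not been committed yet are allocated
 *   and zeroed on demand, just as the page-fault handler would do.
 */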
static int read_write_pmo(cap_t pmo_cap, unsigned long offset,
                          unsigned long user_buf, unsigned long size,
                          unsigned long op_type)
{
    struct pmobject *pmo;
    pmo_type_t pmo_type;
    vaddr_t kva;
    int r = 0;

    /* Only READ and WRITE operations are allowed. */
    if (op_type != READ && op_type != WRITE) {
        r = -EINVAL;
        goto out_fail;
    }

    if (check_user_addr_range(user_buf, size) != 0) {
        r = -EINVAL;
        goto out_fail;
    }

    pmo = obj_get(current_cap_group, pmo_cap, TYPE_PMO);
    if (!pmo) {
        r = -ECAPBILITY;
        goto out_fail;
    }

    /* Overflow and range checks */
    if ((offset + size < offset) || (offset + size > pmo->size)) {
        r = -EINVAL;
        goto out_obj_put;
    }

    pmo_type = pmo->type;
    if ((pmo_type != PMO_DATA) && (pmo_type != PMO_DATA_NOCACHE)
        && (pmo_type != PMO_ANONYM)) {
        r = -EINVAL;
        goto out_obj_put;
    }

    if (pmo_type == PMO_DATA || pmo_type == PMO_DATA_NOCACHE) {
        kva = phys_to_virt(pmo->start) + offset;
        if (op_type == WRITE)
            r = copy_from_user((void *)kva, (void *)user_buf, size);
        else // op_type == READ
            r = copy_to_user((void *)user_buf, (void *)kva, size);

        if (r) {
            r = -EINVAL;
            goto out_obj_put;
        }
    } else {
        /* PMO_ANONYM */
        unsigned long index;
        unsigned long pa;
        unsigned long to_read_write;
        unsigned long offset_in_page;

        while (size > 0) {
            index = ROUND_DOWN(offset, PAGE_SIZE) / PAGE_SIZE;
            pa = get_page_from_pmo(pmo, index);
            if (pa == 0) {
                /* Allocate a physical page for the anonymous
                 * pmo, as if a page fault had happened.
                 */
                kva = (vaddr_t)get_pages(0);
                if (kva == 0) {
                    r = -ENOMEM;
                    goto out_obj_put;
                }

                pa = virt_to_phys((void *)kva);
                memset((void *)kva, 0, PAGE_SIZE);
                commit_page_to_pmo(pmo, index, pa);

                /* No need to map the physical page into the page
                 * table of the current process: the pmo is accessed
                 * through read/write_pmo, so the mapping is not
                 * required.
                 */
            } else {
                kva = phys_to_virt(pa);
            }
            /* Now kva points to the beginning of a page;
             * add the offset within the page. */
            offset_in_page = offset - ROUND_DOWN(offset, PAGE_SIZE);
            kva += offset_in_page;
            to_read_write = MIN(PAGE_SIZE - offset_in_page, size);

            if (op_type == WRITE)
                r = copy_from_user(
                    (void *)kva, (void *)user_buf, to_read_write);
            else // op_type == READ
                r = copy_to_user((void *)user_buf, (void *)kva, to_read_write);

            if (r) {
                r = -EINVAL;
                goto out_obj_put;
            }

            /* Advance the pmo offset and the user buffer together. */
            offset += to_read_write;
            user_buf += to_read_write;
            size -= to_read_write;
        }
    }

out_obj_put:
    obj_put(pmo);
out_fail:
    return r;
}

/*
 * A process can send a PMO (with messages) to another process.
 * With this function, it can write the messages without mapping the PMO.
 */
int sys_write_pmo(cap_t pmo_cap, unsigned long offset, unsigned long user_ptr,
                  unsigned long len)
{
    return read_write_pmo(pmo_cap, offset, user_ptr, len, WRITE);
}

int sys_read_pmo(cap_t pmo_cap, unsigned long offset, unsigned long user_ptr,
                 unsigned long len)
{
    return read_write_pmo(pmo_cap, offset, user_ptr, len, READ);
}

/* Given a virtual address, return its corresponding physical address. */
int sys_get_phys_addr(vaddr_t va, paddr_t *pa_buf)
{
    struct vmspace *vmspace;
    paddr_t pa;
    int ret;

    // if ((ret = hook_sys_get_phys_addr(va, pa_buf)) != 0)
    //         return ret;

    if ((check_user_addr_range(va, 0) != 0)
        || (check_user_addr_range((vaddr_t)pa_buf, sizeof(*pa_buf)) != 0))
        return -EINVAL;

    vmspace = current_thread->vmspace;
    lock(&vmspace->pgtbl_lock);
    ret = query_in_pgtbl(vmspace->pgtbl, va, &pa, NULL);
    unlock(&vmspace->pgtbl_lock);

    if (ret < 0)
        return ret;

    ret = copy_to_user(pa_buf, &pa, sizeof(*pa_buf));
    if (ret) {
        return -EINVAL;
    }

    return 0;
}

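/*
 * Translate a user virtual address in the current vmspace into the
 * corresponding kernel (direct-mapped) virtual address.
 * Note: the caller is expected to ensure the backing page stays valid
 * while it uses the returned kernel address.
 */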
int trans_uva_to_kva(vaddr_t user_va, vaddr_t *kernel_va)
{
    struct vmspace *vmspace = current_thread->vmspace;
    paddr_t pa;
    int ret;

    lock(&vmspace->pgtbl_lock);
    ret = query_in_pgtbl(vmspace->pgtbl, user_va, &pa, NULL);
    unlock(&vmspace->pgtbl_lock);

    if (ret < 0)
        return ret;

    *kernel_va = phys_to_virt(pa);
    return 0;
}

/*
 * A process can not only map a PMO into its own address space,
 * but can also map a PMO into another process's (e.g., to load code for it).
 */
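/*
 * Minimal usage sketch (hypothetical user-level wrapper names; the real
 * user-space API may differ):
 *
 *     cap_t pmo = usys_create_pmo(PAGE_SIZE, PMO_DATA);
 *     usys_map_pmo(self_cap_group, pmo, addr, perm, -1);
 *
 * Passing len == -1 maps the whole pmo (see the default below).
 */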
int sys_map_pmo(cap_t target_cap_group_cap, cap_t pmo_cap, unsigned long addr,
                unsigned long perm, unsigned long len)
{
    struct vmspace *vmspace;
    struct pmobject *pmo;
    struct cap_group *target_cap_group;
    int r;

    pmo = obj_get(current_cap_group, pmo_cap, TYPE_PMO);
    if (!pmo) {
        r = -ECAPBILITY;
        goto out_fail;
    }

#ifdef CHCORE_OH_TEE
    if (pmo->type == PMO_TZ_NS) {
        if (((struct ns_pmo_private *)pmo->private)->mapped) {
            r = -EINVAL;
            goto out_obj_put_pmo;
        }
    }
#endif /* CHCORE_OH_TEE */

    /* The default length (-1) means the whole pmo size. */
    if (likely(len == -1))
        len = pmo->size;

    if (check_user_addr_range(addr, len) != 0) {
        r = -EINVAL;
        goto out_obj_put_pmo;
    }

    /* map the pmo to the target cap_group */
    target_cap_group =
        obj_get(current_cap_group, target_cap_group_cap, TYPE_CAP_GROUP);
    if (!target_cap_group) {
        r = -ECAPBILITY;
        goto out_obj_put_pmo;
    }
    vmspace = obj_get(target_cap_group, VMSPACE_OBJ_ID, TYPE_VMSPACE);
    BUG_ON(vmspace == NULL);

#ifdef CHCORE_OH_TEE
    if (pmo->type == PMO_SHM && pmo->private != NULL) {
        struct tee_shm_private *private;
        private = (struct tee_shm_private *)pmo->private;
        if (private->owner != target_cap_group
            && memcmp(&current_cap_group->uuid,
                      &private->uuid,
                      sizeof(struct tee_uuid))
                   != 0) {
            r = -EINVAL;
            goto out_obj_put_vmspace;
        }
    }
#endif /* CHCORE_OH_TEE */

    r = vmspace_map_range(vmspace, addr, len, perm, pmo);
    if (r != 0) {
        r = -EPERM;
        goto out_obj_put_vmspace;
    }

    /*
     * When a process maps a pmo into another process,
     * this function returns the new cap in the target process.
     */
    if (target_cap_group != current_cap_group)
        /* if using cap_move, we need to consider removing the mappings */
        r = cap_copy(current_cap_group, target_cap_group, pmo_cap);
    else
        r = 0;

out_obj_put_vmspace:
    obj_put(vmspace);
    obj_put(target_cap_group);
out_obj_put_pmo:
    obj_put(pmo);
out_fail:
    return r;
}

/* Example usage: used in ipc/connection.c for mapping ipc_shm. */
int map_pmo_in_current_cap_group(cap_t pmo_cap, unsigned long addr,
                                 unsigned long perm)
{
    struct vmspace *vmspace;
    struct pmobject *pmo;
    int r;

    pmo = obj_get(current_cap_group, pmo_cap, TYPE_PMO);
    if (!pmo) {
        kdebug("map fails: invalid pmo (cap is %lu)\n", pmo_cap);
        r = -ECAPBILITY;
        goto out_fail;
    }

    vmspace = obj_get(current_cap_group, VMSPACE_OBJ_ID, TYPE_VMSPACE);
    BUG_ON(vmspace == NULL);

    if (check_user_addr_range(addr, pmo->size) != 0) {
        r = -EINVAL;
        /* release the pmo and vmspace references taken above */
        goto out_obj_put_vmspace;
    }

    r = vmspace_map_range(vmspace, addr, pmo->size, perm, pmo);
    if (r != 0) {
        goto out_obj_put_vmspace;
    }

out_obj_put_vmspace:
    obj_put(vmspace);
    obj_put(pmo);
out_fail:
    return r;
}

int sys_unmap_pmo(cap_t target_cap_group_cap, cap_t pmo_cap, unsigned long addr)
{
    struct vmspace *vmspace;
    struct pmobject *pmo;
    struct cap_group *target_cap_group;
    int ret;

    /* The caller must hold the pmo_cap. */
    pmo = obj_get(current_cap_group, pmo_cap, TYPE_PMO);
    if (!pmo)
        return -ECAPBILITY;

    /* get the target cap_group whose vmspace will be unmapped */
    target_cap_group =
        obj_get(current_cap_group, target_cap_group_cap, TYPE_CAP_GROUP);
    if (!target_cap_group) {
        ret = -ECAPBILITY;
        goto out_obj_put_pmo;
    }

    vmspace = obj_get(target_cap_group, VMSPACE_OBJ_ID, TYPE_VMSPACE);
    if (!vmspace) {
        ret = -ECAPBILITY;
        goto out_obj_put_cap_group;
    }

    ret = vmspace_unmap_range(vmspace, addr, pmo->size);

    obj_put(vmspace);

out_obj_put_cap_group:
    obj_put(target_cap_group);
out_obj_put_pmo:
    obj_put(pmo);

    return ret;
}

/*
 * Initialize an allocated pmobject.
 * @paddr is only used when @type == PMO_DEVICE || @type == PMO_TZ_NS.
 */
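/*
 * Allocation strategy per type:
 * - PMO_DATA / PMO_DATA_NOCACHE: eagerly allocate contiguous physical
 *   memory with kmalloc.
 * - PMO_ANONYM / PMO_SHM / PMO_FILE: allocate lazily; committed pages are
 *   tracked in a radix tree.
 * - PMO_DEVICE / PMO_TZ_NS: no allocation, only record the given paddr.
 * - PMO_FORBID: no backing memory at all.
 */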
static int pmo_init(struct pmobject *pmo, pmo_type_t type, size_t len,
                    paddr_t paddr, struct cap_group *cap_group)
{
    int ret = 0;

#ifdef CHCORE_OH_TEE
    lock(&cap_group->heap_size_lock);
    if (cap_group->heap_size_used + len > cap_group->heap_size_limit) {
        ret = -ENOMEM;
        goto out;
    }
#endif /* CHCORE_OH_TEE */

    memset((void *)pmo, 0, sizeof(*pmo));

    len = ROUND_UP(len, PAGE_SIZE);
    pmo->size = len;
    pmo->type = type;

    switch (type) {
    case PMO_DATA:
    case PMO_DATA_NOCACHE: {
        /*
         * For PMO_DATA, the user will use it soon (we expect),
         * so we directly allocate the physical memory.
         * Note that kmalloc(>2048) returns contiguous physical pages.
         */
        void *new_va = kmalloc(len);
        if (new_va == NULL) {
            /* Break (instead of returning) so that the heap_size_lock
             * taken above under CHCORE_OH_TEE is properly released. */
            ret = -ENOMEM;
            break;
        }

        /* Clear the allocated memory */
        memset(new_va, 0, len);
        if (type == PMO_DATA_NOCACHE)
            arch_flush_cache((vaddr_t)new_va, len, CACHE_CLEAN_AND_INV);
        pmo->start = virt_to_phys(new_va);
        break;
    }
    case PMO_FILE: {
#ifdef CHCORE_ENABLE_FMAP
        /*
         * PMO backed by a file.
         * We store the PMO_FILE metadata (an fmap_fault_pool) in
         * pmo->private; pmo->private was initialized to NULL by the
         * memset above.
         */
        struct fmap_fault_pool *pool_iter;
        badge_t badge;
        badge = current_cap_group->badge;
        lock(&fmap_fault_pool_list_lock);
        for_each_in_list (
            pool_iter, struct fmap_fault_pool, node, &fmap_fault_pool_list) {
            if (pool_iter->cap_group_badge == badge) {
                pmo->private = pool_iter;
                break;
            }
        }
        unlock(&fmap_fault_pool_list_lock);
        if (pmo->private == NULL) {
            /* fmap_fault_pool not registered */
            ret = -EINVAL;
            break;
        }

        pmo->radix = new_radix();
        init_radix(pmo->radix);
#else
        kwarn("fmap is not implemented, we should not use PMO_FILE\n");
        ret = -EINVAL;
#endif
        break;
    }
    case PMO_ANONYM:
    case PMO_SHM: {
        /*
         * For PMO_ANONYM (e.g., stack and heap) or PMO_SHM,
         * we do not allocate the physical memory at once.
         */
        pmo->radix = new_radix();
        init_radix(pmo->radix);
        break;
    }
    case PMO_DEVICE: {
        /*
         * For device memory (e.g., for DMA).
         * We must ensure the range [paddr, paddr+len) is not
         * in the main memory region.
         */
        pmo->start = paddr;
        break;
    }
#ifdef CHCORE_OH_TEE
    case PMO_TZ_NS: {
        pmo->start = paddr;
        pmo->private = kzalloc(sizeof(struct ns_pmo_private));
        if (pmo->private == NULL) {
            ret = -ENOMEM;
        } else {
            ((struct ns_pmo_private *)pmo->private)->creater =
                current_cap_group;
        }
        break;
    }
#endif /* CHCORE_OH_TEE */
    case PMO_FORBID: {
        /* This type marks that the corresponding area cannot be accessed. */
        break;
    }
    default: {
        ret = -EINVAL;
        break;
    }
    }
#ifdef CHCORE_OH_TEE
out:
    if (ret == 0) {
        cap_group->heap_size_used += len;
        lock_init(&pmo->owner_lock);
        pmo->owner = obj_get(cap_group, CAP_GROUP_OBJ_ID, TYPE_CAP_GROUP);
        BUG_ON(pmo->owner == NULL);
    }
    unlock(&cap_group->heap_size_lock);
#endif /* CHCORE_OH_TEE */
    return ret;
}

/* Record the physical page allocated to a pmo. */
void commit_page_to_pmo(struct pmobject *pmo, unsigned long index, paddr_t pa)
{
    int ret;

    BUG_ON((pmo->type != PMO_ANONYM) && (pmo->type != PMO_SHM)
           && (pmo->type != PMO_FILE));
    /* The radix interfaces are thread-safe. */
    ret = radix_add(pmo->radix, index, (void *)pa);
    BUG_ON(ret != 0);
}

/* Return 0 (NULL) when not found. */
paddr_t get_page_from_pmo(struct pmobject *pmo, unsigned long index)
{
    paddr_t pa;

    /* The radix interfaces are thread-safe. */
    pa = (paddr_t)radix_get(pmo->radix, index);
    return pa;
}

static void __free_pmo_page(void *addr)
{
    kfree((void *)phys_to_virt(addr));
}

void pmo_deinit(void *pmo_ptr)
{
    struct pmobject *pmo;
    pmo_type_t type;

    pmo = (struct pmobject *)pmo_ptr;
    type = pmo->type;

#ifdef CHCORE_OH_TEE
    lock(&pmo->owner->heap_size_lock);
    pmo->owner->heap_size_used -= pmo->size;
    unlock(&pmo->owner->heap_size_lock);
    obj_put(pmo->owner);
#endif /* CHCORE_OH_TEE */

    switch (type) {
    case PMO_DATA:
    case PMO_DATA_NOCACHE: {
        paddr_t start_addr;

        /* PMO_DATA contains contiguous physical pages. */
        start_addr = pmo->start;
        kfree((void *)phys_to_virt(start_addr));

        break;
    }
    case PMO_SHM: {
        if (pmo->private) {
            kfree(pmo->private);
        }
    }
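        /* Fall through: a PMO_SHM also owns a radix of committed pages,
         * which is freed by the PMO_FILE/PMO_ANONYM path below. */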
    case PMO_FILE:
    case PMO_ANONYM: {
        struct radix *radix;

        radix = pmo->radix;
        BUG_ON(radix == NULL);
        /*
         * Set value_deleter to free each memory page while
         * traversing the radix tree in radix_free.
         */
        radix->value_deleter = __free_pmo_page;
        radix_free(radix);

        break;
    }
    case PMO_DEVICE:
#ifdef CHCORE_OH_TEE
    case PMO_TZ_NS: {
        kfree(pmo->private);
        break;
    }
#endif /* CHCORE_OH_TEE */
    case PMO_FORBID: {
        break;
    }
    default: {
        kinfo("Unsupported pmo type: %d\n", type);
        BUG_ON(1);
        break;
    }
    }

    /* The pmo struct itself will be freed in __free_object. */
}

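/*
 * Handle the brk syscall on the per-process heap region.
 * - addr == 0: lazily create the heap pmo/vmr starting at heap_start and
 *   return heap_start.
 * - addr >= current break: enlarge the heap vmr and its pmo (subject to the
 *   cap_group heap-size limit) and return the new break.
 * - addr < current break: shrinking is currently ignored.
 */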
unsigned long sys_handle_brk(unsigned long addr, unsigned long heap_start)
{
    struct vmspace *vmspace;
    struct pmobject *pmo;
    struct vmregion *heap_vmr;
    size_t len;
    unsigned long retval = 0;
    cap_t pmo_cap;

    if ((check_user_addr_range(addr, 0) != 0)
        || (check_user_addr_range(heap_start, 0) != 0))
        return -EINVAL;

    vmspace = obj_get(current_cap_group, VMSPACE_OBJ_ID, TYPE_VMSPACE);
    BUG_ON(vmspace == NULL);
    lock(&vmspace->vmspace_lock);
    if (addr == 0) {
        retval = heap_start;

        /* create the heap pmo for the user process */
        len = 0;
        pmo_cap = create_pmo(len, PMO_ANONYM, current_cap_group, 0, &pmo);
        if (pmo_cap < 0) {
            kinfo("Fail: cannot create the initial heap pmo.\n");
            BUG_ON(1);
        }

        /* setup the vmr for the heap region */
        heap_vmr = init_heap_vmr(vmspace, retval, pmo);
        if (!heap_vmr) {
            kinfo("Fail: cannot create the initial heap vmr.\n");
            BUG_ON(1);
        }
        vmspace->heap_vmr = heap_vmr;
    } else {
        heap_vmr = vmspace->heap_vmr;
        if (unlikely(heap_vmr == NULL))
            goto out;

        /* old heap end */
        retval = heap_vmr->start + heap_vmr->size;

        if (addr >= retval) {
            /* enlarge the heap vmr and pmo */
            len = addr - retval;
            lock(&current_cap_group->heap_size_lock);
            if (current_cap_group->heap_size_used + len
                > current_cap_group->heap_size_limit) {
                retval = -ENOMEM;
            } else {
                current_cap_group->heap_size_used += len;
                adjust_heap_vmr(vmspace, len);
                retval = addr;
            }
            unlock(&current_cap_group->heap_size_lock);
        } else {
            kwarn("VM: ignore shrinking the heap.\n");
        }
    }

out:
    unlock(&vmspace->vmspace_lock);
    obj_put(vmspace);
    return retval;
}

/* A process's mmap region starts at MMAP_START (defined in mm/vmregion.c). */
static vmr_prop_t get_vmr_prot(int prot)
{
    vmr_prop_t ret;

    ret = 0;
    if (prot & PROT_READ)
        ret |= VMR_READ;
    if (prot & PROT_WRITE)
        ret |= VMR_WRITE;
    if (prot & PROT_EXEC)
        ret |= VMR_EXEC;

    return ret;
}

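/*
 * mprotect walks the vmrs covering [addr, addr + length). The whole range
 * must be mapped; if the range only partially covers a vmr, the request is
 * ignored because vmr splitting is not supported yet. Otherwise the vmr
 * permissions are updated and the existing page-table mappings are changed
 * accordingly.
 */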
int sys_handle_mprotect(unsigned long addr, unsigned long length, int prot)
{
    vmr_prop_t target_prot;
    struct vmspace *vmspace;
    struct vmregion *vmr;
    s64 remaining;
    unsigned long va;
    int ret;

    if ((addr % PAGE_SIZE) || (length % PAGE_SIZE)) {
        return -EINVAL;
    }

    if (length == 0)
        return 0;

    target_prot = get_vmr_prot(prot);
    vmspace = obj_get(current_cap_group, VMSPACE_OBJ_ID, TYPE_VMSPACE);
    BUG_ON(vmspace == NULL);

    lock(&vmspace->vmspace_lock);
    /*
     * Validate the VM range [addr, addr + length):
     * - the range is fully mapped
     * - the range already has at least the permissions requested in prot
     */
    va = addr;
    remaining = length;
    while (remaining > 0) {
        vmr = find_vmr_for_va(vmspace, va);

        if (!vmr) {
            ret = -EINVAL;
            goto out;
        }

        if (remaining < vmr->size) {
            static int warn = 1;

            if (warn == 1) {
                kwarn("func: %s, ignoring mprotect since splitting a vmr "
                      "is not supported yet.\n",
                      __func__);
                warn = 0;
            }

            ret = 0;
            goto out;
        }

        /* Change the prot in each vmr */
        vmr->perm = target_prot;

        remaining -= vmr->size;
        va += vmr->size;
    }

    /* Modify the existing mappings in pgtbl */
    lock(&vmspace->pgtbl_lock);
    mprotect_in_pgtbl(vmspace->pgtbl, addr, length, target_prot);
    unlock(&vmspace->pgtbl_lock);
    ret = 0;

out:
    obj_put(vmspace);
    unlock(&vmspace->vmspace_lock);

    return ret;
}

unsigned long sys_get_free_mem_size(void)
{
    return get_free_mem_size();
}

#ifdef CHCORE_OH_TEE
static int __destroy_ns_pmo(struct vmspace *vmspace, struct pmobject *pmobject)
{
    int ret;
    struct ns_pmo_private *private;

    if (pmobject->type != PMO_TZ_NS) {
        ret = -EINVAL;
        goto out;
    }
    private = pmobject->private;
    if (private->creater != current_cap_group) {
        ret = -EINVAL;
        goto out;
    }

    ret = vmspace_unmap_range(vmspace, private->vaddr, private->len);

out:
    return ret;
}

cap_t sys_create_ns_pmo(cap_t cap_group, unsigned long paddr,
                        unsigned long size)
{
    cap_t ret;
    struct cap_group *target_cap_group;

    if (size == 0) {
        ret = -EINVAL;
        goto out;
    }

    target_cap_group = obj_get(current_cap_group, cap_group, TYPE_CAP_GROUP);
    if (target_cap_group == NULL) {
        ret = -ECAPBILITY;
        goto out;
    }

    /* the returned pmo cap is allocated in target_cap_group */
    ret = create_pmo(size, PMO_TZ_NS, target_cap_group, paddr, NULL);
    if (ret < 0) {
        goto out_put_cap_group;
    }

out_put_cap_group:
    obj_put(target_cap_group);

out:
    return ret;
}

int sys_destroy_ns_pmo(cap_t cap_group, cap_t pmo)
{
    int ret;
    struct cap_group *target_cap_group;
    struct vmspace *vmspace;
    struct pmobject *pmobject;

    target_cap_group = obj_get(current_cap_group, cap_group, TYPE_CAP_GROUP);
    if (target_cap_group == NULL) {
        ret = -ECAPBILITY;
        goto out;
    }
    pmobject = obj_get(target_cap_group, pmo, TYPE_PMO);
    if (pmobject == NULL) {
        /* The task may have already unmapped and revoked the pmo. */
        ret = 0;
        goto out_put_cap_group;
    }
    vmspace = obj_get(target_cap_group, VMSPACE_OBJ_ID, TYPE_VMSPACE);
    if (vmspace == NULL) {
        ret = -ECAPBILITY;
        goto out_put_pmo;
    }

    ret = __destroy_ns_pmo(vmspace, pmobject);
    if (ret < 0) {
        goto out_put_vmspace;
    }

    ret = cap_free(target_cap_group, pmo);

out_put_vmspace:
    obj_put(vmspace);
out_put_pmo:
    obj_put(pmobject);
out_put_cap_group:
    obj_put(target_cap_group);
out:
    return ret;
}

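/*
 * Create a PMO_SHM shared with the target cap_group. The pmo's private data
 * records the given uuid and the owning (target) cap_group; the pmo cap is
 * allocated in the target cap_group, a copy of it is returned to the caller
 * through *self_cap, and the cap in the target cap_group is the return value.
 */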
cap_t sys_create_tee_shared_pmo(cap_t cap_group, struct tee_uuid *uuid,
                                unsigned long size, cap_t *self_cap)
{
    int ret;
    struct tee_shm_private *private;
    struct cap_group *target_cap_group;
    struct pmobject *pmobject;
    bool success = false;
    cap_t self_pmo, target_pmo;

    if (check_user_addr_range((vaddr_t)uuid, sizeof(*uuid)) != 0) {
        ret = -EINVAL;
        goto out;
    }
    if (check_user_addr_range((vaddr_t)self_cap, sizeof(*self_cap)) != 0) {
        ret = -EINVAL;
        goto out;
    }

    private = kmalloc(sizeof(*private));
    if (private == NULL) {
        ret = -ENOMEM;
        goto out;
    }
    copy_from_user(&private->uuid, uuid, sizeof(*uuid));

    target_cap_group = obj_get(current_cap_group, cap_group, TYPE_CAP_GROUP);
    if (target_cap_group == NULL) {
        ret = -ECAPBILITY;
        goto out_free_uuid;
    }

    target_pmo = create_pmo(size, PMO_SHM, target_cap_group, 0, &pmobject);
    if (target_pmo < 0) {
        ret = target_pmo;
        goto out_put_cap_group;
    }
    private->owner = target_cap_group;
    pmobject->private = private;

    self_pmo = cap_copy(target_cap_group, current_cap_group, target_pmo);
    if (self_pmo < 0) {
        ret = self_pmo;
        goto out_destroy_pmo;
    }
    copy_to_user(self_cap, &self_pmo, sizeof(self_pmo));
    success = true;
    ret = target_pmo;

out_destroy_pmo:
    if (!success) {
        cap_free(target_cap_group, target_pmo);
    }
out_put_cap_group:
    obj_put(target_cap_group);
out_free_uuid:
    if (!success) {
        kfree(private);
    }
out:
    return ret;
}

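/*
 * Transfer the heap-size accounting of a pmo from the current cap_group to
 * the target cap_group. Only the current owner may transfer, and the
 * transfer fails with -ENOMEM if it would exceed the target's heap-size
 * limit.
 */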
int sys_transfer_pmo_owner(cap_t pmo, cap_t cap_group)
{
    int ret;
    bool put_cap_group = true;
    struct pmobject *pmobject;
    struct cap_group *target_cap_group;

    pmobject = obj_get(current_cap_group, pmo, TYPE_PMO);
    if (pmobject == NULL) {
        ret = -ECAPBILITY;
        goto out;
    }

    target_cap_group = obj_get(current_cap_group, cap_group, TYPE_CAP_GROUP);
    if (target_cap_group == NULL) {
        ret = -ECAPBILITY;
        goto out_put_pmo;
    }

    if (target_cap_group == current_cap_group) {
        ret = 0;
        goto out_put_cap_group;
    }

    lock(&pmobject->owner_lock);
    if (pmobject->owner == current_cap_group) {
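        /* Take the two heap_size_locks in a fixed (address) order so that
         * two concurrent transfers in opposite directions cannot deadlock. */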
        if (target_cap_group < current_cap_group) {
            lock(&target_cap_group->heap_size_lock);
            lock(&current_cap_group->heap_size_lock);
        } else {
            lock(&current_cap_group->heap_size_lock);
            lock(&target_cap_group->heap_size_lock);
        }

        if (target_cap_group->heap_size_used + pmobject->size
            <= target_cap_group->heap_size_limit) {
            current_cap_group->heap_size_used -= pmobject->size;
            target_cap_group->heap_size_used += pmobject->size;

            pmobject->owner = target_cap_group;
            /* CANNOT put target_cap_group because pmo has this ref */
            put_cap_group = false;
            obj_put(current_cap_group);

            ret = 0;
        } else {
            ret = -ENOMEM;
        }

        if (target_cap_group < current_cap_group) {
            unlock(&current_cap_group->heap_size_lock);
            unlock(&target_cap_group->heap_size_lock);
        } else {
            unlock(&target_cap_group->heap_size_lock);
            unlock(&current_cap_group->heap_size_lock);
        }
    } else {
        ret = -EINVAL;
    }
    unlock(&pmobject->owner_lock);

out_put_cap_group:
    if (put_cap_group) {
        obj_put(target_cap_group);
    }
out_put_pmo:
    obj_put(pmobject);
out:
    return ret;
}
#endif /* CHCORE_OH_TEE */