• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
3  * Licensed under the Mulan PSL v2.
4  * You can use this software according to the terms and conditions of the Mulan PSL v2.
5  * You may obtain a copy of Mulan PSL v2 at:
6  *     http://license.coscl.org.cn/MulanPSL2
7  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
8  * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
9  * PURPOSE.
10  * See the Mulan PSL v2 for more details.
11  */
12 #include <object/cap_group.h>
13 #include <object/thread.h>
14 #include <object/object.h>
15 #include <common/list.h>
16 #include <common/sync.h>
17 #include <common/util.h>
18 #include <common/bitops.h>
19 #include <mm/kmalloc.h>
20 #include <mm/vmspace.h>
21 #include <mm/uaccess.h>
22 #include <lib/printk.h>
23 #include <ipc/notification.h>
24 #include <syscall/syscall_hooks.h>
25 
26 struct cap_group *root_cap_group;
27 
28 /* tool functions */
is_valid_slot_id(struct slot_table * slot_table,cap_t slot_id)29 static bool is_valid_slot_id(struct slot_table *slot_table, cap_t slot_id)
30 {
31     if (slot_id < 0 || slot_id >= slot_table->slots_size)
32         return false;
33     if (!get_bit(slot_id, slot_table->slots_bmp))
34         return false;
35     if (slot_table->slots[slot_id] == NULL)
36         BUG("slot NULL while bmp is not\n");
37     return true;
38 }
39 
/*
 * Initialize @slot_table with room for at least @size slots (rounded up
 * to a whole multiple of BASE_OBJECT_NUM).
 *
 * @init_lock: also initialize table_guard (false when building a scratch
 *             table, e.g. during expansion, whose guard is never used).
 *
 * Returns 0 on success or -ENOMEM; on failure all partial allocations
 * are released.
 */
static int slot_table_init(struct slot_table *slot_table, unsigned int size,
                           bool init_lock)
{
    int r = -ENOMEM;

    size = DIV_ROUND_UP(size, BASE_OBJECT_NUM) * BASE_OBJECT_NUM;
    slot_table->slots_size = size;

    /* XXX: vmalloc is better? */
    slot_table->slots = kzalloc(size * sizeof(*slot_table->slots));
    if (!slot_table->slots)
        goto out_fail;

    /* One bit per slot: set => slot occupied. */
    slot_table->slots_bmp =
        kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long));
    if (!slot_table->slots_bmp)
        goto out_free_slots;

    /* One bit per word of slots_bmp: set => that word is completely full. */
    slot_table->full_slots_bmp =
        kzalloc(BITS_TO_LONGS(BITS_TO_LONGS(size)) * sizeof(unsigned long));
    if (!slot_table->full_slots_bmp)
        goto out_free_slots_bmp;

    if (init_lock)
        rwlock_init(&slot_table->table_guard);

    return 0;

out_free_slots_bmp:
    kfree(slot_table->slots_bmp);
out_free_slots:
    kfree(slot_table->slots);
out_fail:
    return r;
}
79 
/*
 * Initialize @cap_group: build its slot table with capacity @size and
 * record @badge as its identity. Always returns 0; slot-table allocation
 * failure panics via BUG_ON.
 */
int cap_group_init(struct cap_group *cap_group, unsigned int size,
                   badge_t badge)
{
    /* The slot table must come up; there is no error path here. */
    BUG_ON(slot_table_init(&cap_group->slot_table, size, true));

    init_list_head(&cap_group->thread_list);
    lock_init(&cap_group->threads_lock);
    cap_group->thread_cnt = 0;

    /* Set badge of the new cap group. */
    cap_group->badge = badge;

#ifdef CHCORE_OH_TEE
    lock_init(&cap_group->heap_size_lock);
    cap_group->heap_size_used = 0;
    cap_group->heap_size_limit = (size_t)-1; /* no limit until one is set */
#endif /* CHCORE_OH_TEE */

    return 0;
}
101 
cap_group_deinit(void * ptr)102 void cap_group_deinit(void *ptr)
103 {
104     struct cap_group *cap_group;
105     struct slot_table *slot_table;
106 
107     cap_group = (struct cap_group *)ptr;
108     slot_table = &cap_group->slot_table;
109     kfree(slot_table->slots);
110     kfree(slot_table->slots_bmp);
111     kfree(slot_table->full_slots_bmp);
112 }
113 
114 /* slot allocation */
expand_slot_table(struct slot_table * slot_table)115 static int expand_slot_table(struct slot_table *slot_table)
116 {
117     unsigned int new_size, old_size;
118     struct slot_table new_slot_table;
119     int r;
120 
121     old_size = slot_table->slots_size;
122     new_size = old_size + BASE_OBJECT_NUM;
123     r = slot_table_init(&new_slot_table, new_size, false);
124     if (r < 0)
125         return r;
126 
127     memcpy(new_slot_table.slots,
128            slot_table->slots,
129            old_size * sizeof(*slot_table->slots));
130     memcpy(new_slot_table.slots_bmp,
131            slot_table->slots_bmp,
132            BITS_TO_LONGS(old_size) * sizeof(unsigned long));
133     memcpy(new_slot_table.full_slots_bmp,
134            slot_table->full_slots_bmp,
135            BITS_TO_LONGS(BITS_TO_LONGS(old_size)) * sizeof(unsigned long));
136     slot_table->slots_size = new_size;
137     kfree(slot_table->slots);
138     slot_table->slots = new_slot_table.slots;
139     kfree(slot_table->slots_bmp);
140     slot_table->slots_bmp = new_slot_table.slots_bmp;
141     kfree(slot_table->full_slots_bmp);
142     slot_table->full_slots_bmp = new_slot_table.full_slots_bmp;
143     return 0;
144 }
145 
/*
 * Allocate a free slot index in @cap_group's slot table, growing the
 * table on demand.
 *
 * Returns the allocated slot id, or a negative errno (-ENOMEM) when the
 * table is full and cannot be expanded.
 *
 * Should only be called when table_guard is held.
 */
int alloc_slot_id(struct cap_group *cap_group)
{
    int empty_idx = 0, r;
    struct slot_table *slot_table;
    int bmp_size = 0, full_bmp_size = 0;

    slot_table = &cap_group->slot_table;

    while (true) {
        /* Re-read the sizes each round: expansion changes them. */
        bmp_size = slot_table->slots_size;
        full_bmp_size = BITS_TO_LONGS(bmp_size);

        /* Level 1: find a word of slots_bmp that is not completely full. */
        empty_idx =
            find_next_zero_bit(slot_table->full_slots_bmp, full_bmp_size, 0);
        if (empty_idx >= full_bmp_size)
            goto expand;

        /* Level 2: find a free bit starting at that word. */
        empty_idx = find_next_zero_bit(
            slot_table->slots_bmp, bmp_size, empty_idx * BITS_PER_LONG);
        if (empty_idx >= bmp_size)
            goto expand;
        else
            break;
    expand:
        /* Every slot is taken: grow the table and retry the search. */
        r = expand_slot_table(slot_table);
        if (r < 0)
            goto out_fail;
    }
    BUG_ON(empty_idx < 0 || empty_idx >= bmp_size);

    set_bit(empty_idx, slot_table->slots_bmp);
    /* If that filled the containing word, record it in full_slots_bmp. */
    if (slot_table->slots_bmp[empty_idx / BITS_PER_LONG] == ~((unsigned long)0))
        set_bit(empty_idx / BITS_PER_LONG, slot_table->full_slots_bmp);

    return empty_idx;
out_fail:
    return r;
}
185 
/*
 * Resolve @slot_id in @cap_group to the contained object's opaque
 * payload, taking one reference on the object.
 *
 * @type_valid: when true the object's type must equal @type.
 *
 * Returns the opaque pointer, or NULL when the slot is invalid or the
 * type does not match. On success the caller owns one reference and
 * must release it with obj_put().
 */
void *get_opaque(struct cap_group *cap_group, cap_t slot_id, bool type_valid,
                 int type)
{
    struct slot_table *slot_table = &cap_group->slot_table;
    struct object_slot *slot;
    void *obj = NULL;

    read_lock(&slot_table->table_guard);

    if (!is_valid_slot_id(slot_table, slot_id))
        goto out_unlock_table;

    slot = get_slot(cap_group, slot_id);
    BUG_ON(slot == NULL);
    BUG_ON(slot->isvalid == false);
    BUG_ON(slot->object == NULL);

    /* Reject when a concrete type is required but does not match. */
    if (type_valid && slot->object->type != type)
        goto out_unlock_table;

    obj = slot->object->opaque;
    atomic_fetch_add_long(&slot->object->refcount, 1);

out_unlock_table:
    read_unlock(&slot_table->table_guard);
    return obj;
}
217 
218 /* Get an object reference through its cap.
219  * The interface will also add the object's refcnt by one.
220  */
obj_get(struct cap_group * cap_group,cap_t slot_id,int type)221 void *obj_get(struct cap_group *cap_group, cap_t slot_id, int type)
222 {
223     return get_opaque(cap_group, slot_id, true, type);
224 }
225 
226 /* This is a pair interface of obj_get.
227  * Used when no releasing an object reference.
228  * The interface will minus the object's refcnt by one.
229  *
230  * Furthermore, free an object when its reference cnt becomes 0.
231  */
obj_put(void * obj)232 void obj_put(void *obj)
233 {
234     struct object *object;
235     u64 old_refcount;
236 
237     object = container_of(obj, struct object, opaque);
238     old_refcount = atomic_fetch_sub_long(&object->refcount, 1);
239 
240     if (old_refcount == 1) {
241         free_object_internal(object);
242     }
243 }
244 
245 /*
246  * This interface will add an object's refcnt by one.
247  * If you do not have the cap of an object, you can
248  * use this interface to just claim a reference.
249  *
250  * Be sure to call obj_put when releasing the reference.
251  */
obj_ref(void * obj)252 void obj_ref(void *obj)
253 {
254     struct object *object;
255 
256     object = container_of(obj, struct object, opaque);
257     atomic_fetch_add_long(&object->refcount, 1);
258 }
259 
/* Userspace argument block consumed by sys_create_cap_group(). */
struct cap_group_args {
    badge_t badge;           /* badge stamped on the new cap_group */
    vaddr_t name;            /* user pointer to the process name */
    unsigned long name_len;  /* length of the name buffer */
    unsigned long pcid;      /* PCID for the new cap_group's vmspace */
#ifdef CHCORE_OH_TEE
    int pid;                 /* pid used in OH-TEE */
    vaddr_t puuid;           /* user pointer to a TEE_UUID; may be 0 */
    unsigned long heap_size; /* heap size limit for the new cap_group */
#endif /* CHCORE_OH_TEE */
};
271 
/*
 * Syscall: create a new cap_group from the struct cap_group_args at user
 * address @cap_group_args_p.
 *
 * On success, returns the capability (in the CALLER's cap_group) that
 * refers to the new cap_group; on failure, returns a negative errno.
 *
 * The new cap_group is populated with two fixed slots:
 *   slot CAP_GROUP_OBJ_ID - the cap_group itself
 *   slot VMSPACE_OBJ_ID   - a freshly initialized vmspace
 */
cap_t sys_create_cap_group(unsigned long cap_group_args_p)
{
    struct cap_group *new_cap_group;
    struct vmspace *vmspace;
    cap_t cap;
    int r;
    struct cap_group_args args = {0};

    r = hook_sys_create_cap_group(cap_group_args_p);
    if (r != 0)
        return r;

    /* Validate and copy in the argument block before using any field. */
    if (check_user_addr_range((vaddr_t)cap_group_args_p,
                              sizeof(struct cap_group_args))
        != 0)
        return -EINVAL;

    r = copy_from_user(
        &args, (void *)cap_group_args_p, sizeof(struct cap_group_args));
    if (r) {
        return -EINVAL;
    }

#ifdef CHCORE_OH_TEE
    /* NOTE(review): this range check also runs when args.puuid is 0,
     * although the NULL case is explicitly handled below — confirm that
     * check_user_addr_range(0, ...) does not reject a NULL puuid. */
    if (check_user_addr_range((vaddr_t)args.puuid, sizeof(TEE_UUID)) != 0)
        return -EINVAL;
#endif /* CHCORE_OH_TEE */

    if (check_user_addr_range((vaddr_t)args.name, (size_t)args.name_len) != 0)
        return -EINVAL;

    /* cap current cap_group */
    new_cap_group = obj_alloc(TYPE_CAP_GROUP, sizeof(*new_cap_group));
    if (!new_cap_group) {
        r = -ENOMEM;
        goto out_fail;
    }
    cap_group_init(new_cap_group, BASE_OBJECT_NUM, args.badge);
#ifdef CHCORE_OH_TEE
    new_cap_group->heap_size_limit = args.heap_size;
    /* pid used in OH-TEE */
    new_cap_group->pid = args.pid;
    if (args.puuid) {
        copy_from_user(
            &new_cap_group->uuid, (void *)args.puuid, sizeof(TEE_UUID));
    } else {
        memset(&new_cap_group->uuid, 0, sizeof(TEE_UUID));
    }
#endif /* CHCORE_OH_TEE */

    /* Install the new cap_group into the CALLER's slot table. */
    cap = cap_alloc(current_cap_group, new_cap_group);
    if (cap < 0) {
        r = cap;
        goto out_free_obj_new_grp;
    }

    /* 1st cap is cap_group */
    if (cap_copy(current_thread->cap_group, new_cap_group, cap)
        != CAP_GROUP_OBJ_ID) {
        kwarn("%s: cap_copy fails or cap[0] is not cap_group\n", __func__);
        r = -ECAPBILITY;
        goto out_free_cap_grp_current;
    }

    /* 2st cap is vmspace */
    vmspace = obj_alloc(TYPE_VMSPACE, sizeof(*vmspace));
    if (!vmspace) {
        r = -ENOMEM;
        goto out_free_obj_vmspace;
    }

    vmspace_init(vmspace, args.pcid);

    r = cap_alloc(new_cap_group, vmspace);
    if (r != VMSPACE_OBJ_ID) {
        kwarn("%s: cap_copy fails or cap[1] is not vmspace\n", __func__);
        r = -ECAPBILITY;
        goto out_free_obj_vmspace;
    }

    new_cap_group->notify_recycler = 0;

    /* Set the cap_group_name (process_name) for easing debugging */
    memset(new_cap_group->cap_group_name, 0, MAX_GROUP_NAME_LEN + 1);
    if (args.name_len > MAX_GROUP_NAME_LEN)
        args.name_len = MAX_GROUP_NAME_LEN;

    r = copy_from_user(
        new_cap_group->cap_group_name, (void *)args.name, args.name_len);
    if (r) {
        r = -EINVAL;
        goto out_free_obj_vmspace;
    }

    return cap;
out_free_obj_vmspace:
    /* NOTE(review): reached with vmspace == NULL when its obj_alloc
     * failed; presumably obj_free tolerates NULL — confirm. */
    obj_free(vmspace);
out_free_cap_grp_current:
    /* cap_free drops the reference held through the caller's slot; the
     * pointer is NULLed so the obj_free below does not double-free
     * (assumes obj_free(NULL) is a no-op — confirm). */
    cap_free(current_cap_group, cap);
    new_cap_group = NULL;
out_free_obj_new_grp:
    obj_free(new_cap_group);
out_fail:
    return r;
}
377 
378 /* This is for creating the first (init) user process. */
create_root_cap_group(char * name,size_t name_len)379 struct cap_group *create_root_cap_group(char *name, size_t name_len)
380 {
381     struct cap_group *cap_group;
382     struct vmspace *vmspace;
383     cap_t slot_id;
384 
385     cap_group = obj_alloc(TYPE_CAP_GROUP, sizeof(*cap_group));
386     BUG_ON(!cap_group);
387     cap_group_init(cap_group,
388                    BASE_OBJECT_NUM,
389                    /* Fixed badge */ ROOT_CAP_GROUP_BADGE);
390 
391     slot_id = cap_alloc(cap_group, cap_group);
392     BUG_ON(slot_id != CAP_GROUP_OBJ_ID);
393 
394     vmspace = obj_alloc(TYPE_VMSPACE, sizeof(*vmspace));
395     BUG_ON(!vmspace);
396 
397     /* fixed PCID 1 for root process, PCID 0 is not used. */
398     vmspace_init(vmspace, ROOT_PROCESS_PCID);
399 
400     slot_id = cap_alloc(cap_group, vmspace);
401     BUG_ON(slot_id != VMSPACE_OBJ_ID);
402 
403     /* Set the cap_group_name (process_name) for easing debugging */
404     memset(cap_group->cap_group_name, 0, MAX_GROUP_NAME_LEN + 1);
405     if (name_len > MAX_GROUP_NAME_LEN)
406         name_len = MAX_GROUP_NAME_LEN;
407     memcpy(cap_group->cap_group_name, name, name_len);
408 
409     root_cap_group = cap_group;
410     return cap_group;
411 }
412