/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <machine.h>
#include <common/sync.h>
#include <ipc/connection.h>
#include <ipc/notification.h>
#include <object/memory.h>
#include <object/object.h>
#include <object/cap_group.h>
#include <object/thread.h>
#include <object/irq.h>
#include <mm/kmalloc.h>
#include <mm/uaccess.h>
#include <mm/vmspace.h>
#include <lib/printk.h>
#include <sched/context.h>
#ifdef CHCORE_OH_TEE
#include <ipc/channel.h>
#endif /* CHCORE_OH_TEE */

const obj_deinit_func obj_deinit_tbl[TYPE_NR] = {
        [0 ... TYPE_NR - 1] = NULL,
        [TYPE_CAP_GROUP] = cap_group_deinit,
        [TYPE_THREAD] = thread_deinit,
        [TYPE_CONNECTION] = connection_deinit,
        [TYPE_NOTIFICATION] = notification_deinit,
        [TYPE_IRQ] = irq_deinit,
        [TYPE_PMO] = pmo_deinit,
        [TYPE_VMSPACE] = vmspace_deinit,
#ifdef CHCORE_OH_TEE
        [TYPE_CHANNEL] = channel_deinit,
        [TYPE_MSG_HDL] = msg_hdl_deinit,
#endif /* CHCORE_OH_TEE */
};

/*
 * Usage:
 *   obj = obj_alloc(...);
 *   initialize the obj;
 *   cap_alloc(obj);
 * (See the illustrative sketch after cap_alloc() below.)
 */
void *obj_alloc(u64 type, u64 size)
{
        u64 total_size;
        struct object *object;

        total_size = sizeof(*object) + size;
        object = kzalloc(total_size);
        if (!object)
                return NULL;

        object->type = type;
        object->size = size;
        object->refcount = 0;

        /*
         * If the cap of the object is copied, then the copied cap (slot) is
         * stored in such a list.
         */
        init_list_head(&object->copies_head);
        lock_init(&object->copies_lock);

        return object->opaque;
}

/*
 * If initialization of an object fails after obj_alloc and before cap_alloc,
 * invoke this interface to free the object allocated by obj_alloc.
 */
void obj_free(void *obj)
{
        struct object *object;

        if (!obj)
                return;
        object = container_of(obj, struct object, opaque);

        BUG_ON(object->refcount != 0);
        kfree(object);
}

cap_t cap_alloc(struct cap_group *cap_group, void *obj)
{
        struct object *object;
        struct slot_table *slot_table;
        struct object_slot *slot;
        cap_t r, slot_id;

        object = container_of(obj, struct object, opaque);
        slot_table = &cap_group->slot_table;

        write_lock(&slot_table->table_guard);
        slot_id = alloc_slot_id(cap_group);
        if (slot_id < 0) {
                r = -ENOMEM;
                goto out_unlock_table;
        }

        slot = kmalloc(sizeof(*slot));
        if (!slot) {
                r = -ENOMEM;
                goto out_free_slot_id;
        }
        slot->slot_id = slot_id;
        slot->cap_group = cap_group;
        slot->isvalid = true;
        slot->object = object;
        list_add(&slot->copies, &object->copies_head);

        BUG_ON(object->refcount != 0);
        object->refcount = 1;

        install_slot(cap_group, slot_id, slot);

        write_unlock(&slot_table->table_guard);
        return slot_id;
out_free_slot_id:
        free_slot_id(cap_group, slot_id);
out_unlock_table:
        write_unlock(&slot_table->table_guard);
        return r;
}

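/*
 * Illustrative sketch (not part of this file): a minimal kernel-side caller
 * following the obj_alloc -> initialize -> cap_alloc pattern documented
 * above. The object type (TYPE_PMO / struct pmo) is only a placeholder for
 * "some kernel object", and create_dummy_pmo_cap is a hypothetical helper.
 * Note that obj_free is only legal while the refcount is still 0, i.e.,
 * before cap_alloc succeeds.
 *
 *     cap_t create_dummy_pmo_cap(struct cap_group *cap_group)
 *     {
 *             struct pmo *pmo;
 *             cap_t cap;
 *
 *             pmo = obj_alloc(TYPE_PMO, sizeof(*pmo));
 *             if (!pmo)
 *                     return -ENOMEM;
 *
 *             // ... initialize the pmo fields here ...
 *
 *             cap = cap_alloc(cap_group, pmo);
 *             if (cap < 0)
 *                     obj_free(pmo); // refcount is still 0, so this is safe
 *             return cap;
 *     }
 */
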
#ifndef TEST_OBJECT
/* @object->type == TYPE_THREAD */
static void clear_fpu_owner(struct object *object)
{
        struct thread *thread;
        int cpuid;

        thread = (struct thread *)object->opaque;
        cpuid = thread->thread_ctx->is_fpu_owner;
        /* If is_fpu_owner >= 0, then the thread is the FPU owner of some CPU.
         */
        if (cpuid >= 0) {
                /*
                 * If the thread to free is the FPU owner of some CPU,
                 * then clear the FPU owner on that CPU first.
                 */
                lock(&fpu_owner_locks[cpuid]);
                if (cpu_info[cpuid].fpu_owner == thread)
                        cpu_info[cpuid].fpu_owner = NULL;
                unlock(&fpu_owner_locks[cpuid]);
                thread->thread_ctx->is_fpu_owner = -1;
        }
}
#endif

/* An internal interface: only invoked by __cap_free and obj_put. */
void __free_object(struct object *object)
{
#ifndef TEST_OBJECT
        obj_deinit_func func;

        if (object->type == TYPE_THREAD)
                clear_fpu_owner(object);

        /* Invoke the object-specific free routine */
        func = obj_deinit_tbl[object->type];
        if (func)
                func(object->opaque);
#endif

        BUG_ON(!list_empty(&object->copies_head));
        kfree(object);
}

void free_object_internal(struct object *object)
{
        __free_object(object);
}

/* cap_free (__cap_free) removes only a single cap, unlike cap_free_all,
 * which removes every copy of the cap. */
int __cap_free(struct cap_group *cap_group, cap_t slot_id,
               bool slot_table_locked, bool copies_list_locked)
{
        struct object_slot *slot;
        struct object *object;
        struct slot_table *slot_table;
        int r = 0;
        u64 old_refcount;

        /* Step-1: free the slot_id (i.e., the capability number) in the slot
         * table */
        slot_table = &cap_group->slot_table;
        if (!slot_table_locked)
                write_lock(&slot_table->table_guard);
        slot = get_slot(cap_group, slot_id);
        if (!slot || slot->isvalid == false) {
                r = -ECAPBILITY;
                goto out_unlock_table;
        }

        free_slot_id(cap_group, slot_id);
        if (!slot_table_locked)
                write_unlock(&slot_table->table_guard);

        /* Step-2: remove the slot from the copies list of the object and free
         * the slot */
        object = slot->object;
        if (copies_list_locked) {
                list_del(&slot->copies);
        } else {
                lock(&object->copies_lock);
                list_del(&slot->copies);
                unlock(&object->copies_lock);
        }
        kfree(slot);

        /* Step-3: decrease the refcnt of the object and free it if necessary */
        old_refcount = atomic_fetch_sub_long(&object->refcount, 1);

        if (old_refcount == 1)
                __free_object(object);

        return 0;

out_unlock_table:
        if (!slot_table_locked)
                write_unlock(&slot_table->table_guard);
        return r;
}

int cap_free(struct cap_group *cap_group, cap_t slot_id)
{
        return __cap_free(cap_group, slot_id, false, false);
}

cap_t cap_copy(struct cap_group *src_cap_group,
               struct cap_group *dest_cap_group, cap_t src_slot_id)
{
        struct object_slot *src_slot, *dest_slot;
        cap_t r, dest_slot_id;
        struct rwlock *src_table_guard, *dest_table_guard;
        bool local_copy;

        struct object *object;

        local_copy = (src_cap_group == dest_cap_group);
        src_table_guard = &src_cap_group->slot_table.table_guard;
        dest_table_guard = &dest_cap_group->slot_table.table_guard;
        if (local_copy) {
                write_lock(dest_table_guard);
        } else {
                /*
                 * Avoid deadlock: two concurrent cross-cap_group copies in
                 * opposite directions could otherwise each hold one
                 * table_guard and wait for the other. Hold the source lock,
                 * try-lock the destination, and back off (dropping the
                 * source lock) on failure.
                 */
                while (true) {
                        read_lock(src_table_guard);
                        if (write_try_lock(dest_table_guard) == 0)
                                break;
                        read_unlock(src_table_guard);
                }
        }

        src_slot = get_slot(src_cap_group, src_slot_id);
        if (!src_slot || src_slot->isvalid == false) {
                r = -ECAPBILITY;
                goto out_unlock;
        }

        dest_slot_id = alloc_slot_id(dest_cap_group);
        if (dest_slot_id == -1) {
                r = -ENOMEM;
                goto out_unlock;
        }

        dest_slot = kmalloc(sizeof(*dest_slot));
        if (!dest_slot) {
                r = -ENOMEM;
                goto out_free_slot_id;
        }
        src_slot = get_slot(src_cap_group, src_slot_id);
        if (!src_slot || src_slot->isvalid == false) {
                r = -ECAPBILITY;
                goto out_free_slot;
        }
        atomic_fetch_add_long(&src_slot->object->refcount, 1);

        dest_slot->slot_id = dest_slot_id;
        dest_slot->cap_group = dest_cap_group;
        dest_slot->isvalid = true;
        dest_slot->object = src_slot->object;

        object = src_slot->object;
        lock(&object->copies_lock);
        list_add(&dest_slot->copies, &src_slot->copies);
        unlock(&object->copies_lock);

        install_slot(dest_cap_group, dest_slot_id, dest_slot);

        write_unlock(dest_table_guard);
        if (!local_copy)
                read_unlock(src_table_guard);
        return dest_slot_id;
out_free_slot:
        kfree(dest_slot);
out_free_slot_id:
        free_slot_id(dest_cap_group, dest_slot_id);
out_unlock:
        write_unlock(dest_table_guard);
        if (!local_copy)
                read_unlock(src_table_guard);
        return r;
}

/*
 * Free an object pointed to by some cap; this also removes all the caps that
 * point to the object.
 */
int cap_free_all(struct cap_group *cap_group, cap_t slot_id)
{
        void *obj;
        struct object *object;
        struct object_slot *slot_iter = NULL, *slot_iter_tmp = NULL;
        int r;

        /*
         * Since obj_get requires passing the cap type,
         * which is not available here, get_opaque is used instead.
         */
        obj = get_opaque(cap_group, slot_id, false, 0);

        if (!obj) {
                r = -ECAPBILITY;
                goto out_fail;
        }

        object = container_of(obj, struct object, opaque);

        write_lock(&cap_group->slot_table.table_guard);

        /* free all copied slots */
        lock(&object->copies_lock);
        for_each_in_list_safe (
                slot_iter, slot_iter_tmp, copies, &object->copies_head) {
                u64 iter_slot_id = slot_iter->slot_id;
                struct cap_group *iter_cap_group = slot_iter->cap_group;

                r = __cap_free(
                        iter_cap_group, iter_slot_id, iter_cap_group == cap_group, true);
                BUG_ON(r != 0);
        }
        unlock(&object->copies_lock);

        write_unlock(&cap_group->slot_table.table_guard);

        /* get_opaque also increments the reference count */
        obj_put(obj);

        return 0;

out_fail:
        return r;
}

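/*
 * Illustrative sketch (not part of this file): the difference between
 * cap_free and cap_free_all after a cap_copy. The cap_group pointers, the
 * object and the cap values are placeholders; only the refcount and
 * copies-list behaviour noted in the comments follows the functions above.
 *
 *     // object has refcount 1 and one slot in cap_group A
 *     cap_t c_a = cap_alloc(cap_group_a, obj);
 *     // refcount becomes 2; a second slot is linked into the copies list
 *     cap_t c_b = cap_copy(cap_group_a, cap_group_b, c_a);
 *
 *     // cap_free: only A's slot is removed; refcount drops to 1 and the
 *     // object stays alive because B still holds a cap
 *     cap_free(cap_group_a, c_a);
 *
 *     // cap_free_all: every remaining copy (here, B's slot) is removed and
 *     // the object is freed once the refcount reaches 0
 *     cap_free_all(cap_group_b, c_b);
 */
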
/* Transfer @nr_caps caps from current_cap_group to the cap_group referred to
 * by @dest_group_cap. */
int sys_transfer_caps(cap_t dest_group_cap, unsigned long src_caps_buf,
                      int nr_caps, unsigned long dst_caps_buf)
{
        struct cap_group *dest_cap_group;
        int i;
        int *src_caps;
        int *dst_caps;
        size_t size;
        int ret;

#define MAX_TRANSFER_NUM 16
        if ((nr_caps <= 0) || (nr_caps > MAX_TRANSFER_NUM))
                return -EINVAL;

        size = sizeof(int) * nr_caps;
        if ((check_user_addr_range(src_caps_buf, size) != 0)
            || (check_user_addr_range(dst_caps_buf, size) != 0))
                return -EINVAL;

        dest_cap_group = obj_get(current_cap_group, dest_group_cap, TYPE_CAP_GROUP);
        if (!dest_cap_group)
                return -ECAPBILITY;

        src_caps = kmalloc(size);
        dst_caps = kmalloc(size);
        if (!src_caps || !dst_caps) {
                ret = -ENOMEM;
                goto out_free;
        }

        /* get args from user buffer @src_caps_buf */
        ret = copy_from_user((void *)src_caps, (void *)src_caps_buf, size);
        if (ret) {
                ret = -EINVAL;
                goto out_free;
        }

        for (i = 0; i < nr_caps; ++i) {
                dst_caps[i] = cap_copy(current_cap_group, dest_cap_group, src_caps[i]);
        }

        /* write results to user buffer @dst_caps_buf */
        ret = copy_to_user((void *)dst_caps_buf, (void *)dst_caps, size);
        if (ret) {
                ret = -EINVAL;
                goto out_free;
        }
        ret = 0;

out_free:
        /* kmalloc may have failed for one of the buffers; check before freeing */
        if (src_caps)
                kfree(src_caps);
        if (dst_caps)
                kfree(dst_caps);
        obj_put(dest_cap_group);
        return ret;
}

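/*
 * Illustrative sketch (not part of this file): how a user-space caller might
 * drive this syscall, assuming a wrapper named usys_transfer_caps() that
 * traps into sys_transfer_caps with the same argument order. The wrapper
 * name and the caps used are assumptions; the buffer layout (an int array of
 * nr_caps entries for both input and output) follows the kernel code above.
 *
 *     int src_caps[2] = { pmo_cap, notific_cap };
 *     int dst_caps[2];
 *
 *     int ret = usys_transfer_caps(dest_group_cap,
 *                                  (unsigned long)src_caps, 2,
 *                                  (unsigned long)dst_caps);
 *     // On success, dst_caps[i] holds the cap number valid inside the
 *     // destination cap_group (or a negative error from cap_copy).
 */
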
int sys_revoke_cap(cap_t obj_cap, bool revoke_copy)
{
        int ret;
        void *obj;

        /*
         * Disallow revoking the cap of current_cap_group, current_vmspace,
         * or current_thread.
         */
        obj = obj_get(current_cap_group, obj_cap, TYPE_CAP_GROUP);
        if (obj == current_cap_group) {
                ret = -EINVAL;
                goto out_fail;
        }
        if (obj)
                obj_put(obj);

        obj = obj_get(current_cap_group, obj_cap, TYPE_VMSPACE);
        if (obj == current_thread->vmspace) {
                ret = -EINVAL;
                goto out_fail;
        }
        if (obj)
                obj_put(obj);

        obj = obj_get(current_cap_group, obj_cap, TYPE_THREAD);
        if (obj == current_thread) {
                ret = -EINVAL;
                goto out_fail;
        }
        if (obj)
                obj_put(obj);

        if (revoke_copy)
                ret = cap_free_all(current_cap_group, obj_cap);
        else
                ret = cap_free(current_cap_group, obj_cap);
        return ret;

out_fail:
        obj_put(obj);
        return ret;
}

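/*
 * Illustrative sketch (not part of this file): sys_revoke_cap rejects any
 * attempt to revoke the calling thread's own cap_group, vmspace, or thread
 * cap. The user-space wrapper name usys_revoke_cap and the caps used are
 * assumptions.
 *
 *     // Revoking a cap the caller depends on fails:
 *     ret = usys_revoke_cap(self_cap_group_cap, true);   // -EINVAL
 *
 *     // Revoking an ordinary object cap succeeds; revoke_copy selects
 *     // cap_free_all (drop every copy) vs cap_free (drop only this slot):
 *     ret = usys_revoke_cap(pmo_cap, true);
 */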