/*
 * Copyright (C) 2022 Huawei Technologies Co., Ltd.
 * Description: agent manager functions, such as agent register and cmd send
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "agent.h"
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/atomic.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/path.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#endif
#if (KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE)
#include <linux/mman.h>
#else
#include <asm/mman.h>
#endif
#include <linux/signal.h>
#include <securec.h>
#ifdef CONFIG_MM_VLTMM
#include <linux/ion/mm_ion.h>
#endif
#ifdef CONFIG_MEMORY_VLTMM
#include <linux/dmabuf/mm_dma_heap.h>
#endif
#include "teek_client_constants.h"
#include "teek_ns_client.h"
#include "smc_smp.h"
#include "mem.h"
#include "tui.h"
#include "tc_ns_log.h"
#include "mailbox_mempool.h"
#include "tc_client_driver.h"
#include "cmdmonitor.h"
#include "agent_rpmb.h"
#include "ko_adapt.h"
#include "internal_functions.h"
#include "auth_base_impl.h"

#ifdef CONFIG_CMS_CAHASH_AUTH
#define HASH_FILE_MAX_SIZE CONFIG_HASH_FILE_SIZE
#else
#define HASH_FILE_MAX_SIZE (16 * 1024)
#endif
#define AGENT_BUFF_SIZE (4 * 1024)
#define AGENT_MAX 32
#define PAGE_ORDER_RATIO 2

static struct list_head g_tee_agent_list;

struct agent_control {
	spinlock_t lock;
	struct list_head agent_list;
};

static struct agent_control g_agent_control;

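/*
 * Weak default used when no CA authentication implementation is linked
 * in: reject every caller. Platforms that support agent CA whitelisting
 * override this symbol.
 */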
int __attribute__((weak)) is_allowed_agent_ca(const struct ca_info *ca,
	bool check_agent_id)
{
	(void)ca;
	(void)check_agent_id;

	return -EFAULT;
}

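/*
 * Note: when mm is valid but has no exe_file, this helper drops the
 * mm reference itself, so the caller must not call mmput() again.
 */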
static int check_mm_struct(struct mm_struct *mm)
{
	if (!mm)
		return -EINVAL;

	if (!mm->exe_file) {
		mmput(mm);
		return -EINVAL;
	}

	return 0;
}

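/*
 * Resolve the absolute path of the current process's executable,
 * via /proc/<tgid>/exe on liblinux or from mm->exe_file otherwise.
 */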
#ifdef CONFIG_LIBLINUX
char *get_proc_dpath(char *path, int path_len)
{
	int rc;
	char cmdstring[MAX_PATH_SIZE] = { 0 };

	if (!path || path_len != MAX_PATH_SIZE) {
		tloge("bad params\n");
		return NULL;
	}

	if (memset_s(path, path_len, '\0', MAX_PATH_SIZE) != 0) {
		tloge("memset error\n");
		return NULL;
	}

	rc = sprintf_s(cmdstring, MAX_PATH_SIZE, "/proc/%d/exe", current->tgid);
	if (rc < 0) {
		tloge("set path in get proc dpath failed\n");
		return NULL;
	}

	if (liblinux_pal_vfs_readlink(cmdstring, path, MAX_PATH_SIZE) == 0) {
		tloge("get CA realpath in get proc dpath failed\n");
		return NULL;
	}

	return path;
}
#else
char *get_proc_dpath(char *path, int path_len)
{
	char *dpath = NULL;
	struct path base_path = {
		.mnt = NULL,
		.dentry = NULL
	};
	struct mm_struct *mm = NULL;
	struct file *exe_file = NULL;

	if (!path || path_len != MAX_PATH_SIZE) {
		tloge("bad params\n");
		return NULL;
	}

	if (memset_s(path, path_len, '\0', MAX_PATH_SIZE) != 0) {
		tloge("memset error\n");
		return NULL;
	}

	mm = get_task_mm(current);
	if (check_mm_struct(mm) != 0) {
		tloge("check mm_struct failed\n");
		return NULL;
	}
#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
	exe_file = mm->exe_file;
#else
	exe_file = get_mm_exe_file(mm);
#endif
	if (!exe_file) {
		mmput(mm);
		return NULL;
	}

	base_path = exe_file->f_path;
	path_get(&base_path);
	dpath = d_path(&base_path, path, MAX_PATH_SIZE);
	path_put(&base_path);
#if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE)
	fput(exe_file);
#endif
	mmput(mm);

	return dpath;
}
#endif

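/* Collect the caller's executable path and uid, used for agent CA authentication. */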
static int get_ca_path_and_uid(struct ca_info *ca)
{
	char *path = NULL;
	const struct cred *cred = NULL;
	int message_size;
	char *tpath = NULL;

	tpath = kmalloc(MAX_PATH_SIZE, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)tpath)) {
		tloge("tpath kmalloc fail\n");
		return -ENOMEM;
	}

	path = get_proc_dpath(tpath, MAX_PATH_SIZE);
	if (IS_ERR_OR_NULL(path)) {
		tloge("get process path failed\n");
		kfree(tpath);
		return -ENOMEM;
	}

	message_size = snprintf_s(ca->path, MAX_PATH_SIZE,
		MAX_PATH_SIZE - 1, "%s", path);
	if (message_size <= 0) {
		tloge("pack path failed\n");
		kfree(tpath);
		return -EFAULT;
	}

	get_task_struct(current);
	cred = koadpt_get_task_cred(current);
	if (!cred) {
		tloge("cred is NULL\n");
		kfree(tpath);
		put_task_struct(current);
		return -EACCES;
	}

	ca->uid = cred->uid.val;
	tlogd("ca_task->comm is %s, path is %s, ca uid is %u\n",
		current->comm, path, cred->uid.val);

	put_cred(cred);
	put_task_struct(current);
	kfree(tpath);
	return 0;
}

int check_ext_agent_access(uint32_t agent_id)
{
	int ret;
	struct ca_info agent_ca = { {0}, 0, 0 };

	ret = get_ca_path_and_uid(&agent_ca);
	if (ret != 0) {
		tloge("get ca path or uid failed\n");
		return ret;
	}
	agent_ca.agent_id = agent_id;

	return is_allowed_agent_ca(&agent_ca, true);
}

static int get_buf_len(const uint8_t *inbuf, uint32_t *buf_len)
{
	if (copy_from_user(buf_len, inbuf, sizeof(*buf_len))) {
		tloge("copy from user failed\n");
		return -EFAULT;
	}

	if (*buf_len > HASH_FILE_MAX_SIZE) {
		tloge("ERROR: file size[0x%x] too big\n", *buf_len);
		return -EFAULT;
	}

	return 0;
}

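/*
 * Mailbox physical addresses are passed to the TEE as two 32-bit value
 * fields: value.a carries the low 32 bits and value.b the high bits
 * (shifted down by ADDR_TRANS_NUM).
 */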
static int send_set_smc_cmd(struct mb_cmd_pack *mb_pack,
	struct tc_ns_smc_cmd *smc_cmd, unsigned int cmd_id,
	const uint8_t *buf_to_tee, uint32_t buf_len)
{
	int ret = 0;

	mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
		(TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
	mb_pack->operation.params[0].value.a =
		(unsigned int)mailbox_virt_to_phys((uintptr_t)buf_to_tee);
	mb_pack->operation.params[0].value.b =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)buf_to_tee) >> ADDR_TRANS_NUM;
	mb_pack->operation.params[1].value.a = buf_len;
	smc_cmd->cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd->cmd_id = cmd_id;
	smc_cmd->operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
	smc_cmd->operation_h_phys =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
	if (tc_ns_smc(smc_cmd) != 0) {
		ret = -EPERM;
		tloge("set native hash failed\n");
	}

	return ret;
}

int tc_ns_set_native_hash(unsigned long arg, unsigned int cmd_id)
{
	int ret;
	struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
	uint8_t *inbuf = (uint8_t *)(uintptr_t)arg;
	uint32_t buf_len = 0;
	uint8_t *buf_to_tee = NULL;
	struct mb_cmd_pack *mb_pack = NULL;

	ret = check_teecd_auth();
#ifdef CONFIG_CADAEMON_AUTH
	if (ret != 0)
		ret = check_cadaemon_auth();
#endif
	if (ret != 0) {
		tloge("teecd or cadaemon auth failed, ret %d\n", ret);
		return -EACCES;
	}

	if (!inbuf)
		return -EINVAL;

	if (get_buf_len(inbuf, &buf_len) != 0)
		return -EFAULT;

	buf_to_tee = mailbox_alloc(buf_len, 0);
	if (!buf_to_tee) {
		tloge("failed to alloc memory!\n");
		return -ENOMEM;
	}

	if (copy_from_user(buf_to_tee, inbuf, buf_len)) {
		tloge("copy from user failed\n");
		mailbox_free(buf_to_tee);
		return -EFAULT;
	}

	mb_pack = mailbox_alloc_cmd_pack();
	if (!mb_pack) {
		tloge("alloc cmd pack failed\n");
		mailbox_free(buf_to_tee);
		return -ENOMEM;
	}

	ret = send_set_smc_cmd(mb_pack, &smc_cmd, cmd_id, buf_to_tee, buf_len);
	mailbox_free(buf_to_tee);
	mailbox_free(mb_pack);

	return ret;
}

int tc_ns_late_init(unsigned long arg)
{
	int ret = 0;
	struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
	uint32_t index = (uint32_t)arg; /* index is uint32, no truncation risk */
	struct mb_cmd_pack *mb_pack = NULL;

	mb_pack = mailbox_alloc_cmd_pack();
	if (!mb_pack) {
		tloge("alloc cmd pack failed\n");
		return -ENOMEM;
	}

	mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT;
	mb_pack->operation.params[0].value.a = index;

	smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd.cmd_id = GLOBAL_CMD_ID_LATE_INIT;
	smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
	smc_cmd.operation_h_phys =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;

	if (tc_ns_smc(&smc_cmd)) {
		ret = -EPERM;
		tloge("late init failed\n");
	}
	mailbox_free(mb_pack);

	return ret;
}

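/*
 * Answer the pending event of the agent owned by this device file,
 * used when the device is being closed.
 */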
void send_event_response_single(const struct tc_ns_dev_file *dev_file)
{
	struct smc_event_data *event_data = NULL;
	struct smc_event_data *tmp = NULL;
	unsigned long flags;
	unsigned int agent_id = 0;

	if (!dev_file)
		return;

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list,
		head) {
		if (event_data->owner == dev_file) {
			agent_id = event_data->agent_id;
			break;
		}
	}
	spin_unlock_irqrestore(&g_agent_control.lock, flags);
	send_event_response(agent_id);
}

struct smc_event_data *find_event_control(unsigned int agent_id)
{
	struct smc_event_data *event_data = NULL;
	struct smc_event_data *tmp_data = NULL;
	unsigned long flags;

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_for_each_entry(event_data, &g_agent_control.agent_list, head) {
		if (event_data->agent_id == agent_id) {
			tmp_data = event_data;
			get_agent_event(event_data);
			break;
		}
	}
	spin_unlock_irqrestore(&g_agent_control.lock, flags);

	return tmp_data;
}

static void unmap_agent_buffer(struct smc_event_data *event_data)
{
	if (!event_data) {
		tloge("event data is NULL\n");
		return;
	}

	if (IS_ERR_OR_NULL(event_data->agent_buff_user))
		return;

	if (vm_munmap((unsigned long)(uintptr_t)event_data->agent_buff_user,
		event_data->agent_buff_size) != 0)
		tloge("unmap failed\n");

	event_data->agent_buff_user = NULL;
}

static void free_event_control(unsigned int agent_id)
{
	struct smc_event_data *event_data = NULL;
	struct smc_event_data *tmp_event = NULL;
	unsigned long flags;
	bool find = false;

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_for_each_entry_safe(event_data, tmp_event, &g_agent_control.agent_list, head) {
		if (event_data->agent_id == agent_id) {
			list_del(&event_data->head);
			find = true;
			break;
		}
	}
	spin_unlock_irqrestore(&g_agent_control.lock, flags);

	if (!find)
		return;

	unmap_agent_buffer(event_data);
	mailbox_free(event_data->agent_buff_kernel);
	event_data->agent_buff_kernel = NULL;
	put_agent_event(event_data);
}

static int init_agent_context(unsigned int agent_id,
	const struct tc_ns_smc_cmd *smc_cmd,
	struct smc_event_data **event_data)
{
	*event_data = find_event_control(agent_id);
	if (!(*event_data)) {
		tloge("agent %u not exist\n", agent_id);
		return -EINVAL;
	}
	tlogd("agent-0x%x: returning client command", agent_id);

	/* store the TUI working device so TUI can be terminated when the device is closed */
	if (is_tui_agent(agent_id)) {
		tloge("TEE_TUI_AGENT_ID: pid-%d", current->pid);
		set_tui_caller_info(smc_cmd->dev_file_id, current->pid);
	}

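	/* make sure the writes above complete before the agent command proceeds */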
	isb();
	wmb();

	return 0;
}

static int wait_agent_response(struct smc_event_data *event_data)
{
	int ret = 0;
	/* only a userspace CA needs to be frozen */
	bool need_freeze = !(current->flags & PF_KTHREAD);
	bool sig_pending = !sigisemptyset(&current->pending.signal);
	bool answered = true;
	int rc;

	do {
		answered = true;
		/*
		 * wait_event_freezable can be interrupted by a signal or by
		 * the freezer, which is invoked to freeze userspace tasks in
		 * suspend. Freezing a task means waking it up through
		 * fake_signal_wake_up and giving it an opportunity to enter
		 * the 'refrigerator' via the try_to_freeze call inside
		 * wait_event_freezable.
		 *
		 * Which scenarios can be frozen?
		 * 1. CA is waiting for agent -> suspend -- OK
		 * 2. suspend -> CA starts agent request -- OK
		 * 3. CA is waiting for agent -> CA is killed -> suspend -- NOK
		 */
		if (need_freeze && !sig_pending) {
			rc = wait_event_freezable(event_data->ca_pending_wq,
				atomic_read(&event_data->ca_run));
			if (rc != -ERESTARTSYS)
				continue;
			if (!sigisemptyset(&current->pending.signal))
				sig_pending = true;
			tloge("agent wait event is interrupted by %s\n",
				sig_pending ? "signal" : "freezer");
			/*
			 * When freezing a userspace task, fake_signal_wake_up
			 * only sets TIF_SIGPENDING without queueing a real
			 * signal. After the task is thawed, the CA needs to
			 * wait for the agent response again, so TIF_SIGPENDING
			 * must be cleared.
			 */
			if (!sig_pending)
				clear_thread_flag(TIF_SIGPENDING);
			answered = false;
		} else {
			rc = wait_event_timeout(event_data->ca_pending_wq,
				atomic_read(&event_data->ca_run),
				(long)(RESLEEP_TIMEOUT * HZ));
			if (rc)
				continue;
			tloge("agent wait event is timeout\n");
			/* if there is no kill signal, just sleep again until the agent wakes us */
			if (!sigkill_pending(current)) {
				answered = false;
			} else {
				tloge("CA is killed, no need to wait for agent response\n");
				event_data->ret_flag = 0;
				ret = -EFAULT;
			}
		}
	} while (!answered);

	return ret;
}

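/*
 * Hand a command over to the registered agent: flag the request as
 * pending, wake the agent up, then block until the agent answers via
 * tc_ns_send_event_response().
 */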
int agent_process_work(const struct tc_ns_smc_cmd *smc_cmd,
	unsigned int agent_id)
{
	struct smc_event_data *event_data = NULL;
	int ret;

	if (!smc_cmd) {
		tloge("smc_cmd is null\n");
		return -EINVAL;
	}

	if (init_agent_context(agent_id, smc_cmd, &event_data))
		return -EINVAL;

	if (atomic_read(&event_data->agent_ready) == AGENT_CRASHED) {
		tloge("agent 0x%x is killed and restarting\n", agent_id);
		put_agent_event(event_data);
		return -EFAULT;
	}

	event_data->ret_flag = 1;
	/* Wake up the agent that will process the command */
	tlogd("agent process work: wakeup the agent");
	wake_up(&event_data->wait_event_wq);
	tlogd("agent 0x%x request, goto sleep, pe->run=%d\n",
		agent_id, atomic_read(&event_data->ca_run));

	ret = wait_agent_response(event_data);
	atomic_set(&event_data->ca_run, 0);
	put_agent_event(event_data);

	/*
	 * when the agent work is done, reset the cmd monitor time and
	 * increase the agent call count, since this is a new smc cmd.
	 */
	cmd_monitor_reset_context();
	return ret;
}

int is_agent_alive(unsigned int agent_id)
{
	struct smc_event_data *event_data = NULL;

	event_data = find_event_control(agent_id);
	if (event_data) {
		put_agent_event(event_data);
		return AGENT_ALIVE;
	}

	return AGENT_DEAD;
}

int tc_ns_wait_event(unsigned int agent_id)
{
	int ret = -EINVAL;
	struct smc_event_data *event_data = NULL;

	tlogd("agent %u waits for command\n", agent_id);

	event_data = find_event_control(agent_id);
	if (event_data) {
		/* the agent is ready to work only once it is waiting for an event */
		atomic_set(&(event_data->agent_ready), AGENT_READY);
		ret = wait_event_interruptible(event_data->wait_event_wq, event_data->ret_flag);
		put_agent_event(event_data);
	}

	return ret;
}

int tc_ns_sync_sys_time(const struct tc_ns_client_time *tc_ns_time)
{
	struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
	int ret = 0;
	struct mb_cmd_pack *mb_pack = NULL;

	if (!tc_ns_time) {
		tloge("tc_ns_time input buffer is NULL\n");
		return -EINVAL;
	}

	mb_pack = mailbox_alloc_cmd_pack();
	if (!mb_pack) {
		tloge("alloc mb pack failed\n");
		return -ENOMEM;
	}

	mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT;
	mb_pack->operation.params[0].value.a = tc_ns_time->seconds;
	mb_pack->operation.params[0].value.b = tc_ns_time->millis;

	smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd.cmd_id = GLOBAL_CMD_ID_ADJUST_TIME;
	smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
	smc_cmd.operation_h_phys =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
	if (tc_ns_smc(&smc_cmd)) {
		tloge("tee adjust time failed, return error\n");
		ret = -EPERM;
	}
	mailbox_free(mb_pack);

	return ret;
}

int sync_system_time_from_user(const struct tc_ns_client_time *user_time)
{
	int ret = 0;
	struct tc_ns_client_time time = { 0 };

	if (!user_time) {
		tloge("user_time input buffer is NULL\n");
		return -EINVAL;
	}

	if (copy_from_user(&time, user_time, sizeof(time))) {
		tloge("copy from user failed\n");
		return -EFAULT;
	}

	ret = tc_ns_sync_sys_time(&time);
	if (ret != 0)
		tloge("sync system time from user failed, ret = 0x%x\n", ret);

	return ret;
}

void sync_system_time_from_kernel(void)
{
	int ret = 0;
	struct tc_ns_client_time time = { 0 };
	struct time_spec kernel_time = {0};

	get_time_spec(&kernel_time);

	time.seconds = (uint32_t)kernel_time.ts.tv_sec;
	time.millis = (uint32_t)(kernel_time.ts.tv_nsec / MS_TO_NS);

	ret = tc_ns_sync_sys_time(&time);
	if (ret != 0)
		tloge("sync system time from kernel failed, ret = 0x%x\n", ret);
}

static struct smc_event_data *check_response_access(unsigned int agent_id)
{
	struct smc_event_data *event_data = find_event_control(agent_id);

	if (!event_data) {
		tloge("Can't get event_data\n");
		return NULL;
	}
	return event_data;
}

static void process_send_event_response(struct smc_event_data *event_data)
{
	if (event_data->ret_flag == 0)
		return;

	event_data->ret_flag = 0;
	/* Send the command back to the TA session waiting for it */
	tlogd("agent wakeup ca\n");
	atomic_set(&event_data->ca_run, 1);
	/* make sure ca_run is set before waking up the CA */
	wake_up(&event_data->ca_pending_wq);
}

int tc_ns_send_event_response(unsigned int agent_id)
{
	struct smc_event_data *event_data = NULL;

	event_data = check_response_access(agent_id);
	if (!event_data) {
		tlogd("agent %u pre-check failed\n", agent_id);
		return -EINVAL;
	}

	tlogd("agent %u sends answer back\n", agent_id);
	process_send_event_response(event_data);
	put_agent_event(event_data);

	return 0;
}

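/*
 * Used on the abnormal-exit path: mark the agent as crashed and
 * release any CA that is still blocked waiting for its answer.
 */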
void send_event_response(unsigned int agent_id)
{
	struct smc_event_data *event_data = find_event_control(agent_id);

	if (!event_data) {
		tloge("Can't get event_data\n");
		return;
	}

	tlogi("agent 0x%x sends answer back\n", agent_id);
	atomic_set(&event_data->agent_ready, AGENT_CRASHED);
	process_send_event_response(event_data);
	put_agent_event(event_data);
}

static void init_restart_agent_node(struct tc_ns_dev_file *dev_file,
	struct smc_event_data *event_data)
{
	tlogi("agent: 0x%x restarting\n", event_data->agent_id);
	event_data->ret_flag = 0;
	event_data->owner = dev_file;
	atomic_set(&event_data->agent_ready, AGENT_REGISTERED);
	init_waitqueue_head(&(event_data->wait_event_wq));
	init_waitqueue_head(&(event_data->send_response_wq));
	init_waitqueue_head(&(event_data->ca_pending_wq));
	atomic_set(&(event_data->ca_run), 0);
}

static int create_new_agent_node(struct tc_ns_dev_file *dev_file,
	struct smc_event_data **event_data, unsigned int agent_id,
	void **agent_buff, uint32_t agent_buff_size)
{
	*agent_buff = mailbox_alloc(agent_buff_size, MB_FLAG_ZERO);
	if (!(*agent_buff)) {
		tloge("alloc agent buff failed\n");
		return -ENOMEM;
	}
	*event_data = kzalloc(sizeof(**event_data), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)(*event_data))) {
		mailbox_free(*agent_buff);
		*agent_buff = NULL;
		*event_data = NULL;
		tloge("alloc event data failed\n");
		return -ENOMEM;
	}
	(*event_data)->agent_id = agent_id;
	(*event_data)->ret_flag = 0;
	(*event_data)->agent_buff_kernel = *agent_buff;
	(*event_data)->agent_buff_size = agent_buff_size;
	(*event_data)->owner = dev_file;
	atomic_set(&(*event_data)->agent_ready, AGENT_REGISTERED);
	init_waitqueue_head(&(*event_data)->wait_event_wq);
	init_waitqueue_head(&(*event_data)->send_response_wq);
	INIT_LIST_HEAD(&(*event_data)->head);
	init_waitqueue_head(&(*event_data)->ca_pending_wq);
	atomic_set(&(*event_data)->ca_run, 0);

	return 0;
}

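/*
 * Map the physical pages of the mailbox agent buffer into the current
 * process's address space so a userspace agent can access them.
 */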
#ifdef CONFIG_LIBLINUX
static unsigned long agent_buffer_map(unsigned long buffer, uint32_t size)
{
	unsigned long user_addr = 0;
	int ret;
	void *priv = NULL;
	pgprot_t pro;

	pro.pgprot = VM_READ | VM_WRITE;

	size = PAGE_ALIGN(size);
	if (!size)
		return -ENOMEM;

	user_addr = liblinux_pal_usermap_prepare(user_addr, size, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, &priv);
	if (IS_ERR_OR_NULL((const void *)user_addr)) {
		tloge("agent usermap prepare failed\n");
		return user_addr;
	}

	ret = remap_pfn_range(NULL, user_addr, buffer >> PAGE_SHIFT, size, pro);
	/* tell the PAL whether the remap actually succeeded */
	liblinux_pal_usermap_finish((const void *)priv, ret == 0);
	if (ret != 0) {
		tloge("remap agent buffer failed, err=%d", ret);
		goto err_out;
	}

	return user_addr;
err_out:
	if (vm_munmap(user_addr, size))
		tloge("munmap failed\n");
	return -EFAULT;
}
#else
static unsigned long agent_buffer_map(unsigned long buffer, uint32_t size)
{
	struct vm_area_struct *vma = NULL;
	unsigned long user_addr;
	int ret;

	user_addr = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, 0);
	if (IS_ERR_VALUE((uintptr_t)user_addr)) {
		tloge("vm mmap failed\n");
		return user_addr;
	}

	down_read(&mm_sem_lock(current->mm));
	vma = find_vma(current->mm, user_addr);
	if (!vma) {
		tloge("user_addr is not valid in vma");
		goto err_out;
	}

	ret = remap_pfn_range(vma, user_addr, buffer >> PAGE_SHIFT, size,
		vma->vm_page_prot);
	if (ret != 0) {
		tloge("remap agent buffer failed, err=%d", ret);
		goto err_out;
	}

	up_read(&mm_sem_lock(current->mm));
	return user_addr;
err_out:
	up_read(&mm_sem_lock(current->mm));
	if (vm_munmap(user_addr, size))
		tloge("munmap failed\n");
	return -EFAULT;
}
#endif

static bool is_valid_agent(unsigned int agent_id,
	unsigned int buffer_size, bool user_agent)
{
	(void)agent_id;
	if (user_agent && (buffer_size > SZ_4K)) {
		tloge("size: %u of user agent's shared mem is invalid\n",
			buffer_size);
		return false;
	}

	return true;
}

static int is_agent_already_exist(unsigned int agent_id,
	struct smc_event_data **event_data, struct tc_ns_dev_file *dev_file, bool *find_flag)
{
	unsigned long flags;
	bool flag = false;
	struct smc_event_data *agent_node = NULL;

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_for_each_entry(agent_node, &g_agent_control.agent_list, head) {
		if (agent_node->agent_id == agent_id) {
			if (atomic_read(&agent_node->agent_ready) != AGENT_CRASHED) {
				tloge("agent proc is not allowed to register twice\n");
				spin_unlock_irqrestore(&g_agent_control.lock, flags);
				return -EINVAL;
			}
			flag = true;
			get_agent_event(agent_node);
			/*
			 * The agent's event_data is already in agent_list, which
			 * indicates the agent did not unregister normally, so
			 * the event_data is reused.
			 */
			init_restart_agent_node(dev_file, agent_node);
			break;
		}
	}
	spin_unlock_irqrestore(&g_agent_control.lock, flags);
	*find_flag = flag;
	if (flag)
		*event_data = agent_node;
	return 0;
}

static void add_event_node_to_list(struct smc_event_data *event_data)
{
	unsigned long flags;

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_add_tail(&event_data->head, &g_agent_control.agent_list);
	atomic_set(&event_data->usage, 1);
	spin_unlock_irqrestore(&g_agent_control.lock, flags);
}

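/*
 * Announce the agent's shared buffer to the TEE with
 * GLOBAL_CMD_ID_REGISTER_AGENT.
 */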
static int register_agent_to_tee(unsigned int agent_id, const void *agent_buff, uint32_t agent_buff_size)
{
	int ret = 0;
	struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
	struct mb_cmd_pack *mb_pack = NULL;

	mb_pack = mailbox_alloc_cmd_pack();
	if (!mb_pack) {
		tloge("alloc mailbox failed\n");
		return -ENOMEM;
	}

	mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
		(TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
	mb_pack->operation.params[0].value.a =
		mailbox_virt_to_phys((uintptr_t)agent_buff);
	mb_pack->operation.params[0].value.b =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)agent_buff) >> ADDR_TRANS_NUM;
	mb_pack->operation.params[1].value.a = agent_buff_size;
	smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd.cmd_id = GLOBAL_CMD_ID_REGISTER_AGENT;
	smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
	smc_cmd.operation_h_phys =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
	smc_cmd.agent_id = agent_id;

	if (tc_ns_smc(&smc_cmd)) {
		ret = -EPERM;
		tloge("register agent to tee failed\n");
	}
	mailbox_free(mb_pack);

	return ret;
}

static int get_agent_buffer(struct smc_event_data *event_data,
	bool user_agent, void **buffer)
{
	/* agent first start or restart, both need a remap */
	if (user_agent) {
		event_data->agent_buff_user =
			(void *)(uintptr_t)agent_buffer_map(
			mailbox_virt_to_phys((uintptr_t)event_data->agent_buff_kernel),
			event_data->agent_buff_size);
		if (IS_ERR(event_data->agent_buff_user)) {
			tloge("vm map agent buffer failed\n");
			return -EFAULT;
		}
		*buffer = event_data->agent_buff_user;
	} else {
		*buffer = event_data->agent_buff_kernel;
	}

	return 0;
}

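/*
 * Register an agent and hand back the buffer it shares with the TEE;
 * a crashed agent's existing event_data is reused instead of being
 * registered again.
 */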
int tc_ns_register_agent(struct tc_ns_dev_file *dev_file,
	unsigned int agent_id, unsigned int buffer_size,
	void **buffer, bool user_agent)
{
	struct smc_event_data *event_data = NULL;
	int ret = -EINVAL;
	bool find_flag = false;
	void *agent_buff = NULL;
	uint32_t size_align;

	/* dev can be null */
	if (!buffer)
		return ret;

	if (!is_valid_agent(agent_id, buffer_size, user_agent))
		return ret;

	size_align = ALIGN(buffer_size, SZ_4K);

	if (is_agent_already_exist(agent_id, &event_data, dev_file, &find_flag))
		return ret;
	if (!find_flag) {
		ret = create_new_agent_node(dev_file, &event_data,
			agent_id, &agent_buff, size_align);
		if (ret != 0)
			return ret;
	}

	if (get_agent_buffer(event_data, user_agent, buffer))
		goto release_rsrc;

	/* find_flag is false means it's a new agent register */
	if (!find_flag) {
		/*
		 * Obtain shared memory, which is released
		 * in tc_ns_unregister_agent
		 */
		ret = register_agent_to_tee(agent_id, agent_buff, size_align);
		if (ret != 0) {
			unmap_agent_buffer(event_data);
			goto release_rsrc;
		}
		add_event_node_to_list(event_data);
	}
	if (find_flag)
		put_agent_event(event_data); /* match the get action */
	return 0;

release_rsrc:
	if (find_flag)
		put_agent_event(event_data); /* match the get action */
	else
		kfree(event_data); /* here event_data can never be NULL */

	if (agent_buff)
		mailbox_free(agent_buff);
	return ret;
}

int tc_ns_unregister_agent(unsigned int agent_id)
{
	struct smc_event_data *event_data = NULL;
	int ret = 0;
	struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
	struct mb_cmd_pack *mb_pack = NULL;

	event_data = find_event_control(agent_id);
	if (!event_data || !event_data->agent_buff_kernel) {
		tloge("agent is not found or kaddr is not allocated\n");
		return -EINVAL;
	}

	mb_pack = mailbox_alloc_cmd_pack();
	if (!mb_pack) {
		tloge("alloc mailbox failed\n");
		put_agent_event(event_data);
		return -ENOMEM;
	}
	mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
		(TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
	mb_pack->operation.params[0].value.a =
		mailbox_virt_to_phys((uintptr_t)event_data->agent_buff_kernel);
	mb_pack->operation.params[0].value.b =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)event_data->agent_buff_kernel) >> ADDR_TRANS_NUM;
	mb_pack->operation.params[1].value.a = SZ_4K;
	smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd.cmd_id = GLOBAL_CMD_ID_UNREGISTER_AGENT;
	smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
	smc_cmd.operation_h_phys =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
	smc_cmd.agent_id = agent_id;
	tlogd("unregistering agent 0x%x\n", agent_id);

	if (tc_ns_smc(&smc_cmd) == 0) {
		free_event_control(agent_id);
	} else {
		ret = -EPERM;
		tloge("unregister agent failed\n");
	}
	put_agent_event(event_data);
	mailbox_free(mb_pack);
	return ret;
}

bool is_system_agent(const struct tc_ns_dev_file *dev_file)
{
	struct smc_event_data *event_data = NULL;
	struct smc_event_data *tmp = NULL;
	bool system_agent = false;
	unsigned long flags;

	if (!dev_file)
		return system_agent;

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list,
		head) {
		if (event_data->owner == dev_file) {
			system_agent = true;
			break;
		}
	}
	spin_unlock_irqrestore(&g_agent_control.lock, flags);

	return system_agent;
}

void send_crashed_event_response_all(const struct tc_ns_dev_file *dev_file)
{
	struct smc_event_data *event_data = NULL;
	struct smc_event_data *tmp = NULL;
	unsigned int agent_id[AGENT_MAX] = {0};
	unsigned int i = 0;
	unsigned long flags;

	if (!dev_file)
		return;

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list,
		head) {
		if (event_data->owner == dev_file && i < AGENT_MAX)
			agent_id[i++] = event_data->agent_id;
	}
	spin_unlock_irqrestore(&g_agent_control.lock, flags);

	for (i = 0; i < AGENT_MAX; i++) {
		if (agent_id[i] != 0)
			send_event_response(agent_id[i]);
	}
}

void tee_agent_clear_dev_owner(const struct tc_ns_dev_file *dev_file)
{
	struct smc_event_data *event_data = NULL;
	struct smc_event_data *tmp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list,
		head) {
		if (event_data->owner == dev_file) {
			event_data->owner = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&g_agent_control.lock, flags);
}

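/*
 * Default worker loop for kernel-space agents: wait for an event from
 * the TEE, run the agent's work callback, then send the answer back.
 */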
static int def_tee_agent_work(void *instance)
{
	int ret = 0;
	struct tee_agent_kernel_ops *agent_instance = NULL;

	agent_instance = instance;
	while (!kthread_should_stop()) {
		tlogd("%s agent loop++++\n", agent_instance->agent_name);
		ret = tc_ns_wait_event(agent_instance->agent_id);
		if (ret != 0) {
			tloge("%s wait event fail\n",
				agent_instance->agent_name);
			break;
		}
		if (agent_instance->tee_agent_work) {
			ret = agent_instance->tee_agent_work(agent_instance);
			if (ret != 0)
				tloge("%s agent work fail\n",
					agent_instance->agent_name);
		}
		ret = tc_ns_send_event_response(agent_instance->agent_id);
		if (ret != 0) {
			tloge("%s send event response fail\n",
				agent_instance->agent_name);
			break;
		}
		tlogd("%s agent loop----\n", agent_instance->agent_name);
	}

	return ret;
}

static int def_tee_agent_run(struct tee_agent_kernel_ops *agent_instance)
{
	struct tc_ns_dev_file dev = {0};
	int ret;

	/* 1. Register the agent buffer to the TEE */
	ret = tc_ns_register_agent(&dev, agent_instance->agent_id,
		agent_instance->agent_buff_size, &agent_instance->agent_buff,
		false);
	if (ret != 0) {
		tloge("register agent buffer failed, ret = 0x%x\n", ret);
		ret = -EINVAL;
		goto out;
	}

	/* 2. Create a thread to run the agent */
	agent_instance->agent_thread =
		kthread_create(def_tee_agent_work, agent_instance,
			"agent_%s", agent_instance->agent_name);
	if (IS_ERR_OR_NULL(agent_instance->agent_thread)) {
		tloge("kthread create fail\n");
		ret = PTR_ERR(agent_instance->agent_thread);
		agent_instance->agent_thread = NULL;
		goto out;
	}
	tz_kthread_bind_mask(agent_instance->agent_thread);
	wake_up_process(agent_instance->agent_thread);
	return 0;

out:
	return ret;
}

static int def_tee_agent_stop(struct tee_agent_kernel_ops *agent_instance)
{
	int ret;

	if (tc_ns_send_event_response(agent_instance->agent_id) != 0)
		tloge("failed to send response for agent %u\n",
			agent_instance->agent_id);
	ret = tc_ns_unregister_agent(agent_instance->agent_id);
	if (ret != 0)
		tloge("failed to unregister agent %u\n",
			agent_instance->agent_id);
	if (!IS_ERR_OR_NULL(agent_instance->agent_thread))
		kthread_stop(agent_instance->agent_thread);

	return 0;
}

static struct tee_agent_kernel_ops g_def_tee_agent_ops = {
	.agent_name = "default",
	.agent_id = 0,
	.tee_agent_init = NULL,
	.tee_agent_run = def_tee_agent_run,
	.tee_agent_work = NULL,
	.tee_agent_exit = NULL,
	.tee_agent_stop = def_tee_agent_stop,
	.tee_agent_crash_work = NULL,
	.agent_buff_size = PAGE_SIZE,
	.list = LIST_HEAD_INIT(g_def_tee_agent_ops.list)
};

static int tee_agent_kernel_init(void)
{
	struct tee_agent_kernel_ops *agent_ops = NULL;
	int ret = 0;

	list_for_each_entry(agent_ops, &g_tee_agent_list, list) {
		/* Check the agent validity */
		if (!agent_ops->agent_id ||
			!agent_ops->agent_name ||
			!agent_ops->tee_agent_work) {
			tloge("agent is invalid\n");
			continue;
		}
		tlogd("ready to init %s agent, id=0x%x\n",
			agent_ops->agent_name, agent_ops->agent_id);

		/* Set agent buff size */
		if (!agent_ops->agent_buff_size)
			agent_ops->agent_buff_size =
				g_def_tee_agent_ops.agent_buff_size;

		/* Initialize the agent */
		if (agent_ops->tee_agent_init)
			ret = agent_ops->tee_agent_init(agent_ops);
		else if (g_def_tee_agent_ops.tee_agent_init)
			ret = g_def_tee_agent_ops.tee_agent_init(agent_ops);
		else
			tlogw("agent id %u has no init function\n",
				agent_ops->agent_id);
		if (ret != 0) {
			tloge("tee_agent_init %s failed\n",
				agent_ops->agent_name);
			continue;
		}

		/* Run the agent */
		if (agent_ops->tee_agent_run)
			ret = agent_ops->tee_agent_run(agent_ops);
		else if (g_def_tee_agent_ops.tee_agent_run)
			ret = g_def_tee_agent_ops.tee_agent_run(agent_ops);
		else
			tlogw("agent id %u has no run function\n",
				agent_ops->agent_id);

		if (ret != 0) {
			tloge("tee_agent_run %s failed\n",
				agent_ops->agent_name);
			if (agent_ops->tee_agent_exit)
				agent_ops->tee_agent_exit(agent_ops);
			continue;
		}
	}

	return 0;
}

static void tee_agent_kernel_exit(void)
{
	struct tee_agent_kernel_ops *agent_ops = NULL;

	list_for_each_entry(agent_ops, &g_tee_agent_list, list) {
		/* Stop the agent */
		if (agent_ops->tee_agent_stop)
			agent_ops->tee_agent_stop(agent_ops);
		else if (g_def_tee_agent_ops.tee_agent_stop)
			g_def_tee_agent_ops.tee_agent_stop(agent_ops);
		else
			tlogw("agent id %u has no stop function\n",
				agent_ops->agent_id);

		/* Uninitialize the agent */
		if (agent_ops->tee_agent_exit)
			agent_ops->tee_agent_exit(agent_ops);
		else if (g_def_tee_agent_ops.tee_agent_exit)
			g_def_tee_agent_ops.tee_agent_exit(agent_ops);
		else
			tlogw("agent id %u has no exit function\n",
				agent_ops->agent_id);
	}
}

int tee_agent_clear_work(struct tc_ns_client_context *context,
	unsigned int dev_file_id)
{
	struct tee_agent_kernel_ops *agent_ops = NULL;

	list_for_each_entry(agent_ops, &g_tee_agent_list, list) {
		if (agent_ops->tee_agent_crash_work)
			agent_ops->tee_agent_crash_work(agent_ops,
				context, dev_file_id);
	}
	return 0;
}

int tee_agent_kernel_register(struct tee_agent_kernel_ops *new_agent)
{
	if (!new_agent)
		return -EINVAL;

	INIT_LIST_HEAD(&new_agent->list);
	list_add_tail(&new_agent->list, &g_tee_agent_list);

	return 0;
}

void agent_init(void)
{
	spin_lock_init(&g_agent_control.lock);
	INIT_LIST_HEAD(&g_agent_control.agent_list);
	INIT_LIST_HEAD(&g_tee_agent_list);

	rpmb_agent_register();
#if defined(CONFIG_MM_VLTMM) || defined(CONFIG_MEMORY_VLTMM)
	(void)vltmm_agent_register();
#endif
	if (tee_agent_kernel_init())
		tloge("tee agent kernel init failed\n");
}

void free_agent(void)
{
	struct smc_event_data *event_data = NULL;
	struct smc_event_data *temp = NULL;
	unsigned long flags;

	tee_agent_kernel_exit();

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_for_each_entry_safe(event_data, temp, &g_agent_control.agent_list, head) {
		list_del(&event_data->head);
		unmap_agent_buffer(event_data);
		mailbox_free(event_data->agent_buff_kernel);
		event_data->agent_buff_kernel = NULL;
		kfree(event_data);
	}
	spin_unlock_irqrestore(&g_agent_control.lock, flags);
}