1 /*
2  * Copyright (C) 2022 Huawei Technologies Co., Ltd.
3  * Description: function for sending smc cmd.
4  *
5  * This software is licensed under the terms of the GNU General Public
6  * License version 2, as published by the Free Software Foundation, and
7  * may be copied, distributed, and modified under those terms.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  */
14 #include "smc_smp.h"
15 #include <linux/of.h>
16 #include <linux/of_reserved_mem.h>
17 #include <linux/sched.h>
18 #include <linux/delay.h>
19 #include <linux/mutex.h>
20 #include <linux/kthread.h>
21 #include <linux/freezer.h>
22 #include <linux/semaphore.h>
23 #include <linux/device.h>
24 #include <linux/workqueue.h>
25 #include <linux/list.h>
26 #include <linux/cpu.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/spi/spi.h>
29 #include <linux/spinlock.h>
30 #include <linux/timer.h>
31 #include <linux/rtc.h>
32 #include <linux/clk-provider.h>
33 #include <linux/clk.h>
34 #include <linux/string.h>
35 #include <linux/debugfs.h>
36 #include <linux/module.h>
37 #include <linux/bitops.h>
38 #include <linux/version.h>
39 #include <linux/cpumask.h>
40 #include <linux/err.h>
41 #ifdef CONFIG_SCHED_SMT_EXPELLING
42 #include <linux/sched/smt.h>
43 #endif
44 
45 #if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
46 #include <linux/sched/mm.h>
47 #include <linux/sched/signal.h>
48 #endif
49 #include <securec.h>
50 #include <asm/cacheflush.h>
51 
52 #ifdef CONFIG_TEE_AUDIT
53 #include <chipset_common/security/hw_kernel_stp_interface.h>
54 #endif
55 
56 #ifdef CONFIG_TEE_LOG_EXCEPTION
57 #include <huawei_platform/log/imonitor.h>
58 #define IMONITOR_TA_CRASH_EVENT_ID 901002003
59 #endif
60 
61 #include "tc_ns_log.h"
62 #include "teek_client_constants.h"
63 #include "tc_ns_client.h"
64 #include "agent.h"
65 #include "teek_ns_client.h"
66 #include "mailbox_mempool.h"
67 #include "cmdmonitor.h"
68 #include "tlogger.h"
69 #include "ko_adapt.h"
70 #include "log_cfg_api.h"
71 #include "tee_compat_check.h"
72 #include "secs_power_ctrl.h"
73 #include "shared_mem.h"
74 #include "tui.h"
75 #include "internal_functions.h"
76 #ifdef CONFIG_SMC_HOOK
77 #include "smc_hook.h"
78 #endif
79 #include "smc_call.h"
80 
81 #define PREEMPT_COUNT		10000
82 #define HZ_COUNT			10
83 #define IDLED_COUNT			100
84 /*
85  * when we cannot find a free smc entry, sleep 1ms;
86  * because the task will be killed in 25s if it does not return,
87  * the retry count is 25s / 1ms
88  */
89 #define FIND_SMC_ENTRY_SLEEP 1
90 #define FIND_SMC_ENTRY_RETRY_MAX_COUNT (CMD_MAX_EXECUTE_TIME * S_TO_MS / FIND_SMC_ENTRY_SLEEP)
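/*
 * Worked example (values assumed for illustration; CMD_MAX_EXECUTE_TIME
 * and S_TO_MS are defined elsewhere): with the 25s kill window above and
 * S_TO_MS = 1000, the bound is 25 * 1000 / 1 = 25000 retries, i.e. one
 * 1ms sleep per retry for the whole 25s window.
 */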
91 
92 #define CPU_ZERO	0
93 #define CPU_ONE	 	1
94 #define CPU_FOUR	4
95 #define CPU_FIVE	5
96 #define CPU_SIX	 	6
97 #define CPU_SEVEN   7
98 #define LOW_BYTE	0xF
99 
100 #define PENDING2_RETRY	  (-1)
101 
102 #define RETRY_WITH_PM	  1
103 #define CLEAN_WITHOUT_PM  2
104 
105 #define MAX_CHAR 0xff
106 
107 #define MAX_SIQ_NUM 4
108 
109 /* Current state of the system */
110 static bool g_sys_crash;
111 
112 struct shadow_work {
113 	struct kthread_work kthwork;
114 	struct work_struct work;
115 	uint64_t target;
116 };
117 
118 unsigned long g_shadow_thread_id = 0;
119 static struct task_struct *g_siq_thread;
120 static struct task_struct *g_smc_svc_thread;
121 static struct task_struct *g_ipi_helper_thread;
122 static DEFINE_KTHREAD_WORKER(g_ipi_helper_worker);
123 
124 enum cmd_reuse {
125 	CLEAR,	  /* clear this cmd index */
126 	RESEND,	  /* use this cmd index to resend */
127 };
128 
129 struct cmd_reuse_info {
130 	int cmd_index;
131 	int saved_index;
132 	enum cmd_reuse cmd_usage;
133 };
134 
135 #if (CONFIG_CPU_AFF_NR != 0)
136 static struct cpumask g_cpu_mask;
137 static int g_mask_flag = 0;
138 #endif
139 
140 #ifdef CONFIG_DRM_ADAPT
141 static struct cpumask g_drm_cpu_mask;
142 static int g_drm_mask_flag = 0;
143 #endif
144 
145 struct tc_ns_smc_queue *g_cmd_data;
146 phys_addr_t g_cmd_phys;
147 
148 static struct list_head g_pending_head;
149 static spinlock_t g_pend_lock;
150 
151 static DECLARE_WAIT_QUEUE_HEAD(siq_th_wait);
152 static DECLARE_WAIT_QUEUE_HEAD(ipi_th_wait);
153 static atomic_t g_siq_th_run;
154 static uint32_t g_siq_queue[MAX_SIQ_NUM];
155 DEFINE_MUTEX(g_siq_lock);
156 
157 enum smc_ops_exit {
158 	SMC_OPS_NORMAL   	= 0x0,
159 	SMC_OPS_SCHEDTO     	= 0x1,
160 	SMC_OPS_START_SHADOW	= 0x2,
161 	SMC_OPS_START_FIQSHD	= 0x3,
162 	SMC_OPS_PROBE_ALIVE	= 0x4,
163 	SMC_OPS_ABORT_TASK	= 0x5,
164 	SMC_EXIT_NORMAL		= 0x0,
165 	SMC_EXIT_PREEMPTED	= 0x1,
166 	SMC_EXIT_SHADOW		= 0x2,
167 	SMC_EXIT_ABORT		= 0x3,
168 #ifdef CONFIG_THIRDPARTY_COMPATIBLE
169 	SMC_EXIT_CRASH          = 0x4,
170 	SMC_EXIT_MAX            = 0x5,
171 #else
172 	SMC_EXIT_MAX		= 0x4,
173 #endif
174 };
175 
176 #define SHADOW_EXIT_RUN			 	0x1234dead
177 #define SMC_EXIT_TARGET_SHADOW_EXIT 0x1
178 
179 #define compile_time_assert(cond, msg) typedef char g_assert_##msg[(cond) ? 1 : -1]
180 
181 #ifndef CONFIG_BIG_SESSION
182 compile_time_assert(sizeof(struct tc_ns_smc_queue) <= PAGE_SIZE,
183 	size_of_tc_ns_smc_queue_too_large);
184 #endif
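/*
 * compile_time_assert works by typedef'ing a char array whose size is
 * -1 when the condition is false, which no compiler accepts. A sketch
 * of a failing use (illustrative only, not part of this driver):
 *
 *	compile_time_assert(1 == 0, example_that_breaks_the_build);
 *
 * expands to "typedef char g_assert_example_that_breaks_the_build[-1];"
 * and stops compilation.
 */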
185 
186 static bool g_reserved_cmd_buffer = false;
187 static u64 g_cmd_size = 0;
188 static bool g_tz_uefi_enable = false;
189 
190 static int __init tz_check_uefi_enable_func(char *str)
191 {
192 	if (str != NULL && *str == '1')
193 		g_tz_uefi_enable = true;
194 
195 	return 0;
196 }
197 early_param("tz_uefi_enable", tz_check_uefi_enable_func);
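/*
 * Usage sketch (assuming standard early_param semantics): booting with
 * "tz_uefi_enable=1" on the kernel command line sets g_tz_uefi_enable;
 * any other value, or omitting the parameter, leaves it false.
 */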
198 
199 #define MIN_CMDLINE_SIZE 0x1000
200 static int reserved_cmdline(struct reserved_mem *rmem)
201 {
202 	if (g_tz_uefi_enable && rmem && rmem->size >= MIN_CMDLINE_SIZE) {
203 		g_cmd_phys = rmem->base;
204 		g_cmd_size = rmem->size;
205 		g_reserved_cmd_buffer = true;
206 	} else {
207 		g_reserved_cmd_buffer = false;
208 	}
209 
210 	return 0;
211 }
212 RESERVEDMEM_OF_DECLARE(g_teeos_cmdline, "teeos-cmdline", reserved_cmdline);
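/*
 * reserved_cmdline() is called for a matching reserved-memory node in
 * the device tree. A hypothetical node that passes the MIN_CMDLINE_SIZE
 * check could look like this (illustration only, address and size are
 * made up):
 *
 *	reserved-memory {
 *		teeos_cmdline: teeos-cmdline@89000000 {
 *			compatible = "teeos-cmdline";
 *			reg = <0x0 0x89000000 0x0 0x1000>;
 *			no-map;
 *		};
 *	};
 */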
213 
214 static void acquire_smc_buf_lock(smc_buf_lock_t *lock)
215 {
216 	int ret;
217 
218 	preempt_disable();
219 	do
220 		ret = (int)cmpxchg(lock, 0, 1);
221 	while (ret != 0);
222 }
223 
224 static inline void release_smc_buf_lock(smc_buf_lock_t *lock)
225 {
226 	(void)cmpxchg(lock, 1, 0);
227 	preempt_enable();
228 }
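/*
 * The pair above forms a minimal cmpxchg-based spin lock that keeps
 * preemption disabled while held, so only short, non-sleeping work may
 * sit inside the critical section. Typical pattern in this file:
 *
 *	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
 *	... test/set/clear bits in the cmd bitmaps, no mutex_lock() ...
 *	release_smc_buf_lock(&g_cmd_data->smc_lock);
 */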
229 
230 static void occupy_setbit_smc_in_doing_entry(int32_t i, int32_t *idx)
231 {
232 	g_cmd_data->in[i].event_nr = (unsigned int)i;
233 	isb();
234 	wmb();
235 	set_bit((unsigned int)i, (unsigned long *)g_cmd_data->in_bitmap);
236 	set_bit((unsigned int)i, (unsigned long *)g_cmd_data->doing_bitmap);
237 	*idx = i;
238 }
239 
240 static int occupy_free_smc_in_entry(const struct tc_ns_smc_cmd *cmd)
241 {
242 	int idx = -1;
243 	int i;
244 	uint32_t retry_count = 0;
245 
246 	if (!cmd) {
247 		tloge("bad parameters! cmd is NULL\n");
248 		return -1;
249 	}
250 	/*
251 	 * Note:
252 	 * acquire_smc_buf_lock disables preemption, and the kernel forbids
253 	 * calling mutex_lock while preemption is disabled.
254 	 * To avoid that case (update_timestamp and update_chksum both call
255 	 * mutex_lock), only the cmd copy is done while preemption is
256 	 * disabled; update_timestamp and update_chksum run afterwards.
257 	 * As soon as this idx of in_bitmap is set, gtask can see this
258 	 * cmd_in, but the cmd_in is not yet ready since update_xxx has not
259 	 * run. So we use a trick here: set both doing_bitmap and in_bitmap
260 	 * first, then clear doing_bitmap after update_xxx is done.
261 	 */
262 get_smc_retry:
263 	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
264 	for (i = 0; i < MAX_SMC_CMD; i++) {
265 		if (test_bit(i, (unsigned long *)g_cmd_data->in_bitmap) != 0)
266 			continue;
267 		if (memcpy_s(&g_cmd_data->in[i], sizeof(g_cmd_data->in[i]),
268 			cmd, sizeof(*cmd)) != EOK) {
269 			tloge("memcpy failed,%s line:%d", __func__, __LINE__);
270 			break;
271 		}
272 		occupy_setbit_smc_in_doing_entry(i, &idx);
273 		break;
274 	}
275 	release_smc_buf_lock(&g_cmd_data->smc_lock);
276 	if (idx == -1) {
277 		if (retry_count <= FIND_SMC_ENTRY_RETRY_MAX_COUNT) {
278 			msleep(FIND_SMC_ENTRY_SLEEP);
279 			retry_count++;
280 			tlogd("can't get any free smc entry and retry:%u\n", retry_count);
281 			goto get_smc_retry;
282 		}
283 		tloge("can't get any free smc entry after retry:%u\n", retry_count);
284 		return -1;
285 	}
286 
287 	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
288 	isb();
289 	wmb();
290 	clear_bit((uint32_t)idx, (unsigned long *)g_cmd_data->doing_bitmap);
291 	release_smc_buf_lock(&g_cmd_data->smc_lock);
292 	return idx;
293 }
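/*
 * Summary of one smc entry's lifecycle as implemented here and in the
 * helpers below (the out_bitmap side is set by the TEE; this file only
 * reads and clears it):
 *
 *	occupy:  in_bitmap = 1, doing_bitmap = 1  (cmd copied in)
 *	publish: doing_bitmap = 0                 (gtask may process it)
 *	done:    out_bitmap = 1                   (answer ready)
 *	collect: out_bitmap = 0; in/doing cleared unless ret_val is
 *	         TEEC_PENDING/TEEC_PENDING2, in which case the index is
 *	         kept for reuse_smc_in_entry() to resend.
 */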
294 
295 static int reuse_smc_in_entry(uint32_t idx)
296 {
297 	int rc = 0;
298 
299 	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
300 	if (!(test_bit((int32_t)idx, (unsigned long *)g_cmd_data->in_bitmap) != 0 &&
301 		test_bit((int32_t)idx, (unsigned long *)g_cmd_data->doing_bitmap) != 0)) {
302 		tloge("invalid cmd to reuse\n");
303 		rc = -1;
304 		goto out;
305 	}
306 	if (memcpy_s(&g_cmd_data->in[idx], sizeof(g_cmd_data->in[idx]),
307 		&g_cmd_data->out[idx], sizeof(g_cmd_data->out[idx])) != EOK) {
308 		tloge("memcpy failed,%s line:%d", __func__, __LINE__);
309 		rc = -1;
310 		goto out;
311 	}
312 
313 	isb();
314 	wmb();
315 	clear_bit(idx, (unsigned long *)g_cmd_data->doing_bitmap);
316 out:
317 	release_smc_buf_lock(&g_cmd_data->smc_lock);
318 	return rc;
319 }
320 
321 static int copy_smc_out_entry(uint32_t idx, struct tc_ns_smc_cmd *copy,
322 	enum cmd_reuse *usage)
323 {
324 	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
325 	if (test_bit((int)idx, (unsigned long *)g_cmd_data->out_bitmap) == 0) {
326 		tloge("cmd out %u is not ready\n", idx);
327 		release_smc_buf_lock(&g_cmd_data->smc_lock);
328 		show_cmd_bitmap();
329 		return -ENOENT;
330 	}
331 	if (memcpy_s(copy, sizeof(*copy), &g_cmd_data->out[idx],
332 		sizeof(g_cmd_data->out[idx])) != EOK) {
333 		tloge("copy smc out failed\n");
334 		release_smc_buf_lock(&g_cmd_data->smc_lock);
335 		return -EFAULT;
336 	}
337 
338 	isb();
339 	wmb();
340 	if (g_cmd_data->out[idx].ret_val == (int)TEEC_PENDING2 ||
341 		g_cmd_data->out[idx].ret_val == (int)TEEC_PENDING) {
342 		*usage = RESEND;
343 	} else {
344 		clear_bit(idx, (unsigned long *)g_cmd_data->in_bitmap);
345 		clear_bit(idx, (unsigned long *)g_cmd_data->doing_bitmap);
346 		*usage = CLEAR;
347 	}
348 	clear_bit(idx, (unsigned long *)g_cmd_data->out_bitmap);
349 	release_smc_buf_lock(&g_cmd_data->smc_lock);
350 
351 	return 0;
352 }
353 
354 static inline void clear_smc_in_entry(uint32_t idx)
355 {
356 	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
357 	clear_bit(idx, (unsigned long *)g_cmd_data->in_bitmap);
358 	release_smc_buf_lock(&g_cmd_data->smc_lock);
359 }
360 
361 static void release_smc_entry(uint32_t idx)
362 {
363 	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
364 	clear_bit(idx, (unsigned long *)g_cmd_data->in_bitmap);
365 	clear_bit(idx, (unsigned long *)g_cmd_data->doing_bitmap);
366 	clear_bit(idx, (unsigned long *)g_cmd_data->out_bitmap);
367 	release_smc_buf_lock(&g_cmd_data->smc_lock);
368 }
369 
370 static bool is_cmd_working_done(uint32_t idx)
371 {
372 	bool ret = false;
373 
374 	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
375 	if (test_bit((int)idx, (unsigned long *)g_cmd_data->out_bitmap) != 0)
376 		ret = true;
377 	release_smc_buf_lock(&g_cmd_data->smc_lock);
378 	return ret;
379 }
380 
381 void occupy_clean_cmd_buf(void)
382 {
383 	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
384 	memset_s(g_cmd_data, sizeof(struct tc_ns_smc_queue), 0, sizeof(struct tc_ns_smc_queue));
385 	release_smc_buf_lock(&g_cmd_data->smc_lock);
386 }
387 
388 static void show_in_bitmap(int *cmd_in, uint32_t len)
389 {
390 	uint32_t idx;
391 	uint32_t in = 0;
392 	char bitmap[MAX_SMC_CMD + 1];
393 
394 	if (len != MAX_SMC_CMD || !g_cmd_data)
395 		return;
396 
397 	for (idx = 0; idx < MAX_SMC_CMD; idx++) {
398 		if (test_bit((int32_t)idx, (unsigned long *)g_cmd_data->in_bitmap) != 0) {
399 			bitmap[idx] = '1';
400 			cmd_in[in++] = (int)idx;
401 		} else {
402 			bitmap[idx] = '0';
403 		}
404 	}
405 	bitmap[MAX_SMC_CMD] = '\0';
406 	tloge("in bitmap: %s\n", bitmap);
407 }
408 
409 static void show_out_bitmap(int *cmd_out, uint32_t len)
410 {
411 	uint32_t idx;
412 	uint32_t out = 0;
413 	char bitmap[MAX_SMC_CMD + 1];
414 
415 	if (len != MAX_SMC_CMD || !g_cmd_data)
416 		return;
417 
418 	for (idx = 0; idx < MAX_SMC_CMD; idx++) {
419 		if (test_bit((int32_t)idx, (unsigned long *)g_cmd_data->out_bitmap) != 0) {
420 			bitmap[idx] = '1';
421 			cmd_out[out++] = (int)idx;
422 		} else {
423 			bitmap[idx] = '0';
424 		}
425 	}
426 	bitmap[MAX_SMC_CMD] = '\0';
427 	tloge("out bitmap: %s\n", bitmap);
428 }
429 
430 static void show_doing_bitmap(void)
431 {
432 	uint32_t idx;
433 	char bitmap[MAX_SMC_CMD + 1];
434 
435 	if (!g_cmd_data)
436 		return;
437 	for (idx = 0; idx < MAX_SMC_CMD; idx++) {
438 		if (test_bit((int)idx, (unsigned long *)g_cmd_data->doing_bitmap) != 0)
439 			bitmap[idx] = '1';
440 		else
441 			bitmap[idx] = '0';
442 	}
443 	bitmap[MAX_SMC_CMD] = '\0';
444 	tloge("doing bitmap: %s\n", bitmap);
445 }
446 
447 static void show_single_cmd_info(const int *cmd, uint32_t len)
448 {
449 	uint32_t idx;
450 
451 	if (len != MAX_SMC_CMD || !g_cmd_data)
452 		return;
453 
454 	for (idx = 0; idx < MAX_SMC_CMD; idx++) {
455 		if (cmd[idx] == -1)
456 			break;
457 		tloge("cmd[%d]: cmd_id=%u, ca_pid=%u, dev_id = 0x%x, "
458 			"event_nr=%u, ret_val=0x%x\n",
459 			cmd[idx],
460 			g_cmd_data->in[cmd[idx]].cmd_id,
461 			g_cmd_data->in[cmd[idx]].ca_pid,
462 			g_cmd_data->in[cmd[idx]].dev_file_id,
463 			g_cmd_data->in[cmd[idx]].event_nr,
464 			g_cmd_data->in[cmd[idx]].ret_val);
465 	}
466 }
467 
468 void show_cmd_bitmap(void)
469 {
470 	int *cmd_in = NULL;
471 	int *cmd_out = NULL;
472 
473 	cmd_in = kzalloc(sizeof(int) * MAX_SMC_CMD, GFP_KERNEL);
474 	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)cmd_in)) {
475 		tloge("out of mem! cannot show in bitmap\n");
476 		return;
477 	}
478 
479 	cmd_out = kzalloc(sizeof(int) * MAX_SMC_CMD, GFP_KERNEL);
480 	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)cmd_out)) {
481 		kfree(cmd_in);
482 		tloge("out of mem! cannot show out bitmap\n");
483 		return;
484 	}
485 
486 	if (memset_s(cmd_in, sizeof(int)* MAX_SMC_CMD, MAX_CHAR, sizeof(int)* MAX_SMC_CMD) != 0 ||
487 		memset_s(cmd_out, sizeof(int)* MAX_SMC_CMD, MAX_CHAR, sizeof(int)* MAX_SMC_CMD) != 0) {
488 		tloge("memset failed\n");
489 		goto error;
490 	}
491 
492 	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
493 
494 	show_in_bitmap(cmd_in, MAX_SMC_CMD);
495 	show_doing_bitmap();
496 	show_out_bitmap(cmd_out, MAX_SMC_CMD);
497 
498 	tloge("cmd in value:\n");
499 	show_single_cmd_info(cmd_in, MAX_SMC_CMD);
500 
501 	tloge("cmd_out value:\n");
502 	show_single_cmd_info(cmd_out, MAX_SMC_CMD);
503 
504 	release_smc_buf_lock(&g_cmd_data->smc_lock);
505 
506 error:
507 	kfree(cmd_in);
508 	kfree(cmd_out);
509 }
510 
511 static struct pending_entry *init_pending_entry(void)
512 {
513 	struct pending_entry *pe = NULL;
514 
515 	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
516 	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)pe)) {
517 		tloge("alloc pe failed\n");
518 		return NULL;
519 	}
520 
521 	atomic_set(&pe->users, 1);
522 	get_task_struct(current);
523 	pe->task = current;
524 
525 #ifdef CONFIG_TA_AFFINITY
526 	cpumask_copy(&pe->ca_mask, CURRENT_CPUS_ALLOWED);
527 	cpumask_copy(&pe->ta_mask, CURRENT_CPUS_ALLOWED);
528 #endif
529 
530 	init_waitqueue_head(&pe->wq);
531 	atomic_set(&pe->run, 0);
532 	INIT_LIST_HEAD(&pe->list);
533 	spin_lock(&g_pend_lock);
534 	list_add_tail(&pe->list, &g_pending_head);
535 	spin_unlock(&g_pend_lock);
536 
537 	return pe;
538 }
539 
540 struct pending_entry *find_pending_entry(pid_t pid)
541 {
542 	struct pending_entry *pe = NULL;
543 
544 	spin_lock(&g_pend_lock);
545 	list_for_each_entry(pe, &g_pending_head, list) {
546 		if (pe->task->pid == pid) {
547 			atomic_inc(&pe->users);
548 			spin_unlock(&g_pend_lock);
549 			return pe;
550 		}
551 	}
552 	spin_unlock(&g_pend_lock);
553 	return NULL;
554 }
555 
556 void foreach_pending_entry(void (*func)(struct pending_entry *))
557 {
558 	struct pending_entry *pe = NULL;
559 
560 	if (!func)
561 		return;
562 
563 	spin_lock(&g_pend_lock);
564 	list_for_each_entry(pe, &g_pending_head, list) {
565 		func(pe);
566 	}
567 	spin_unlock(&g_pend_lock);
568 }
569 
570 void put_pending_entry(struct pending_entry *pe)
571 {
572 	if (!pe)
573 		return;
574 
575 	if (!atomic_dec_and_test(&pe->users))
576 		return;
577 
578 	put_task_struct(pe->task);
579 	kfree(pe);
580 }
581 
582 #ifdef CONFIG_TA_AFFINITY
583 static void restore_cpu_mask(struct pending_entry *pe)
584 {
585 	if (cpumask_equal(&pe->ca_mask, &pe->ta_mask))
586 		return;
587 
588 	set_cpus_allowed_ptr(current, &pe->ca_mask);
589 }
590 #endif
591 
592 static void release_pending_entry(struct pending_entry *pe)
593 {
594 #ifdef CONFIG_TA_AFFINITY
595 	restore_cpu_mask(pe);
596 #endif
597 	spin_lock(&g_pend_lock);
598 	list_del(&pe->list);
599 	spin_unlock(&g_pend_lock);
600 	put_pending_entry(pe);
601 }
602 
603 static inline bool is_shadow_exit(uint64_t target)
604 {
605 	return target & SMC_EXIT_TARGET_SHADOW_EXIT;
606 }
607 
608 /*
609  * Check whether the CA's and the TA's affinity match, in 2 scenarios:
610  * 1. when the TA is blocked to REE
611  * 2. when the CA is woken up by an SPI wakeup
612  * match_ta_affinity returns true if the affinity was changed
613  */
614 #ifdef CONFIG_TA_AFFINITY
615 static bool match_ta_affinity(struct pending_entry *pe)
616 {
617 	if (!cpumask_equal(CURRENT_CPUS_ALLOWED, &pe->ta_mask)) {
618 		if (set_cpus_allowed_ptr(current, &pe->ta_mask)) {
619 			tlogw("set %s affinity failed\n", current->comm);
620 			return false;
621 		}
622 		return true;
623 	}
624 
625 	return false;
626 }
627 #else
628 static inline bool match_ta_affinity(struct pending_entry *pe)
629 {
630 	(void)pe;
631 	return false;
632 }
633 #endif
634 
635 struct smc_cmd_ret {
636 	unsigned long exit;
637 	unsigned long ta;
638 	unsigned long target;
639 };
640 
641 bool sigkill_pending(struct task_struct *tsk)
642 {
643 	bool flag = false;
644 
645 	if (!tsk) {
646 		tloge("tsk is null!\n");
647 		return false;
648 	}
649 
650 	flag = (sigismember(&tsk->pending.signal, SIGKILL) != 0) ||
651 		(sigismember(&tsk->pending.signal, SIGUSR1) != 0);
652 
653 	if (tsk->signal)
654 		return flag || sigismember(&tsk->signal->shared_pending.signal,
655 			SIGKILL);
656 	return flag;
657 }
658 
659 #if (CONFIG_CPU_AFF_NR != 0)
660 static void set_cpu_strategy(struct cpumask *old_mask)
661 {
662 	unsigned int i;
663 
664 	if (g_mask_flag == 0) {
665 		cpumask_clear(&g_cpu_mask);
666 		for (i = 0; i < CONFIG_CPU_AFF_NR; i++)
667 			cpumask_set_cpu(i, &g_cpu_mask);
668 		g_mask_flag = 1;
669 	}
670 	cpumask_copy(old_mask, CURRENT_CPUS_ALLOWED);
671 	set_cpus_allowed_ptr(current, &g_cpu_mask);
672 }
673 #endif
674 
675 #if (CONFIG_CPU_AFF_NR != 0)
676 static void restore_cpu(struct cpumask *old_mask)
677 {
678 	/* current equals old means cpu affinity was never set, no need to restore */
679 	if (cpumask_equal(CURRENT_CPUS_ALLOWED, old_mask))
680 		return;
681 
682 	set_cpus_allowed_ptr(current, old_mask);
683 	schedule();
684 }
685 #endif
686 
687 static bool is_ready_to_kill(bool need_kill)
688 {
689 	return (need_kill && sigkill_pending(current) &&
690 			is_thread_reported(current->pid));
691 }
692 
693 static void set_smc_send_arg(struct smc_in_params *in_param,
694 	const struct smc_cmd_ret *secret, unsigned long ops)
695 {
696 	if (secret->exit == SMC_EXIT_PREEMPTED) {
697 		in_param->x1 = SMC_OPS_SCHEDTO;
698 		in_param->x3 = secret->ta;
699 		in_param->x4 = secret->target;
700 	}
701 
702 	if (ops == SMC_OPS_SCHEDTO || ops == SMC_OPS_START_FIQSHD)
703 		in_param->x4 = secret->target;
704 
705 	tlogd("[cpu %d]begin send x0=%lx x1=%lx x2=%lx x3=%lx x4=%lx\n",
706 		raw_smp_processor_id(), in_param->x0, in_param->x1,
707 		in_param->x2, in_param->x3, in_param->x4);
708 }
709 
710 static void send_asm_smc_cmd(struct smc_in_params *in_param, struct smc_out_params *out_param)
711 {
712 	smc_req(in_param, out_param, 0);
713 }
714 
715 #ifdef CONFIG_TEE_REBOOT
716 int send_smc_cmd_rebooting(uint32_t cmd_id, phys_addr_t cmd_addr, uint32_t cmd_type, const struct tc_ns_smc_cmd *in_cmd)
717 {
718 	struct tc_ns_smc_cmd cmd = { {0}, 0 };
719 	struct smc_in_params in_param = {cmd_id, cmd_addr, cmd_type, cmd_addr >> ADDR_TRANS_NUM, TEE_ERROR_IS_DEAD};
720 	struct smc_out_params out_param = {0};
721 
722 	if (in_cmd != NULL) {
723 		if (memcpy_s(&cmd, sizeof(cmd), in_cmd, sizeof(*in_cmd)) != EOK) {
724 			tloge("memcpy in cmd failed\n");
725 			return -EFAULT;
726 		}
727 		if (occupy_free_smc_in_entry(&cmd) == -1) {
728 			tloge("there's no more smc entry\n");
729 			return -ENOMEM;
730 		}
731 	}
732 retry:
733 	isb();
734 	wmb();
735 	send_asm_smc_cmd(&in_param, &out_param);
736 	isb();
737 	wmb();
738 	if (out_param.exit_reason == SMC_EXIT_PREEMPTED)
739 		goto retry;
740 
741 	return out_param.exit_reason;
742 }
743 #else
744 int send_smc_cmd_rebooting(uint32_t cmd_id, phys_addr_t cmd_addr, uint32_t cmd_type, const struct tc_ns_smc_cmd *in_cmd)
745 {
746 	(void)cmd_id;
747 	(void)cmd_addr;
748 	(void)cmd_type;
749 	(void)in_cmd;
750 	return 0;
751 }
752 #endif
753 
754 static noinline int smp_smc_send(uint32_t cmd, unsigned long ops, unsigned long ca,
755 	struct smc_cmd_ret *secret, bool need_kill)
756 {
757 	struct smc_in_params in_param = { cmd, ops, ca, 0, 0 };
758 	struct smc_out_params out_param = {0};
759 #if (CONFIG_CPU_AFF_NR != 0)
760 	struct cpumask old_mask;
761 #endif
762 
763 #if (CONFIG_CPU_AFF_NR != 0)
764 	set_cpu_strategy(&old_mask);
765 #endif
766 retry:
767 	set_smc_send_arg(&in_param, secret, ops);
768 	tee_trace_add_event(SMC_SEND, 0);
769 	send_asm_smc_cmd(&in_param, &out_param);
770 	tee_trace_add_event(SMC_DONE, 0);
771 	tlogd("[cpu %d] return val %lx exit_reason %lx ta %lx targ %lx\n",
772 		raw_smp_processor_id(), out_param.ret, out_param.exit_reason,
773 		out_param.ta, out_param.target);
774 
775 	secret->exit = out_param.exit_reason;
776 	secret->ta = out_param.ta;
777 	secret->target = out_param.target;
778 
779 	if (out_param.exit_reason == SMC_EXIT_PREEMPTED) {
780 		/*
781 		 * There are 2 ways to send a terminate cmd to kill a running TA:
782 		 * in the current context or in another one. Sending it in another
783 		 * context can hit a concurrency problem: the terminate cmd is sent
784 		 * but not yet processed while the original cmd has already finished.
785 		 * So we send the terminate cmd in the current context.
786 		 */
787 		if (is_ready_to_kill(need_kill)) {
788 			secret->exit = SMC_EXIT_ABORT;
789 			tloge("receive kill signal\n");
790 		} else {
791 #if (!defined(CONFIG_PREEMPT)) || defined(CONFIG_RTOS_PREEMPT_OFF)
792 			/* yield cpu to avoid soft lockup */
793 			cond_resched();
794 #endif
795 			goto retry;
796 		}
797 	}
798 #if (CONFIG_CPU_AFF_NR != 0)
799 	restore_cpu(&old_mask);
800 #endif
801 	return (int)out_param.ret;
802 }
803 
804 static uint64_t send_smc_cmd(uint32_t cmd, phys_addr_t cmd_addr, uint32_t cmd_type, uint8_t wait)
805 {
806 	uint64_t ret = 0;
807 	struct smc_in_params in_param = { cmd, cmd_addr, cmd_type, cmd_addr >> ADDR_TRANS_NUM };
808 	struct smc_out_params out_param = { ret };
809 #ifdef CONFIG_THIRDPARTY_COMPATIBLE
810 	if (g_sys_crash) {
811 		out_param.ret = TSP_CRASH;
812 		return out_param.ret;
813 	}
814 #endif
815 	smc_req(&in_param, &out_param, wait);
816 	ret = out_param.ret;
817 	return ret;
818 }
819 
820 unsigned long raw_smc_send(uint32_t cmd, phys_addr_t cmd_addr,
821 	uint32_t cmd_type, uint8_t wait)
822 {
823 	unsigned long x0;
824 
825 #if (CONFIG_CPU_AFF_NR != 0)
826 	struct cpumask old_mask;
827 	set_cpu_strategy(&old_mask);
828 #endif
829 
830 	x0 = send_smc_cmd(cmd, cmd_addr, cmd_type, wait);
831 
832 #if (CONFIG_CPU_AFF_NR != 0)
833 	restore_cpu(&old_mask);
834 #endif
835 	return x0;
836 }
837 
838 static void siq_dump(phys_addr_t mode, uint32_t siq_mode)
839 {
840 	int ret = raw_smc_send(TSP_REE_SIQ, mode, 0, false);
841 	if (ret == TSP_CRASH) {
842 		tloge("TEEOS has crashed!\n");
843 		g_sys_crash = true;
844 		cmd_monitor_ta_crash(TYPE_CRASH_TEE, NULL, 0);
845 	}
846 
847 	if (siq_mode == SIQ_DUMP_TIMEOUT) {
848 		tz_log_write();
849 	} else if (siq_mode == SIQ_DUMP_SHELL) {
850 #ifdef CONFIG_TEE_LOG_DUMP_PATH
851 		(void)tlogger_store_msg(CONFIG_TEE_LOG_DUMP_PATH,
852 			sizeof(CONFIG_TEE_LOG_DUMP_PATH));
853 #else
854 		tz_log_write();
855 #endif
856 	}
857 	do_cmd_need_archivelog();
858 }
859 
860 static uint32_t get_free_siq_index(void)
861 {
862 	uint32_t i;
863 
864 	for (i = 0; i < MAX_SIQ_NUM; i++) {
865 		if (g_siq_queue[i] == 0)
866 			return i;
867 	}
868 
869 	return MAX_SIQ_NUM;
870 }
871 
872 static uint32_t get_undo_siq_index(void)
873 {
874 	uint32_t i;
875 
876 	for (i = 0; i < MAX_SIQ_NUM; i++) {
877 		if (g_siq_queue[i] != 0)
878 			return i;
879 	}
880 
881 	return MAX_SIQ_NUM;
882 }
883 
884 #define RUN_SIQ_THREAD 1
885 #define STOP_SIQ_THREAD 2
886 static int siq_thread_fn(void *arg)
887 {
888 	int ret;
889 	uint32_t i;
890 	(void)arg;
891 
892 	while (true) {
893 		ret = (int)wait_event_interruptible(siq_th_wait,
894 			atomic_read(&g_siq_th_run));
895 		if (ret != 0) {
896 			tloge("wait event interruptible failed!\n");
897 			return -EINTR;
898 		}
899 		if (atomic_read(&g_siq_th_run) == STOP_SIQ_THREAD)
900 			return 0;
901 
902 		mutex_lock(&g_siq_lock);
903 		do {
904 			i = get_undo_siq_index();
905 			if (i >= MAX_SIQ_NUM)
906 				break;
907 			siq_dump((phys_addr_t)(1), g_siq_queue[i]);
908 			g_siq_queue[i] = 0;
909 		} while (true);
910 		atomic_set(&g_siq_th_run, 0);
911 		mutex_unlock(&g_siq_lock);
912 	}
913 }
914 
915 #ifdef CONFIG_TEE_AUDIT
916 #define MAX_UPLOAD_INFO_LEN	  4
917 #define INFO_HIGH_OFFSET	  24U
918 #define INFO_MID_OFFSET		  16U
919 #define INFO_LOW_OFFSET		  8U
920 
921 static void upload_audit_event(unsigned int eventindex)
922 {
923 #ifdef CONFIG_HW_KERNEL_STP
924 	struct stp_item item;
925 	int ret;
926 	char att_info[MAX_UPLOAD_INFO_LEN + 1] = {0};
927 
928 	att_info[0] = (unsigned char)(eventindex >> INFO_HIGH_OFFSET);
929 	att_info[1] = (unsigned char)(eventindex >> INFO_MID_OFFSET);
930 	att_info[2] = (unsigned char)(eventindex >> INFO_LOW_OFFSET);
931 	att_info[3] = (unsigned char)eventindex;
932 	att_info[MAX_UPLOAD_INFO_LEN] = '\0';
933 	item.id = item_info[ITRUSTEE].id; /* 0x00000185 */
934 	item.status = STP_RISK;
935 	item.credible = STP_REFERENCE;
936 	item.version = 0;
937 	ret = strcpy_s(item.name, STP_ITEM_NAME_LEN, STP_NAME_ITRUSTEE);
938 	if (ret) {
939 		tloge("strncpy failed %x\n", ret);
940 		return;
941 	}
942 	tlogd("stp get size %lx succ\n", sizeof(item_info[ITRUSTEE].name));
943 	ret = kernel_stp_upload(item, att_info);
944 	if (ret)
945 		tloge("stp %x event upload failed\n", eventindex);
946 	else
947 		tloge("stp %x event upload succ\n", eventindex);
948 #else
949 	(void)eventindex;
950 #endif
951 }
952 #endif
953 
954 static void cmd_result_check(const struct tc_ns_smc_cmd *cmd, int cmd_index)
955 {
956 	if (cmd->ret_val == (int)TEEC_PENDING || cmd->ret_val == (int)TEEC_PENDING2)
957 		tlogd("wakeup command %u\n", cmd->event_nr);
958 
959 	if (cmd->ret_val == (int)TEE_ERROR_TAGET_DEAD) {
960 		bool ta_killed = g_cmd_data->in[cmd_index].cmd_id == GLOBAL_CMD_ID_KILL_TASK;
961 		tloge("error smc call: ret = %x and cmd.err_origin=%x, [ta is %s]\n",
962 			cmd->ret_val, cmd->err_origin, (ta_killed == true) ? "killed" : "crash");
963 		cmd_monitor_ta_crash((ta_killed == true) ? TYPE_KILLED_TA : TYPE_CRASH_TA,
964 			cmd->uuid, sizeof(struct tc_uuid));
965 		ta_crash_report_log();
966 	} else if (cmd->ret_val == (int)TEEC_ERROR_TUI_NOT_AVAILABLE) {
967 		do_ns_tui_release();
968 	} else if (cmd->ret_val == (int)TEE_ERROR_AUDIT_FAIL) {
969 		tloge("error smc call: ret = %x and err-origin=%x\n",
970 			cmd->ret_val, cmd->err_origin);
971 #ifdef CONFIG_TEE_AUDIT
972 		tloge("error smc call: status = %x and err-origin=%x\n",
973 			cmd->eventindex, cmd->err_origin);
974 		upload_audit_event(cmd->eventindex);
975 #endif
976 	}
977 }
978 
979 static void set_shadow_smc_param(struct smc_in_params *in_params,
980 	const struct smc_out_params *out_params, int *n_idled)
981 {
982 	if (out_params->exit_reason == SMC_EXIT_PREEMPTED) {
983 		in_params->x0 = TSP_REQUEST;
984 		in_params->x1 = SMC_OPS_SCHEDTO;
985 		in_params->x2 = (unsigned long)current->pid;
986 		in_params->x3 = out_params->ta;
987 		in_params->x4 = out_params->target;
988 	} else if (out_params->exit_reason == SMC_EXIT_NORMAL) {
989 		in_params->x0 = TSP_REQUEST;
990 		in_params->x1 = SMC_OPS_SCHEDTO;
991 		in_params->x2 = (unsigned long)current->pid;
992 		in_params->x3 = 0;
993 		in_params->x4 = 0;
994 		if (*n_idled > IDLED_COUNT) {
995 			*n_idled = 0;
996 			in_params->x1 = SMC_OPS_PROBE_ALIVE;
997 		}
998 	}
999 }
1000 
1001 static void shadow_wo_pm(const void *arg, struct smc_out_params *out_params,
1002 	int *n_idled)
1003 {
1004 	struct smc_in_params in_params = {
1005 		TSP_REQUEST, SMC_OPS_START_SHADOW, current->pid, 0, *(unsigned long *)arg
1006 	};
1007 
1008 	set_shadow_smc_param(&in_params, out_params, n_idled);
1009 	tlogd("%s: [cpu %d] x0=%lx x1=%lx x2=%lx x3=%lx x4=%lx\n",
1010 		__func__, raw_smp_processor_id(), in_params.x0, in_params.x1,
1011 		in_params.x2, in_params.x3, in_params.x4);
1012 
1013 #ifdef CONFIG_THIRDPARTY_COMPATIBLE
1014 	if (g_sys_crash) {
1015 		out_params->ret = TSP_CRASH;
1016 		return;
1017 	}
1018 #endif
1019 	smc_req(&in_params, out_params, 0);
1020 }
1021 
1022 static void set_preempted_counter(int *n_preempted, int *n_idled,
1023 	struct pending_entry *pe)
1024 {
1025 	*n_idled = 0;
1026 	(*n_preempted)++;
1027 
1028 	if (*n_preempted > PREEMPT_COUNT) {
1029 		tlogd("counter too large: retry 10K times on CPU%d\n", smp_processor_id());
1030 		*n_preempted = 0;
1031 	}
1032 #ifndef CONFIG_PREEMPT
1033 	/* yield cpu to avoid soft lockup */
1034 	cond_resched();
1035 #endif
1036 	if (match_ta_affinity(pe))
1037 		tloge("set shadow pid %d affinity after preempted\n",
1038 			pe->task->pid);
1039 }
1040 
1041 static int proc_shadow_thread_normal_exit(struct pending_entry *pe,
1042 	int *n_preempted, int *n_idled, int *ret_val)
1043 {
1044 	long long timeout;
1045 	int rc;
1046 
1047 	if (power_down_cc() != 0) {
1048 		tloge("power down cc failed\n");
1049 		*ret_val = -1;
1050 		return CLEAN_WITHOUT_PM;
1051 	}
1052 	*n_preempted = 0;
1053 
1054 	timeout = HZ * (long)(HZ_COUNT + ((uint8_t)current->pid & LOW_BYTE));
1055 	rc = (int)wait_event_freezable_timeout(pe->wq,
1056 		atomic_read(&pe->run), (long)timeout);
1057 	if (rc == 0)
1058 		(*n_idled)++;
1059 	if (atomic_read(&pe->run) == SHADOW_EXIT_RUN) {
1060 		tlogd("shadow thread work quit, be killed\n");
1061 		return CLEAN_WITHOUT_PM;
1062 	} else {
1063 		atomic_set(&pe->run, 0);
1064 		return RETRY_WITH_PM;
1065 	}
1066 
1067 	return 0;
1068 }
1069 
1070 static bool check_shadow_crash(uint64_t crash_reason, int *ret_val)
1071 {
1072 	if (crash_reason != TSP_CRASH)
1073 		return false;
1074 
1075 	tloge("TEEOS shadow has crashed!\n");
1076 	if (power_down_cc() != 0)
1077 		tloge("power down cc failed\n");
1078 
1079 	g_sys_crash = true;
1080 	cmd_monitor_ta_crash(TYPE_CRASH_TEE, NULL, 0);
1081 	report_log_system_error();
1082 	*ret_val = -1;
1083 	return true;
1084 }
1085 
1086 static void show_other_exit_reason(const struct smc_out_params *params)
1087 {
1088 	if (params->exit_reason == SMC_EXIT_SHADOW) {
1089 		tlogd("probe shadow thread non exit, just quit\n");
1090 		return;
1091 	}
1092 
1093 	tloge("exit on unknown code %ld\n", (long)params->exit_reason);
1094 }
1095 
1096 static int shadow_thread_fn(void *arg)
1097 {
1098 	int n_preempted = 0;
1099 	int ret = 0;
1100 	struct smc_out_params params = { 0, SMC_EXIT_MAX, 0, 0 };
1101 	int n_idled = 0;
1102 	struct pending_entry *pe = NULL;
1103 
1104 	set_freezable();
1105 	pe = init_pending_entry();
1106 	if (!pe) {
1107 		kfree(arg);
1108 		tloge("init pending entry failed\n");
1109 		return -ENOMEM;
1110 	}
1111 	isb();
1112 	wmb();
1113 
1114 retry:
1115 	if (power_on_cc() != 0) {
1116 		ret = -EINVAL;
1117 		tloge("power on cc failed\n");
1118 		goto clean_wo_pm;
1119 	}
1120 
1121 retry_wo_pm:
1122 	shadow_wo_pm(arg, &params, &n_idled);
1123 	if (check_shadow_crash(params.ret, &ret))
1124 		goto clean_wo_pm;
1125 
1126 	if (params.exit_reason == SMC_EXIT_PREEMPTED) {
1127 		set_preempted_counter(&n_preempted, &n_idled, pe);
1128 		goto retry_wo_pm;
1129 	} else if (params.exit_reason == SMC_EXIT_NORMAL) {
1130 		ret = proc_shadow_thread_normal_exit(pe, &n_preempted, &n_idled, &ret);
1131 		if (ret == CLEAN_WITHOUT_PM) {
1132 			goto clean_wo_pm;
1133 		} else if (ret == RETRY_WITH_PM) {
1134 			if (match_ta_affinity(pe))
1135 				tlogd("set shadow pid %d\n", pe->task->pid);
1136 			goto retry;
1137 		}
1138 	} else {
1139 		show_other_exit_reason(&params);
1140 	}
1141 
1142 	if (power_down_cc() != 0) {
1143 		tloge("power down cc failed\n");
1144 		ret = -1;
1145 	}
1146 clean_wo_pm:
1147 	kfree(arg);
1148 	release_pending_entry(pe);
1149 	return ret;
1150 }
1151 
1152 static void shadow_work_func(struct kthread_work *work)
1153 {
1154 	struct task_struct *shadow_thread = NULL;
1155 	struct shadow_work *s_work =
1156 		container_of(work, struct shadow_work, kthwork);
1157 	uint64_t *target_arg = kzalloc(sizeof(uint64_t), GFP_KERNEL);
1158 
1159 	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)target_arg)) {
1160 		tloge("%s: kmalloc failed\n", __func__);
1161 		return;
1162 	}
1163 
1164 	*target_arg = s_work->target;
1165 	shadow_thread = kthread_create(shadow_thread_fn,
1166 		(void *)(uintptr_t)target_arg, "shadow th/%lu",
1167 		g_shadow_thread_id++);
1168 	if (IS_ERR_OR_NULL(shadow_thread)) {
1169 		kfree(target_arg);
1170 		tloge("couldn't create shadow_thread %ld\n",
1171 			PTR_ERR(shadow_thread));
1172 		return;
1173 	}
1174 	tlogd("%s: create shadow thread %lu for target %llx\n",
1175 		__func__, g_shadow_thread_id, *target_arg);
1176 	tz_kthread_bind_mask(shadow_thread);
1177 #if CONFIG_CPU_AFF_NR
1178 	struct cpumask shadow_mask;
1179 	unsigned int i;
1180 
1181 	cpumask_clear(&shadow_mask);
1182 	for (i = 0; i < CONFIG_CPU_AFF_NR; i++)
1183 		cpumask_set_cpu(i, &shadow_mask);
1184 
1185 	koadpt_kthread_bind_mask(shadow_thread, &shadow_mask);
1186 #endif
1187 	wake_up_process(shadow_thread);
1188 }
1189 
1190 static int proc_smc_wakeup_ca(pid_t ca, int which)
1191 {
1192 	if (ca <= 0) {
1193 		tlogw("wakeup for ca <= 0\n");
1194 	} else {
1195 		struct pending_entry *pe = find_pending_entry(ca);
1196 
1197 		if (!pe) {
1198 			(void)raw_smc_send(TSP_REE_SIQ, (phys_addr_t)ca, 0, false);
1199 			tlogd("invalid ca pid=%d for pending entry\n",
1200 				(int)ca);
1201 			return -1;
1202 		}
1203 		atomic_set(&pe->run, which);
1204 		wake_up(&pe->wq);
1205 		tlogd("wakeup pending thread %ld\n", (long)ca);
1206 		put_pending_entry(pe);
1207 	}
1208 	return 0;
1209 }
1210 
1211 void wakeup_pe(struct pending_entry *pe)
1212 {
1213 	if (!pe)
1214 		return;
1215 
1216 	atomic_set(&pe->run, 1);
1217 	wake_up(&pe->wq);
1218 }
1219 
1220 int smc_wakeup_broadcast(void)
1221 {
1222 	foreach_pending_entry(wakeup_pe);
1223 	return 0;
1224 }
1225 
1226 int smc_wakeup_ca(pid_t ca)
1227 {
1228 	tee_trace_add_event(SPI_WAKEUP, (uint64_t)ca);
1229 	return proc_smc_wakeup_ca(ca, 1);
1230 }
1231 
1232 int smc_shadow_exit(pid_t ca)
1233 {
1234 	return proc_smc_wakeup_ca(ca, SHADOW_EXIT_RUN);
1235 }
1236 
1237 void fiq_shadow_work_func(uint64_t target)
1238 {
1239 	struct smc_cmd_ret secret = { SMC_EXIT_MAX, 0, target };
1240 	tee_trace_add_event(INTERRUPT_HANDLE_SPI_REE_SCHEDULED, target);
1241 	secs_suspend_status(target);
1242 	if (power_on_cc() != 0) {
1243 		tloge("power on cc failed\n");
1244 		return;
1245 	}
1246 
1247 	livepatch_down_read_sem();
1248 	smp_smc_send(TSP_REQUEST, (unsigned long)SMC_OPS_START_FIQSHD,
1249 		(unsigned long)(uint32_t)(current->pid), &secret, false);
1250 	livepatch_up_read_sem();
1251 
1252 	if (power_down_cc() != 0)
1253 		tloge("power down cc failed\n");
1254 
1255 	return;
1256 }
1257 
1258 int smc_queue_shadow_worker(uint64_t target)
1259 {
1260 	struct shadow_work work = {
1261 		KTHREAD_WORK_INIT(work.kthwork, shadow_work_func),
1262 		.target = target,
1263 	};
1264 
1265 #if (KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE)
1266 	if (!queue_kthread_work(&g_ipi_helper_worker, &work.kthwork)) {
1267 #else
1268 	if (!kthread_queue_work(&g_ipi_helper_worker, &work.kthwork)) {
1269 #endif
1270 		tloge("ipi helper work fail queue, was already pending\n");
1271 		return -1;
1272 	}
1273 
1274 #if (KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE)
1275 	flush_kthread_work(&work.kthwork);
1276 #else
1277 	kthread_flush_work(&work.kthwork);
1278 #endif
1279 	return 0;
1280 }
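/*
 * The version checks above exist because queue_kthread_work() and
 * flush_kthread_work() were renamed to kthread_queue_work() and
 * kthread_flush_work() in kernel 4.9. The flush also guarantees the
 * on-stack shadow_work is no longer referenced when this returns.
 */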
1281 
1282 #ifdef CONFIG_DRM_ADAPT
1283 #define DRM_USR_PRIOR (-5)
1284 static void set_drm_strategy(void)
1285 {
1286 	if (!g_drm_mask_flag) {
1287 		cpumask_clear(&g_drm_cpu_mask);
1288 		cpumask_set_cpu(CPU_FOUR, &g_drm_cpu_mask);
1289 		cpumask_set_cpu(CPU_FIVE, &g_drm_cpu_mask);
1290 		cpumask_set_cpu(CPU_SIX, &g_drm_cpu_mask);
1291 		cpumask_set_cpu(CPU_SEVEN, &g_drm_cpu_mask);
1292 		g_drm_mask_flag = 1;
1293 	}
1294 
1295 	if (current->group_leader &&
1296 		strstr(current->group_leader->comm, "drm@1.")) {
1297 		set_cpus_allowed_ptr(current, &g_drm_cpu_mask);
1298 		set_user_nice(current, DRM_USR_PRIOR);
1299 	}
1300 }
1301 #endif
1302 
1303 static int smc_ops_normal(struct cmd_reuse_info *info,
1304 	const struct tc_ns_smc_cmd *cmd, u64 ops)
1305 {
1306 	if (ops != SMC_OPS_NORMAL)
1307 		return 0;
1308 
1309 	if (info->cmd_usage == RESEND) {
1310 		if (reuse_smc_in_entry((uint32_t)info->cmd_index) != 0) {
1311 			tloge("reuse smc entry failed\n");
1312 			release_smc_entry((uint32_t)info->cmd_index);
1313 			return -ENOMEM;
1314 		}
1315 	} else {
1316 		info->cmd_index = occupy_free_smc_in_entry(cmd);
1317 		if (info->cmd_index == -1) {
1318 			tloge("there's no more smc entry\n");
1319 			return -ENOMEM;
1320 		}
1321 	}
1322 
1323 	if (info->cmd_usage != CLEAR) {
1324 		info->cmd_index = info->saved_index;
1325 		info->cmd_usage = CLEAR;
1326 	} else {
1327 		info->saved_index = info->cmd_index;
1328 	}
1329 
1330 	tlogd("submit new cmd: cmd.ca=%u cmd-id=%x ev-nr=%u "
1331 		"cmd-index=%u saved-index=%d\n",
1332 		cmd->ca_pid, cmd->cmd_id,
1333 		g_cmd_data->in[info->cmd_index].event_nr, info->cmd_index,
1334 		info->saved_index);
1335 	return 0;
1336 }
1337 
1338 static int smp_smc_send_cmd_done(int cmd_index, struct tc_ns_smc_cmd *cmd,
1339 	struct tc_ns_smc_cmd *in)
1340 {
1341 	cmd_result_check(cmd, cmd_index);
1342 	switch (cmd->ret_val) {
1343 	case TEEC_PENDING2: {
1344 		unsigned int agent_id = cmd->agent_id;
1345 		/* If the agent does not exist, post
1346 		 * the answer right back to the TEE
1347 		 */
1348 		if (agent_process_work(cmd, agent_id) != 0)
1349 			tloge("agent process work failed\n");
1350 		return PENDING2_RETRY;
1351 	}
1352 	case TEE_ERROR_TAGET_DEAD:
1353 	case TEEC_PENDING:
1354 	/* just copy out, and let out to proceed */
1355 	default:
1356 		if (memcpy_s(in, sizeof(*in), cmd, sizeof(*cmd)) != EOK) {
1357 			tloge("memcpy failed,%s line:%d", __func__, __LINE__);
1358 			cmd->ret_val = -1;
1359 		}
1360 
1361 		break;
1362 	}
1363 
1364 	return 0;
1365 }
1366 
1367 #define KERNEL_INDEX 5
1368 static void print_crash_msg(union crash_inf *crash_info)
1369 {
1370 	static const char *tee_critical_app[] = {
1371 		"gtask",
1372 		"teesmcmgr",
1373 		"hmsysmgr",
1374 		"hmfilemgr",
1375 		"platdrv",
1376 		"kernel", /* index must be same with KERNEL_INDEX */
1377 		"vltmm_service",
1378 		"tee_drv_server"
1379 	};
1380 	int app_num = sizeof(tee_critical_app) / sizeof(tee_critical_app[0]);
1381 	const char *crash_app_name = "NULL";
1382 	uint16_t off = crash_info->crash_msg.off;
1383 	int app_index = crash_info->crash_msg.app & LOW_BYTE;
1384 	int halt_reason = crash_info->crash_msg.halt_reason;
1385 
1386 	crash_info->crash_msg.off = 0;
1387 
1388 	if (app_index >= 0 && app_index < app_num)
1389 		crash_app_name = tee_critical_app[app_index];
1390 	else
1391 		tloge("index error: %x\n", crash_info->crash_msg.app);
1392 
1393 	if (app_index == KERNEL_INDEX) {
1394 		tloge("====crash app:%s user sym:%s kernel crash off/size: "
1395 			"<0x%x/0x%x>\n", crash_app_name,
1396 			crash_info->crash_msg.sym_name,
1397 			off, crash_info->crash_msg.size);
1398 		tloge("====crash halt reason: 0x%x far:0x%x fault:0x%x "
1399 			"elr:0x%x (ret_ip: 0x%llx)\n",
1400 			halt_reason, crash_info->crash_msg.far,
1401 			crash_info->crash_msg.fault, crash_info->crash_msg.elr,
1402 			crash_info->crash_reg[2]);
1403 	} else {
1404 		char syms[SYM_NAME_LEN_MAX] = {0};
1405 
1406 		if (memcpy_s(syms, SYM_NAME_LEN_MAX,
1407 			crash_info->crash_msg.sym_name, SYM_NAME_LEN_1) != EOK)
1408 			tloge("memcpy sym name failed!\n");
1409 
1410 		if (memcpy_s(syms + SYM_NAME_LEN_1,
1411 			SYM_NAME_LEN_MAX - SYM_NAME_LEN_1,
1412 			crash_info->crash_msg.sym_name_append, SYM_NAME_LEN_2) != EOK)
1413 			tloge("memcpy sym_name_append failed!\n");
1414 		tloge("====crash app:%s user_sym:%s + <0x%x/0x%x>\n",
1415 			  crash_app_name, syms, off, crash_info->crash_msg.size);
1416 		tloge("====crash far:0x%x fault:%x\n",
1417 			  crash_info->crash_msg.far, crash_info->crash_msg.fault);
1418 	}
1419 }
1420 
1421 void clr_system_crash_flag(void)
1422 {
1423 	g_sys_crash = false;
1424 }
1425 
1426 static int smp_smc_send_process(struct tc_ns_smc_cmd *cmd, u64 ops,
1427 	struct smc_cmd_ret *cmd_ret, int cmd_index)
1428 {
1429 	int ret;
1430 	tlogd("smc send start cmd_id = %u, ca = %u\n",
1431 		cmd->cmd_id, cmd->ca_pid);
1432 
1433 	if (power_on_cc() != 0) {
1434 		tloge("power on cc failed\n");
1435 		cmd->ret_val = -1;
1436 		return -1;
1437 	}
1438 
1439 	ret = smp_smc_send(TSP_REQUEST, (unsigned long)ops,
1440 		(unsigned long)(uint32_t)(current->pid), cmd_ret, ops != SMC_OPS_ABORT_TASK);
1441 
1442 	if (power_down_cc() != 0) {
1443 		tloge("power down cc failed\n");
1444 		cmd->ret_val = -1;
1445 		return -1;
1446 	}
1447 
1448 	tlogd("smc send ret = %x, cmd ret.exit=%ld, cmd index=%d\n",
1449 		ret, (long)cmd_ret->exit, cmd_index);
1450 	isb();
1451 	wmb();
1452 	if (ret == (int)TSP_CRASH) {
1453 		union crash_inf crash_info;
1454 		crash_info.crash_reg[0] = cmd_ret->exit;
1455 		crash_info.crash_reg[1] = cmd_ret->ta;
1456 		crash_info.crash_reg[2] = cmd_ret->target;
1457 
1458 		tloge("TEEOS has crashed!\n");
1459 		print_crash_msg(&crash_info);
1460 
1461 		g_sys_crash = true;
1462 		cmd_monitor_ta_crash(TYPE_CRASH_TEE, NULL, 0);
1463 
1464 		tee_wake_up_reboot();
1465 #ifndef CONFIG_TEE_REBOOT
1466 		report_log_system_error();
1467 #endif
1468 		cmd->ret_val = TEE_ERROR_IS_DEAD;
1469 		return -1;
1470 	}
1471 
1472 	return 0;
1473 }
1474 
1475 static int init_for_smc_send(struct tc_ns_smc_cmd *in,
1476 	struct pending_entry **pe, struct tc_ns_smc_cmd *cmd,
1477 	bool reuse)
1478 {
1479 #ifdef CONFIG_DRM_ADAPT
1480 	set_drm_strategy();
1481 #endif
1482 	*pe = init_pending_entry();
1483 	if (!(*pe)) {
1484 		tloge("init pending entry failed\n");
1485 		return -ENOMEM;
1486 	}
1487 
1488 	in->ca_pid = (unsigned int)current->pid;
1489 	if (reuse)
1490 		return 0;
1491 
1492 	if (memcpy_s(cmd, sizeof(*cmd), in, sizeof(*in)) != EOK) {
1493 		tloge("memcpy in cmd failed\n");
1494 		release_pending_entry(*pe);
1495 		return -EFAULT;
1496 	}
1497 
1498 	return 0;
1499 }
1500 
1501 static bool is_ca_killed(int cmd_index)
1502 {
1503 	(void)cmd_index;
1504 	/* check whether the CA has been killed by a pending signal */
1505 	if (sigkill_pending(current)) {
1506 		/* signal pending, send abort cmd */
1507 		tloge("wait event timeout and find pending signal\n");
1508 		return true;
1509 	}
1510 	return false;
1511 }
1512 
1513 static void clean_smc_resrc(struct cmd_reuse_info info,
1514 	const struct tc_ns_smc_cmd *cmd,
1515 	struct pending_entry *pe)
1516 {
1517 	if (info.cmd_usage != CLEAR && cmd->ret_val != (int)TEEC_PENDING)
1518 		release_smc_entry((uint32_t)info.cmd_index);
1519 
1520 	release_pending_entry(pe);
1521 }
1522 
1523 static int set_abort_cmd(int index)
1524 {
1525 	acquire_smc_buf_lock(&g_cmd_data->smc_lock);
1526 	if (test_bit(index, (unsigned long *)g_cmd_data->doing_bitmap) == 0) {
1527 		release_smc_buf_lock(&g_cmd_data->smc_lock);
1528 		tloge("can't abort an unprocess cmd\n");
1529 		return -1;
1530 	}
1531 
1532 	g_cmd_data->in[index].cmd_id = GLOBAL_CMD_ID_KILL_TASK;
1533 	g_cmd_data->in[index].cmd_type = CMD_TYPE_GLOBAL;
1534 	/* these phy addrs are not necessary, clear them to avoid gtask check err */
1535 	g_cmd_data->in[index].operation_phys = 0;
1536 	g_cmd_data->in[index].operation_h_phys = 0;
1537 	g_cmd_data->in[index].login_data_phy = 0;
1538 	g_cmd_data->in[index].login_data_h_addr = 0;
1539 
1540 	clear_bit((unsigned int)index, (unsigned long *)g_cmd_data->doing_bitmap);
1541 	release_smc_buf_lock(&g_cmd_data->smc_lock);
1542 	tloge("set abort cmd success\n");
1543 
1544 	return 0;
1545 }
1546 
1547 static enum smc_ops_exit process_abort_cmd(int index, const struct pending_entry *pe)
1548 {
1549 	(void)pe;
1550 	if (set_abort_cmd(index) == 0)
1551 		return SMC_OPS_ABORT_TASK;
1552 
1553 	return SMC_OPS_SCHEDTO;
1554 }
1555 
1556 #define TO_STEP_SIZE 5
1557 #define INVALID_STEP_SIZE 0xFFFFFFFFU
1558 
1559 struct timeout_step_t {
1560 	unsigned long steps[TO_STEP_SIZE];
1561 	uint32_t size;
1562 	uint32_t cur;
1563 	bool timeout_reset;
1564 };
1565 
1566 static void init_timeout_step(uint32_t timeout, struct timeout_step_t *step)
1567 {
1568 	uint32_t i = 0;
1569 
1570 	if (timeout == 0) {
1571 		step->steps[0] = RESLEEP_TIMEOUT * HZ;
1572 		step->size = 1;
1573 	} else {
1574 		uint32_t timeout_in_jiffies;
1575 
1576 		if (timeout > RESLEEP_TIMEOUT * MSEC_PER_SEC)
1577 			timeout = RESLEEP_TIMEOUT * MSEC_PER_SEC;
1578 		timeout_in_jiffies = (uint32_t)msecs_to_jiffies(timeout);
1579 
1580 		/*
1581 		 * [timeout_in_jiffies-1, timeout_in_jiffies+2] jiffies
1582 		 * As the REE and TEE ticks have some deviation, to make sure the
1583 		 * last REE timeout fires after the TEE timeout, we set timeout
1584 		 * steps from 'timeout_in_jiffies - 1' to 'timeout_in_jiffies + 2'
1585 		 */
1586 		if (timeout_in_jiffies > 1) {
1587 			step->steps[i++] = timeout_in_jiffies - 1;
1588 			step->steps[i++] = 1;
1589 		} else {
1590 			step->steps[i++] = timeout_in_jiffies;
1591 		}
1592 		step->steps[i++] = 1;
1593 		step->steps[i++] = 1;
1594 
1595 		if (RESLEEP_TIMEOUT * HZ > (timeout_in_jiffies + 2))
1596 			step->steps[i++] = RESLEEP_TIMEOUT * HZ - 2 - timeout_in_jiffies;
1597 		step->size = i;
1598 	}
1599 	step->cur = 0;
1600 }
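/*
 * Worked example (assuming HZ = 100): for timeout = 100ms,
 * msecs_to_jiffies(100) is 10 jiffies, so the steps become
 *	{ 9, 1, 1, 1, RESLEEP_TIMEOUT * HZ - 12 }
 * i.e. wakeups 9, 10, 11 and 12 jiffies in, bracketing the TEE
 * deadline, then one long sleep for the rest of the RESLEEP_TIMEOUT
 * window (provided RESLEEP_TIMEOUT * HZ > 12).
 */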
1601 
1602 enum pending_t {
1603 	PD_WAKEUP,
1604 	PD_TIMEOUT,
1605 	PD_DONE,
1606 	PD_RETRY,
1607 };
1608 
1609 enum smc_status_t {
1610 	ST_DONE,
1611 	ST_RETRY,
1612 };
1613 
1614 static long wait_event_internal(struct pending_entry *pe, struct timeout_step_t *step)
1615 {
1616 	if (!current->mm) {
1617 		/*
1618 		 * The smc svc thread needs to be freezable to solve this problem:
1619 		 * when the system is in hibernation, the TEE image needs to be
1620 		 * backed up in some scenarios, so no smc cmd may enter the TEE.
1621 		 */
1622 		return wait_event_freezable_timeout(pe->wq, atomic_read(&pe->run),
1623 				step->steps[step->cur]);
1624 	} else {
1625 		return wait_event_timeout(pe->wq, atomic_read(&pe->run),
1626 				step->steps[step->cur]);
1627 	}
1628 }
1629 static enum pending_t proc_ta_pending(struct pending_entry *pe,
1630 	struct timeout_step_t *step, uint64_t pending_args, uint32_t cmd_index,
1631 	u64 *ops)
1632 {
1633 	bool kernel_call = false;
1634 	bool woke_up = false;
1635 	/*
1636 	 * if ->mm is NULL, it's a kernel thread and a kthread will never
1637 	 * receive a signal.
1638 	 */
1639 	uint32_t timeout = (uint32_t)pending_args;
1640 	bool timer_no_irq = (pending_args >> 32) == 0 ? false : true;
1641 	uint32_t cur_timeout;
1642 	if (step->cur == INVALID_STEP_SIZE)
1643 		init_timeout_step(timeout, step);
1644 	if (!current->mm)
1645 		kernel_call = true;
1646 resleep:
1647 	cur_timeout = jiffies_to_msecs(step->steps[step->cur]);
1648 	tee_trace_add_event(SMC_SLEEP, 0);
1649 	if (wait_event_internal(pe, step) == 0) {
1650 		if (step->cur < (step->size - 1)) {
1651 			step->cur++;
1652 			/*
1653 			 * As there may be no timer irq in the TEE, we need a chance to
1654 			 * run the timer's irq handler proactively via SMC_OPS_SCHEDTO.
1655 			 */
1656 			if (timer_no_irq) {
1657 				*ops = SMC_OPS_SCHEDTO;
1658 				return PD_TIMEOUT;
1659 			} else {
1660 				goto resleep;
1661 			}
1662 		}
1663 		if (is_ca_killed(cmd_index)) {
1664 			*ops = (u64)process_abort_cmd(cmd_index, pe);
1665 			return PD_WAKEUP;
1666 		}
1667 	} else {
1668 		woke_up = true;
1669 		tlogd("%s woke up\n", __func__);
1670 	}
1671 	atomic_set(&pe->run, 0);
1672 	if (!is_cmd_working_done(cmd_index)) {
1673 		*ops = SMC_OPS_SCHEDTO;
1674 		return PD_WAKEUP;
1675 	} else if (!kernel_call && !woke_up) {
1676 		tloge("cmd done, may miss a spi!\n");
1677 		show_cmd_bitmap();
1678 	}
1679 	tlogd("cmd is done\n");
1680 	return PD_DONE;
1681 }
1682 
1683 static void set_timeout_step(struct timeout_step_t *timeout_step)
1684 {
1685 	if (!timeout_step->timeout_reset)
1686 		return;
1687 
1688 	timeout_step->cur = INVALID_STEP_SIZE;
1689 	timeout_step->timeout_reset = false;
1690 }
1691 
1692 static enum smc_status_t proc_normal_exit(struct pending_entry *pe, u64 *ops,
1693 	struct timeout_step_t *timeout_step, struct smc_cmd_ret *cmd_ret,
1694 	int cmd_index)
1695 {
1696 	enum pending_t pd_ret;
1697 
1698 	/* the notify and set-affinity arrived first, go back to retry directly */
1699 	if (match_ta_affinity(pe)) {
1700 		*ops = SMC_OPS_SCHEDTO;
1701 		return ST_RETRY;
1702 	}
1703 
1704 	pd_ret = proc_ta_pending(pe, timeout_step,
1705 		cmd_ret->ta, (uint32_t)cmd_index, ops);
1706 	if (pd_ret == PD_DONE)
1707 		return ST_DONE;
1708 
1709 	if (pd_ret == PD_WAKEUP)
1710 		timeout_step->timeout_reset = true;
1711 	return ST_RETRY;
1712 }
1713 
1714 static enum smc_status_t handle_cmd_working_done(
1715 	struct tc_ns_smc_cmd *cmd, u64 *ops, struct tc_ns_smc_cmd *in,
1716 	struct cmd_reuse_info *info)
1717 {
1718 	if (copy_smc_out_entry((uint32_t)info->cmd_index, cmd, &info->cmd_usage) != 0) {
1719 		cmd->ret_val = TEEC_ERROR_GENERIC;
1720 		return ST_DONE;
1721 	}
1722 
1723 	if (smp_smc_send_cmd_done(info->cmd_index, cmd, in) != 0) {
1724 		*ops = SMC_OPS_NORMAL; /* cmd will be reused */
1725 		return ST_RETRY;
1726 	}
1727 
1728 	return ST_DONE;
1729 }
1730 
1731 static int smp_smc_send_func(struct tc_ns_smc_cmd *in, bool reuse)
1732 {
1733 	struct cmd_reuse_info info = { 0, 0, CLEAR };
1734 	struct smc_cmd_ret cmd_ret = {0};
1735 	struct tc_ns_smc_cmd cmd = { {0}, 0 };
1736 	struct pending_entry *pe = NULL;
1737 	u64 ops;
1738 	struct timeout_step_t timeout_step =
1739 		{{0, 0, 0, 0}, TO_STEP_SIZE, -1, false};
1740 
1741 	if (init_for_smc_send(in, &pe, &cmd, reuse) != 0)
1742 		return TEEC_ERROR_GENERIC;
1743 
1744 	if (reuse) {
1745 		info.saved_index = (int)in->event_nr;
1746 		info.cmd_index = (int)in->event_nr;
1747 		info.cmd_usage = RESEND;
1748 	}
1749 	ops = SMC_OPS_NORMAL;
1750 
1751 #ifdef CONFIG_SCHED_SMT_EXPELLING
1752 	force_smt_expeller_prepare();
1753 #endif
1754 
1755 retry:
1756 #ifdef CONFIG_TEE_REBOOT
1757 	if (is_tee_rebooting() && in->cmd_id == GLOBAL_CMD_ID_SET_SERVE_CMD) {
1758 		return TEE_ERROR_IS_DEAD;
1759 	}
1760 #endif
1761 
1762 	set_timeout_step(&timeout_step);
1763 
1764 	if (smc_ops_normal(&info, &cmd, ops) != 0) {
1765 		release_pending_entry(pe);
1766 		return TEEC_ERROR_GENERIC;
1767 	}
1768 
1769 	if (smp_smc_send_process(&cmd, ops, &cmd_ret, info.cmd_index) == -1)
1770 		goto clean;
1771 
1772 	if (!is_cmd_working_done((uint32_t)info.cmd_index)) {
1773 		if (cmd_ret.exit == SMC_EXIT_NORMAL) {
1774 			if (proc_normal_exit(pe, &ops, &timeout_step, &cmd_ret,
1775 				info.cmd_index) == ST_RETRY)
1776 				goto retry;
1777 		} else if (cmd_ret.exit == SMC_EXIT_ABORT) {
1778 			ops = (u64)process_abort_cmd(info.cmd_index, pe);
1779 			goto retry;
1780 		} else {
1781 			tloge("invalid cmd work state\n");
1782 			cmd.ret_val = TEEC_ERROR_GENERIC;
1783 			goto clean;
1784 		}
1785 	}
1786 
1787 	if (handle_cmd_working_done(&cmd, &ops, in, &info) == ST_RETRY)
1788 		goto retry;
1789 clean:
1790 	clean_smc_resrc(info, &cmd, pe);
1791 	return cmd.ret_val;
1792 }
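/*
 * Recap of the retry loop above: SMC_EXIT_NORMAL sleeps on the pending
 * entry and retries with SMC_OPS_SCHEDTO (or an abort op if the CA was
 * killed); SMC_EXIT_ABORT turns the in-flight cmd into
 * GLOBAL_CMD_ID_KILL_TASK and resends; a finished cmd is copied out,
 * and TEEC_PENDING2 retries once more after the agent work is done.
 */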
1793 
1794 static int smc_svc_thread_fn(void *arg)
1795 {
1796 	(void)arg;
1797 	set_freezable();
1798 	while (!kthread_should_stop()) {
1799 		struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
1800 		int ret;
1801 
1802 		smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
1803 		smc_cmd.cmd_id = GLOBAL_CMD_ID_SET_SERVE_CMD;
1804 		ret = smp_smc_send_func(&smc_cmd, false);
1805 		tlogd("smc svc return 0x%x\n", ret);
1806 	}
1807 	tloge("smc svc thread stop\n");
1808 	return 0;
1809 }
1810 
1811 void wakeup_tc_siq(uint32_t siq_mode)
1812 {
1813 	uint32_t i;
1814 
1815 	if (siq_mode == 0)
1816 		return;
1817 
1818 	mutex_lock(&g_siq_lock);
1819 	i = get_free_siq_index();
1820 	if (i >= MAX_SIQ_NUM) {
1821 		tloge("dump is too frequent\n");
1822 		mutex_unlock(&g_siq_lock);
1823 		return;
1824 	}
1825 	g_siq_queue[i] = siq_mode;
1826 	atomic_set(&g_siq_th_run, RUN_SIQ_THREAD);
1827 	mutex_unlock(&g_siq_lock);
1828 	wake_up_interruptible(&siq_th_wait);
1829 }
1830 
/*
 * This function first powers on the crypto cell, then sends the smc cmd
 * to trustedcore. After the cmd finishes, it powers off the crypto cell.
 */
static int proc_tc_ns_smc(struct tc_ns_smc_cmd *cmd, bool reuse)
{
	int ret;
	struct cmd_monitor *item = NULL;

	if (g_sys_crash) {
		tloge("ERROR: sys crash happened!!!\n");
		return TEE_ERROR_IS_DEAD;
	}

	if (!cmd) {
		tloge("invalid cmd\n");
		return TEEC_ERROR_GENERIC;
	}
	tlogd("***smc call start on cpu %d ***\n",
		raw_smp_processor_id());

	item = cmd_monitor_log(cmd);
	ret = smp_smc_send_func(cmd, reuse);
	cmd_monitor_logend(item);

	return ret;
}

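/* Thin wrappers: the "with_no_nr" variant resends the cmd slot given by cmd->event_nr */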
int tc_ns_smc(struct tc_ns_smc_cmd *cmd)
{
	return proc_tc_ns_smc(cmd, false);
}

int tc_ns_smc_with_no_nr(struct tc_ns_smc_cmd *cmd)
{
	return proc_tc_ns_smc(cmd, true);
}

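/*
 * Register the shared cmd buffer with the secure world: smc_work_no_wait
 * fires a single raw smc and discards its return value, while
 * send_smc_reset_cmd_buffer re-registers the buffer while the TEE is
 * rebooting.
 */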
static void smc_work_no_wait(uint32_t type)
{
	(void)raw_smc_send(TSP_REQUEST, g_cmd_phys, type, true);
}

void send_smc_reset_cmd_buffer(void)
{
	send_smc_cmd_rebooting(TSP_REQUEST, g_cmd_phys, TC_NS_CMD_TYPE_SECURE_CONFIG, NULL);
}

static void smc_work_set_cmd_buffer(struct work_struct *work)
{
	(void)work;
	smc_work_no_wait(TC_NS_CMD_TYPE_SECURE_CONFIG);
}

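/*
 * Register the cmd buffer from CPU 0 via a one-shot on-stack work item,
 * since the secure world presumably expects this setup call on that core.
 */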
void smc_set_cmd_buffer(void)
{
	struct work_struct work;
	/*
	 * If the TEE supports independent reset, a "TEE reset" clears the
	 * cmd_buffer information in gtask. The tzdriver therefore has to
	 * re-register the cmd_buffer, even if it was already registered
	 * during the UEFI phase.
	 */
#ifndef CONFIG_TEE_RESET
	if (g_reserved_cmd_buffer)
		return;
#endif

	INIT_WORK_ONSTACK(&work, smc_work_set_cmd_buffer);
	/* Run work on CPU 0 */
	schedule_work_on(0, &work);
	flush_work(&work);
	tlogd("smc set cmd buffer done\n");
}

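/*
 * Map the reserved cmd buffer when one is configured, otherwise allocate
 * one and record its physical address in g_cmd_phys.
 */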
static int alloc_cmd_buffer(void)
{
	if (g_reserved_cmd_buffer) {
		tlogi("use reserved cmd buffer\n");
		g_cmd_data = (struct tc_ns_smc_queue *)get_reserved_cmd_vaddr_of(g_cmd_phys, (uint64_t)g_cmd_size);
		if (!g_cmd_data)
			return -ENOMEM;

		return 0;
	}
	g_cmd_data = (struct tc_ns_smc_queue *)(uintptr_t)get_cmd_mem_vaddr();
	if (!g_cmd_data)
		return -ENOMEM;

	g_cmd_phys = get_cmd_mem_paddr((uint64_t)(uintptr_t)g_cmd_data);
	return 0;
}

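/*
 * Bind the siq thread away from CPU0/CPU1, create and start the ipi
 * helper worker thread, and initialise the cmd monitor and pending list.
 */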
static int init_smc_related_rsrc(const struct device *class_dev)
{
	struct cpumask new_mask;
	int ret;

	/*
	 * A TEE dump disables IRQ/FIQ for about 500 ms, so it's not
	 * a good choice to ask CPU0/CPU1 to do the dump.
	 * Bind this kernel thread to CPUs other than CPU0/CPU1.
	 */
	cpumask_setall(&new_mask);
	cpumask_clear_cpu(CPU_ZERO, &new_mask);
	cpumask_clear_cpu(CPU_ONE, &new_mask);
	koadpt_kthread_bind_mask(g_siq_thread, &new_mask);
	/* some products require the kthread to be bound to specific CPUs */
	tz_kthread_bind_mask(g_siq_thread);
	g_ipi_helper_thread = kthread_create(kthread_worker_fn,
		&g_ipi_helper_worker, "ipihelper");
	if (IS_ERR_OR_NULL(g_ipi_helper_thread)) {
		dev_err(class_dev, "couldn't create ipi helper threads %ld\n",
			PTR_ERR(g_ipi_helper_thread));
		ret = (int)PTR_ERR(g_ipi_helper_thread);
		return ret;
	}

	tz_kthread_bind_mask(g_ipi_helper_thread);
	wake_up_process(g_ipi_helper_thread);
	wake_up_process(g_siq_thread);
	init_cmd_monitor();
	INIT_LIST_HEAD(&g_pending_head);
	spin_lock_init(&g_pend_lock);

	return 0;
}

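/*
 * Read the TEE compatibility level out of the shared cmd buffer and
 * clear the buffer afterwards; skipped entirely in UEFI/reserved-buffer
 * mode.
 */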
static int parse_params_from_tee(void)
{
	int ret;
	void *buffer = NULL;

	/* with UEFI and a reserved buffer enabled, skip the teeos compat level check */
	if (g_reserved_cmd_buffer) {
		tlogw("uefi mode, not check teeos compat level\n");
		return 0;
	}

	buffer = (void *)(g_cmd_data->in);
	ret = check_teeos_compat_level((uint32_t *)buffer,
		COMPAT_LEVEL_BUF_LEN);
	if (ret != 0) {
		tloge("check teeos compatibility failed\n");
		return ret;
	}
	if (memset_s(buffer, sizeof(g_cmd_data->in),
		0, sizeof(g_cmd_data->in)) != EOK) {
		tloge("clean the command buffer failed\n");
		return -EFAULT;
	}
	return 0;
}

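/*
 * One-time setup of the smc layer: allocate and register the cmd buffer,
 * verify TEE compatibility, then start the siq and helper threads.
 */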
int smc_context_init(const struct device *class_dev)
{
	int ret;

	if (IS_ERR_OR_NULL(class_dev))
		return -ENOMEM;

	ret = alloc_cmd_buffer();
	if (ret != 0)
		return ret;

	/* Send the allocated buffer to TrustedCore for init */
	smc_set_cmd_buffer();

	ret = parse_params_from_tee();
	if (ret != 0) {
		tloge("parse params from tee failed\n");
		goto free_mem;
	}

	g_siq_thread = kthread_create(siq_thread_fn, NULL, "siqthread/%d", 0);
	if (unlikely(IS_ERR_OR_NULL(g_siq_thread))) {
		dev_err(class_dev, "couldn't create siqthread %ld\n",
			PTR_ERR(g_siq_thread));
		ret = (int)PTR_ERR(g_siq_thread);
		goto free_mem;
	}

	ret = init_smc_related_rsrc(class_dev);
	if (ret != 0)
		goto free_siq_worker;

	return 0;

free_siq_worker:
	kthread_stop(g_siq_thread);
	g_siq_thread = NULL;
free_mem:
	free_cmd_mem((uint64_t)(uintptr_t)g_cmd_data);
	g_cmd_data = NULL;
	return ret;
}

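/* Create, bind and start the smc service thread (see smc_svc_thread_fn) */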
int init_smc_svc_thread(void)
{
	g_smc_svc_thread = kthread_create(smc_svc_thread_fn, NULL,
		"smc_svc_thread");
	if (unlikely(IS_ERR_OR_NULL(g_smc_svc_thread))) {
		tloge("couldn't create smc_svc_thread %ld\n",
			PTR_ERR(g_smc_svc_thread));
		return (int)PTR_ERR(g_smc_svc_thread);
	}
#ifdef CONFIG_SCHED_SMT_EXPELLING
	set_task_expeller(g_smc_svc_thread, SMT_EXPELLER_FORCE_LONG);
#endif
	tz_kthread_bind_mask(g_smc_svc_thread);
	wake_up_process(g_smc_svc_thread);
	return 0;
}

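/*
 * Report a TEE exception through imonitor: attach the info string and
 * the candidate log directories, then send the event. Compiles to a
 * no-op when CONFIG_TEE_LOG_EXCEPTION is off.
 */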
int teeos_log_exception_archive(unsigned int eventid,
	const char *exceptioninfo)
{
#ifdef CONFIG_TEE_LOG_EXCEPTION
	int ret;
	struct imonitor_eventobj *teeos_obj = NULL;

	teeos_obj = imonitor_create_eventobj(eventid);
	if (!teeos_obj) {
		tloge("create imonitor eventobj failed\n");
		return -ENOMEM;
	}
	if (exceptioninfo) {
		tlogi("upload exception info: [%s]\n", exceptioninfo);
		ret = imonitor_set_param(teeos_obj, 0, (long)(uintptr_t)exceptioninfo);
	} else {
		ret = imonitor_set_param(teeos_obj, 0, (long)(uintptr_t)"teeos something crash");
	}
	if (ret) {
		tloge("imonitor_set_param failed\n");
		imonitor_destroy_eventobj(teeos_obj);
		return ret;
	}
	ret = imonitor_add_dynamic_path(teeos_obj, "/data/vendor/log/hisi_logs/tee");
	if (ret) {
		tloge("add path failed\n");
		imonitor_destroy_eventobj(teeos_obj);
		return ret;
	}
	ret = imonitor_add_dynamic_path(teeos_obj, "/data/log/tee");
	if (ret) {
		tloge("add path failed\n");
		imonitor_destroy_eventobj(teeos_obj);
		return ret;
	}
	ret = imonitor_send_event(teeos_obj);
	imonitor_destroy_eventobj(teeos_obj);
	return ret;
#else
	(void)eventid;
	(void)exceptioninfo;
	return 0;
#endif
}

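/* Stop the smc service thread if it was started successfully */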
void svc_thread_release(void)
{
	if (!IS_ERR_OR_NULL(g_smc_svc_thread)) {
		kthread_stop(g_smc_svc_thread);
		g_smc_svc_thread = NULL;
	}
}

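/*
 * Tear-down path: release the cmd buffer, stop all worker threads and
 * free every pending entry still on g_pending_head.
 */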
void free_smc_data(void)
{
	struct pending_entry *pe = NULL, *temp = NULL;

	if (g_reserved_cmd_buffer)
		iounmap((void __iomem *)g_cmd_data);
	else
		free_cmd_mem((uint64_t)(uintptr_t)g_cmd_data);
	smc_wakeup_broadcast();
	svc_thread_release();
	if (!IS_ERR_OR_NULL(g_siq_thread)) {
		atomic_set(&g_siq_th_run, STOP_SIQ_THREAD);
		wake_up_interruptible(&siq_th_wait);
		kthread_stop(g_siq_thread);
		g_siq_thread = NULL;
	}

#if (KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE)
	flush_kthread_worker(&g_ipi_helper_worker);
#else
	kthread_flush_worker(&g_ipi_helper_worker);
#endif
	if (!IS_ERR_OR_NULL(g_ipi_helper_thread)) {
		kthread_stop(g_ipi_helper_thread);
		g_ipi_helper_thread = NULL;
	}
	free_cmd_monitor();

	spin_lock(&g_pend_lock);
	list_for_each_entry_safe(pe, temp, &g_pending_head, list) {
		list_del(&pe->list);
		put_task_struct(pe->task);
		kfree(pe);
	}
	spin_unlock(&g_pend_lock);
}