1 /*
2 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
3 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without modification,
6 * are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this list of
9 * conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
12 * of conditions and the following disclaimer in the documentation and/or other materials
13 * provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
16 * to endorse or promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include "smc.h"
33 #include <securec.h>
34 #include <linux/sched.h>
35 #include "agent.h"
36 #include "cmdmonitor.h"
37 #include "tc_ns_client.h"
38 #include "tc_ns_log.h"
39 #include "teek_client_constants.h"
40 #include "teek_ns_client.h"
41 #include "tzdriver_compat.h"
42 #ifdef SECURITY_AUTH_ENHANCE
43 #include "security_auth_enhance.h"
44
45 struct SessionCryptoInfo *g_sessionRootKey = NULL;
struct SessionCryptoInfo *GetSessionRootKeyInstance(void)
47 {
48 return g_sessionRootKey;
49 }
50 #endif
51
52 #define SECS_SUSPEND_STATUS 0xA5A5
53 #define PREEMPT_COUNT 10000
54 #define HZ_COUNT 10
55 #define IDLED_COUNT 100
56
57 #define MAX_EMPTY_RUNS 100
58 #define TZ_CPU_ZERO 0
59 #define TZ_CPU_ONE 1
60 #define TZ_CPU_FOUR 4
61 #define TZ_CPU_FIVE 5
62 #define TZ_CPU_SIX 6
63 #define TZ_CPU_SEVEN 7
64 #define LOW_BYTE 0xF
65
/* Current state of the system: set once the TEE OS reports a crash */
static uint8_t g_sysCrash;
68
69 enum SPI_CLK_MODE {
70 SPI_CLK_OFF = 0,
71 SPI_CLK_ON,
72 };
73
74 typedef struct {
75 int *nIdled;
76 uint64_t *ret;
77 uint64_t *exitReason;
78 uint64_t *ta;
79 uint64_t *target;
80 } WoPmParams;
81
82 struct ShadowWork {
83 struct work_struct work;
84 uint64_t target;
85 };
86
87 unsigned long g_shadowThreadId = 0;
88 static LosTaskCB *g_siqThread = NULL;
89 static LosTaskCB *g_smcSvcThread = NULL;
90 struct workqueue_struct *g_ipiHelperWq = NULL;
91
92 #ifdef SECURITY_AUTH_ENHANCE
93 #define MAX_SMC_CMD 18
94 #else
95 #define MAX_SMC_CMD 23
96 #endif
97
98 typedef uint32_t SmcBufLockT;
99
typedef struct __attribute__((__packed__)) TcNsSmcQueue {
    /* set when the CA sends a cmd in, cleared after the cmd out returns */
    DECLARE_BITMAP(inBitmap, MAX_SMC_CMD);
    /* set when gtask picks up the cmd in, cleared after the cmd out returns */
    DECLARE_BITMAP(doingBitmap, MAX_SMC_CMD);
    /* set when gtask produces the cmd out, cleared after the cmd out returns */
    DECLARE_BITMAP(outBitmap, MAX_SMC_CMD);
    SmcBufLockT smcLock;
    uint32_t lastIn;
    TcNsSmcCmd in[MAX_SMC_CMD];
    uint32_t lastOut;
    TcNsSmcCmd out[MAX_SMC_CMD];
} TcNsSmcQueue;
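
/*
 * Editorial sketch (not authoritative documentation) of how one queue slot
 * moves through the three bitmaps above, based on the helpers below:
 *
 *   OccupyFreeSmcInEntry():  copy cmd into in[i], set inBitmap and doingBitmap;
 *                            after the optional UpdateTimestamp/UpdateChksum
 *                            step, clear doingBitmap so gtask treats in[i] as
 *                            complete.
 *   secure side (gtask):     consumes in[i], later fills out[i] and sets outBitmap.
 *   CopySmcOutEntry():       copies out[i] back and clears outBitmap; inBitmap
 *                            and doingBitmap are kept only when the reply is
 *                            TEEC_PENDING/TEEC_PENDING2, so ReuseSmcInEntry()
 *                            can resend on the same slot.
 */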
113
114 TcNsSmcQueue *g_cmdData = NULL;
115 paddr_t g_cmdPhys;
116
117 static struct list_head g_pendingHead;
118 static spinlock_t g_pendLock;
119
static inline void AcquireSmcBufLock(SmcBufLockT *lock)
121 {
122 int ret;
123 PreemptDisable();
124 do {
125 ret = CmpXchg(lock, 0, 1);
126 } while (ret);
127 }
128
static inline void ReleaseSmcBufLock(SmcBufLockT *lock)
130 {
131 (void)CmpXchg(lock, 1, 0);
132 PreemptEnable();
133 }
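
/*
 * Editorial note: the pair above forms a small spin lock over the shared SMC
 * buffer; preemption is disabled first and CmpXchg() spins until the lock word
 * flips from 0 to 1. A minimal usage sketch mirroring the callers in this file:
 *
 *   AcquireSmcBufLock(&g_cmdData->smcLock);
 *   ... manipulate g_cmdData->inBitmap / in[] ...
 *   ReleaseSmcBufLock(&g_cmdData->smcLock);
 *
 * Because preemption stays disabled while the lock is held, nothing that may
 * sleep (e.g. mutex_lock) can be called inside the critical section.
 */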
134
static int OccupyFreeSmcInEntry(const TcNsSmcCmd *cmd)
136 {
137 int idx = -1;
138 int i;
139
140 if (cmd == NULL) {
141 tloge("Bad parameters! cmd is NULL.\n");
142 return -1;
143 }
    /* Note:
     * AcquireSmcBufLock disables preemption, and the kernel forbids calling
     * mutex_lock while preemption is disabled. Since UpdateTimestamp and
     * UpdateChksum may call mutex_lock, only the cmd copy is done with
     * preemption disabled; UpdateTimestamp and UpdateChksum run afterwards.
     * As soon as this idx of inBitmap is set, gtask can see this cmdIn even
     * though it is not yet complete (update_xxx has not run). So we use a
     * small trick: set both doingBitmap and inBitmap first, and clear
     * doingBitmap only after update_xxx is done.
     */
155 AcquireSmcBufLock(&g_cmdData->smcLock);
156 for (i = 0; i < MAX_SMC_CMD; i++) {
157 if (HmTestBit(i, g_cmdData->inBitmap)) {
158 continue;
159 }
160 if (memcpy_s(&g_cmdData->in[i], sizeof(g_cmdData->in[i]), cmd, sizeof(*cmd)) != EOK) {
161 tloge("memcpy_s failed,%s line:%d", __func__, __LINE__);
162 break;
163 }
164 g_cmdData->in[i].eventNr = i;
165 ISB;
166 DSB;
167 HmSetBit(i, g_cmdData->inBitmap);
168 HmSetBit(i, g_cmdData->doingBitmap);
169 idx = i;
170 break;
171 }
172 ReleaseSmcBufLock(&g_cmdData->smcLock);
173 if (idx == -1) {
174 tloge("can't get any free smc entry\n");
175 return -1;
176 }
177 #ifdef SECURITY_AUTH_ENHANCE
178 if (UpdateTimestamp(&g_cmdData->in[idx])) {
179 tloge("UpdateTimestamp failed !\n");
180 goto clean;
181 }
182 if (UpdateChksum(&g_cmdData->in[idx])) {
183 tloge("UpdateChksum failed.\n");
184 goto clean;
185 }
186 #endif
187
188 AcquireSmcBufLock(&g_cmdData->smcLock);
189 ISB;
190 DSB;
191 HmClearBit(idx, g_cmdData->doingBitmap);
192 ReleaseSmcBufLock(&g_cmdData->smcLock);
193 return idx;
194 clean:
195 AcquireSmcBufLock(&g_cmdData->smcLock);
196 HmClearBit(i, g_cmdData->inBitmap);
197 HmClearBit(i, g_cmdData->doingBitmap);
198 ReleaseSmcBufLock(&g_cmdData->smcLock);
199 return -1;
200 }
201
static int ReuseSmcInEntry(uint32_t idx)
203 {
204 int rc = 0;
205
206 AcquireSmcBufLock(&g_cmdData->smcLock);
207 if (!(HmTestBit(idx, g_cmdData->inBitmap) &&
208 HmTestBit(idx, g_cmdData->doingBitmap))) {
209 tloge("invalid cmd to reuse\n");
210 rc = -1;
211 goto out;
212 }
213 if (memcpy_s(&g_cmdData->in[idx], sizeof(g_cmdData->in[idx]),
214 &g_cmdData->out[idx], sizeof(g_cmdData->out[idx])) != EOK) {
215 tloge("memcpy_s failed,%s line:%d", __func__, __LINE__);
216 rc = -1;
217 goto out;
218 }
219 ReleaseSmcBufLock(&g_cmdData->smcLock);
220 #ifdef SECURITY_AUTH_ENHANCE
221 if (UpdateTimestamp(&g_cmdData->in[idx])) {
222 tloge("UpdateTimestamp failed !\n");
223 return -1;
224 }
225 if (UpdateChksum(&g_cmdData->in[idx])) {
226 tloge("UpdateChksum failed.\n");
227 return -1;
228 }
229 #endif
230
231 AcquireSmcBufLock(&g_cmdData->smcLock);
232 ISB;
233 DSB;
234 HmClearBit(idx, g_cmdData->doingBitmap);
235 out:
236 ReleaseSmcBufLock(&g_cmdData->smcLock);
237 return rc;
238 }
239
enum CmdReuse {
    CLEAR, /* clear this cmd index */
    RESEND, /* reuse this cmd index to resend */
};
244
static int CopySmcOutEntry(uint32_t idx, TcNsSmcCmd *copy,
    enum CmdReuse *usage)
247 {
248 bool paramCheck = false;
249
250 paramCheck = (copy == NULL || usage == NULL);
251 if (paramCheck == true) {
252 tloge("Bad parameters!\n");
253 return -1;
254 }
255 AcquireSmcBufLock(&g_cmdData->smcLock);
256 if (!HmTestBit(idx, g_cmdData->outBitmap)) {
257 tloge("cmd out %u is not ready\n", idx);
258 ShowCmdBitmap();
259 ReleaseSmcBufLock(&g_cmdData->smcLock);
260 return -1;
261 }
262
263 if (memcpy_s(copy, sizeof(*copy), &g_cmdData->out[idx],
264 sizeof(g_cmdData->out[idx]))) {
265 tloge("copy smc out failed\n");
266 ReleaseSmcBufLock(&g_cmdData->smcLock);
267 return -1;
268 }
269
270 ISB;
271 DSB;
272 if (g_cmdData->out[idx].retVal == TEEC_PENDING2 ||
273 g_cmdData->out[idx].retVal == TEEC_PENDING) {
274 *usage = RESEND;
275 } else {
276 HmClearBit(idx, g_cmdData->inBitmap);
277 HmClearBit(idx, g_cmdData->doingBitmap);
278 *usage = CLEAR;
279 }
280 HmClearBit(idx, g_cmdData->outBitmap);
281 ReleaseSmcBufLock(&g_cmdData->smcLock);
282 return 0;
283 }
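
/*
 * Editorial note: CopySmcOutEntry() tells the caller through *usage whether
 * the slot can be recycled. A TEEC_PENDING/TEEC_PENDING2 reply yields RESEND,
 * so SmpSmcSendFunc() keeps the same cmdIndex and resubmits via
 * ReuseSmcInEntry(); any other result yields CLEAR and the in/doing bits are
 * released here.
 */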
284
static inline void ReleaseSmcEntry(uint32_t idx)
286 {
287 AcquireSmcBufLock(&g_cmdData->smcLock);
288 HmClearBit(idx, g_cmdData->inBitmap);
289 HmClearBit(idx, g_cmdData->doingBitmap);
290 HmClearBit(idx, g_cmdData->outBitmap);
291 ReleaseSmcBufLock(&g_cmdData->smcLock);
292 }
293
static inline int IsCmdWorkingDone(uint32_t idx)
295 {
296 bool ret = false;
297 AcquireSmcBufLock(&g_cmdData->smcLock);
298
299 if (HmTestBit(idx, g_cmdData->outBitmap)) {
300 ret = true;
301 }
302 ReleaseSmcBufLock(&g_cmdData->smcLock);
303 return ret;
304 }
305
static void ShowInBitmap(int *cmdIn, uint32_t len)
307 {
308 uint32_t idx;
309 uint32_t in = 0;
310 char bitmap[MAX_SMC_CMD + 1];
311 bool checkValue = (len != MAX_SMC_CMD ||
312 g_cmdData == NULL);
313
314 if (checkValue == true) {
315 return;
316 }
317 for (idx = 0; idx < MAX_SMC_CMD; idx++) {
318 if (HmTestBit(idx, g_cmdData->inBitmap)) {
319 bitmap[idx] = '1';
320 cmdIn[in++] = idx;
321 } else {
322 bitmap[idx] = '0';
323 }
324 }
325 bitmap[MAX_SMC_CMD] = '\0';
326 tloge("inBitmap: %s\n", bitmap);
327 }
328
static void ShowOutBitmap(int *cmdOut, uint32_t len)
330 {
331 uint32_t idx;
332 uint32_t out = 0;
333 char bitmap[MAX_SMC_CMD + 1];
334 bool checkValue = (len != MAX_SMC_CMD ||
335 g_cmdData == NULL);
336
337 if (checkValue == true) {
338 return;
339 }
340 for (idx = 0; idx < MAX_SMC_CMD; idx++) {
341 if (HmTestBit(idx, g_cmdData->outBitmap)) {
342 bitmap[idx] = '1';
343 cmdOut[out++] = idx;
344 } else {
345 bitmap[idx] = '0';
346 }
347 }
348 bitmap[MAX_SMC_CMD] = '\0';
349 tloge("outBitmap: %s\n", bitmap);
350 }
351
static void ShowDoingBitmap(void)
353 {
354 uint32_t idx;
355 char bitmap[MAX_SMC_CMD + 1];
356
357 if (g_cmdData == NULL) {
358 return;
359 }
360 for (idx = 0; idx < MAX_SMC_CMD; idx++) {
361 if (HmTestBit(idx, g_cmdData->doingBitmap)) {
362 bitmap[idx] = '1';
363 } else {
364 bitmap[idx] = '0';
365 }
366 }
367 bitmap[MAX_SMC_CMD] = '\0';
368 tloge("doingBitmap: %s\n", bitmap);
369 }
370
void ShowCmdBitmapWithLock(void)
372 {
373 if (g_cmdData == NULL) {
374 return;
375 }
376 AcquireSmcBufLock(&g_cmdData->smcLock);
377 ShowCmdBitmap();
378 ReleaseSmcBufLock(&g_cmdData->smcLock);
379 }
380
void ShowCmdBitmap(void)
382 {
383 uint32_t idx;
384 int cmdIn[MAX_SMC_CMD];
385 int cmdOut[MAX_SMC_CMD];
386 bool checkValue = false;
387
388 if (g_cmdData == NULL) {
389 return;
390 }
391
392 checkValue = memset_s(cmdIn, sizeof(cmdIn), -1, sizeof(cmdIn)) ||
393 memset_s(cmdOut, sizeof(cmdOut), -1, sizeof(cmdOut));
394 if (checkValue) {
395 tloge("memset failed\n");
396 return;
397 }
398 ShowInBitmap(cmdIn, MAX_SMC_CMD);
399 ShowDoingBitmap();
400 ShowOutBitmap(cmdOut, MAX_SMC_CMD);
401
402 tloge("cmdIn value:\n");
403 for (idx = 0; idx < MAX_SMC_CMD; idx++) {
404 if (cmdIn[idx] == -1) {
405 break;
406 }
407 tloge("cmd[%d]: cmdId=%u, caPid=%u, devId = 0x%x, eventNr=%u, retVal=0x%x\n",
408 cmdIn[idx],
409 g_cmdData->in[cmdIn[idx]].cmdId,
410 g_cmdData->in[cmdIn[idx]].caPid,
411 g_cmdData->in[cmdIn[idx]].devFileId,
412 g_cmdData->in[cmdIn[idx]].eventNr,
413 g_cmdData->in[cmdIn[idx]].retVal);
414 }
415
416 tloge("cmdOut value:\n");
417 for (idx = 0; idx < MAX_SMC_CMD; idx++) {
418 if (cmdOut[idx] == -1) {
419 break;
420 }
421 tloge("cmd[%d]: cmdId=%u, caPid=%u, devId = 0x%x, eventNr=%u, retVal=0x%x\n",
422 cmdOut[idx],
423 g_cmdData->out[cmdOut[idx]].cmdId,
424 g_cmdData->out[cmdOut[idx]].caPid,
425 g_cmdData->out[cmdOut[idx]].devFileId,
426 g_cmdData->out[cmdOut[idx]].eventNr,
427 g_cmdData->out[cmdOut[idx]].retVal);
428 }
429 }
430
static struct PendingEntry *InitPendingEntry(pid_t pid)
432 {
433 struct PendingEntry *pe = NULL;
434
435 pe = malloc(sizeof(*pe));
436 if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)pe)) {
437 tloge("alloc pe failed\n");
438 return NULL;
439 }
440 if (memset_s(pe, sizeof(*pe), 0, sizeof(*pe)) != EOK) {
441 tloge("memset pe failed!.\n");
442 free(pe);
443 return NULL;
444 }
445 atomic_set(&pe->users, 1); // init pe->users to 1
446 pe->pid = pid;
447 init_waitqueue_head(&pe->wq);
448 atomic_set(&pe->run, 0);
449 INIT_LIST_HEAD(&pe->list);
450 spin_lock(&g_pendLock);
451 list_add_tail(&pe->list, &g_pendingHead);
452 spin_unlock(&g_pendLock);
453 return pe;
454 }
455
struct PendingEntry *FindPendingEntry(pid_t pid)
457 {
458 struct PendingEntry *pe = NULL;
459
460 spin_lock(&g_pendLock);
461 list_for_each_entry(pe, &g_pendingHead, list) {
462 if (pe->pid == pid) {
463 atomic_inc(&pe->users);
464 spin_unlock(&g_pendLock);
465 return pe;
466 }
467 }
468 spin_unlock(&g_pendLock);
469 return NULL;
470 }
471
void ForeachPendingEntry(void (*func)(struct PendingEntry *))
473 {
474 struct PendingEntry *pe = NULL;
475
476 if (func == NULL) {
477 return;
478 }
479 spin_lock(&g_pendLock);
480 list_for_each_entry(pe, &g_pendingHead, list) {
481 func(pe);
482 }
483 spin_unlock(&g_pendLock);
484 }
485
void PutPendingEntry(struct PendingEntry *pe)
487 {
488 if (pe != NULL) {
489 if (atomic_dec_and_test(&pe->users)) {
490 free(pe);
491 }
492 }
493 }
494
static void ReleasePendingEntry(struct PendingEntry *pe)
496 {
497 spin_lock(&g_pendLock);
498 list_del(&pe->list);
499 spin_unlock(&g_pendLock);
500 PutPendingEntry(pe);
501 }
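
/*
 * Editorial summary of the PendingEntry reference counting implemented above:
 * InitPendingEntry() creates an entry with users == 1 and links it on
 * g_pendingHead, FindPendingEntry() adds a reference that its caller must drop
 * with PutPendingEntry(), and ReleasePendingEntry() unlinks the entry and drops
 * the creator's reference. Illustrative lookup pattern, mirroring
 * ProcSmcWakeupCa() below:
 *
 *   struct PendingEntry *pe = FindPendingEntry(ca);
 *   if (pe != NULL) {
 *       atomic_set(&pe->run, 1);
 *       wake_up(&pe->wq);
 *       PutPendingEntry(pe);
 *   }
 */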
502
503 static DECLARE_WAIT_QUEUE_HEAD(siqThWait);
504 static DECLARE_WAIT_QUEUE_HEAD(ipi_th_wait);
505 static atomic_t g_siqThRun;
506
507 enum {
508 TYPE_CRASH_TA = 1,
509 TYPE_CRASH_TEE = 2,
510 };
511
512 enum SmcOpsExit {
513 SMC_OPS_NORMAL = 0x0,
514 SMC_OPS_SCHEDTO = 0x1,
515 SMC_OPS_START_SHADOW = 0x2,
516 SMC_OPS_START_FIQSHD = 0x3,
517 SMC_OPS_PROBE_ALIVE = 0x4,
518 SMC_OPS_TERMINATE = 0x5,
519 SMC_EXIT_NORMAL = 0x0,
520 SMC_EXIT_PREEMPTED = 0x1,
521 SMC_EXIT_SHADOW = 0x2,
522 SMC_EXIT_MAX = 0x3,
523 };
524
525 #define SHADOW_EXIT_RUN 0x1234dead
526
527 typedef struct SmcCmdRet {
528 uint64_t exit;
529 uint64_t ta;
530 uint64_t target;
531 } SmcCmdRetT;
532
static inline void SecretFill(SmcCmdRetT *ret, uint64_t exit, uint64_t ta, uint64_t target)
534 {
535 if (ret != NULL) {
536 ret->exit = exit;
537 ret->ta = ta;
538 ret->target = target;
539 }
540 }
541
int SigkillPending(LosTaskCB *tsk)
543 {
544 if (tsk == NULL) {
545 tloge("tsk is null!\n");
546 return 0;
547 }
548
549 return OsSigIsMember(&tsk->sig.sigwaitmask, SIGKILL) ||
550 OsSigIsMember(&tsk->sig.sigwaitmask, SIGUSR1);
551 }
552
553 enum CmdState {
554 START,
555 KILLING,
556 KILLED,
557 };
558
559 #define CPU0_ONLY_MASK 0x0001
560
561 #if CONFIG_CPU_AFF_NR
static void SetCpuStrategy(UINT16 *oldMask)
563 {
564 LosTaskCB *taskCB = OsCurrTaskGet();
565 UINT16 newMask = CPU0_ONLY_MASK;
566
567 *oldMask = taskCB->cpuAffiMask;
568 KthreadBindMask(taskCB, newMask);
569 }
570 #endif
571
572 #if CONFIG_CPU_AFF_NR
static void RestoreCpu(UINT16 *oldMask)
574 {
575 LosTaskCB *taskCB = OsCurrTaskGet();
576 KthreadBindMask(taskCB, *oldMask);
577 }
578 #endif
579
580 struct SmcParam {
581 uint32_t r0;
582 uint32_t r1;
583 uint32_t r2;
584 uint32_t r3;
585 uint32_t r4;
586 SmcCmdRetT *secret;
587 uint32_t cmd;
588 uint64_t ca;
589 uint32_t ta;
590 uint32_t exitReason;
591 uint32_t target;
592 enum CmdState state;
593 uint64_t ops;
594 };
595
static int DoSmpSmcSend(struct SmcParam *param)
597 {
598 int ret;
599 if (param->secret != NULL && param->secret->exit == SMC_EXIT_PREEMPTED) {
600 param->r0 = param->cmd;
601 if (param->state == KILLING) {
602 param->state = KILLED;
603 param->r1 = SMC_OPS_TERMINATE;
604 param->r2 = param->ca;
605 } else {
606 param->r1 = SMC_OPS_SCHEDTO;
607 param->r2 = param->ca;
608 param->r3 = param->secret->ta;
609 param->r4 = param->secret->target;
610 }
611 }
612 int checkValue = param->ops == SMC_OPS_SCHEDTO || param->ops == SMC_OPS_START_FIQSHD;
613 if (param->secret != NULL && checkValue) {
614 param->r4 = param->secret->target;
615 }
616 ISB;
617 DSB;
618
619 do {
620 __asm__ volatile(
621 "mov r0, %[fid]\n"
622 "mov r1, %[a1]\n"
623 "mov r2, %[a2]\n"
624 "mov r3, %[a3]\n"
625 ".arch_extension sec\n"
626 "smc #0\n"
627 "str r0, [%[re0]]\n"
628 "str r1, [%[re1]]\n"
629 "str r2, [%[re2]]\n"
630 "str r3, [%[re3]]\n"
631 : [fid] "+r" (param->r0), [a1] "+r" (param->r1), [a2] "+r" (param->r2),
632 [a3] "+r" (param->r3)
            : [re0] "r" (&ret), [re1] "r" (&param->exitReason),
              [re2] "r" (&param->ta), [re3] "r" (&param->target)
635 : "r0", "r1", "r2", "r3");
636 } while (0);
637 ISB;
638 DSB;
639 return ret;
640 }
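
/*
 * Editorial note on the calling convention used by DoSmpSmcSend() above,
 * derived from the assembly rather than an external spec: r0 carries the
 * command id, r1 the SMC_OPS_* operation, r2 the calling CA, and r3 the
 * target TA when rescheduling; on return r0-r3 are stored into ret,
 * exitReason, ta and target. param->r4 is prepared here but only the shadow
 * path in ShadowWoPm() actually loads r4 before issuing smc #0.
 */
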
static noinline int SmpSmcSend(uint32_t cmd, uint64_t ops, uint64_t ca,
    SmcCmdRetT *secret, bool needKill)
643 {
644 uint32_t ret = 0;
645 bool checkValue = false;
646 #if CONFIG_CPU_AFF_NR
647 UINT16 oldMask;
648 #endif
649 struct SmcParam param;
650 param.r0 = cmd;
651 param.r1 = ops;
652 param.r2 = ca;
653 param.r3 = 0;
654 param.r4 = 0;
655 param.exitReason = 0;
656 param.ta = 0;
657 param.target = 0;
658 param.state = START;
659 param.cmd = cmd;
660 param.ca = ca;
661 param.secret = secret;
662 param.ops = ops;
663
664 RETRY:
665 #if CONFIG_CPU_AFF_NR
666 SetCpuStrategy(&oldMask);
667 #endif
668
669 ret = DoSmpSmcSend(¶m);
670
671 if (secret == NULL) {
672 return ret;
673 }
674 SecretFill(secret, param.exitReason, param.ta, param.target);
675 if (param.exitReason == SMC_EXIT_PREEMPTED) {
        /* There are two ways to send a terminate cmd to kill a running TA:
         * in the current context or in another one. Sending it from another
         * context can race with completion: the terminate cmd may be sent but
         * not yet processed when the original cmd has already finished.
         * So we send the terminate cmd in the current context.
         */
682 checkValue = needKill && SigkillPending(OsCurrTaskGet()) && param.state == START &&
683 IsThreadReported(OsCurrTaskGet()->taskID);
684 if (checkValue == true) {
685 param.state = KILLING;
686 tloge("receive kill signal\n");
687 }
688 #ifndef CONFIG_PREEMPT
689 /* yield cpu to avoid soft lockup */
690 cond_resched();
691 #endif
692 goto RETRY;
693 }
694 #if CONFIG_CPU_AFF_NR
695 RestoreCpu(&oldMask);
696 #endif
697 return ret;
698 }
699
static uint32_t SendSmcCmd(uint32_t cmd, paddr_t cmdAddr,
    uint32_t cmdType, uint8_t wait)
702 {
703 register uint32_t r0 asm("r0") = cmd;
704 register uint32_t r1 asm("r1") = cmdAddr;
705 register uint32_t r2 asm("r2") = cmdType;
706 register uint32_t r3 asm("r3") = 0;
707 do {
708 __asm__ volatile(
709 ".ifnc %0, r0;.err;.endif;\n"
710 ".ifnc %1, r0;.err;.endif;\n"
711 ".ifnc %2, r1;.err;.endif;\n"
712 ".ifnc %3, r2;.err;.endif;\n"
713 ".ifnc %4, r3;.err;.endif;\n"
714 ".arch_extension sec\n"
715 "smc #0\n"
716 : "+r"(r0)
717 : "r"(r0), "r"(r1), "r"(r2), "r"(r3));
718 } while (r0 == TSP_REQUEST && wait);
719 return r0;
720 }
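
/*
 * Editorial note: in SendSmcCmd() the register/asm("rN") bindings pin each
 * argument to a specific ARM register, and the ".ifnc %N, rX" directives make
 * the assembler fail if the compiler ever picks a different register, so the
 * SMC arguments are guaranteed to sit in r0-r3. The call is reissued while the
 * secure side keeps answering TSP_REQUEST and the caller asked to wait.
 */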
721
int RawSmcSend(uint32_t cmd, paddr_t cmdAddr,
    uint32_t cmdType, uint8_t wait)
724 {
725 uint32_t r0;
726
727 #if (CONFIG_CPU_AFF_NR != 0)
728 UINT16 oldMask;
729
730 SetCpuStrategy(&oldMask);
731 #endif
732 r0 = SendSmcCmd(cmd, cmdAddr, cmdType, wait);
733 #if (CONFIG_CPU_AFF_NR != 0)
734 RestoreCpu(&oldMask);
735 #endif
736 return r0;
737 }
738
void SiqDump(paddr_t mode)
740 {
741 (void)RawSmcSend(TSP_REE_SIQ, mode, 0, false);
742 DoCmdNeedArchivelog();
743 }
744
static int SiqThreadFn(UINTPTR arg, int len)
746 {
747 int ret;
748
749 while (1) {
750 ret = wait_event_interruptible(siqThWait,
751 atomic_read(&g_siqThRun));
752 if (ret) {
753 tloge("wait_event_interruptible failed!\n");
754 return -EINTR;
755 }
756 atomic_set(&g_siqThRun, 0);
757 SiqDump((paddr_t)(1)); // set this addr to 1
758 }
759 }
760
static void CmdResultCheck(TcNsSmcCmd *cmd)
762 {
763 bool checkValue = false;
764 #ifdef SECURITY_AUTH_ENHANCE
765 checkValue = (cmd->retVal == TEEC_SUCCESS) &&
766 (VerifyChksum(cmd) != TEEC_SUCCESS);
767 if (checkValue == true) {
768 cmd->retVal = TEEC_ERROR_GENERIC;
769 tloge("VerifyChksum failed.\n");
770 }
771 #endif
772 checkValue = cmd->retVal == TEEC_PENDING ||
773 cmd->retVal == TEEC_PENDING2;
774
775 if (checkValue == true) {
776 tlogd("wakeup command %u\n", cmd->eventNr);
777 }
778 if (cmd->retVal == TEE_ERROR_TAGET_DEAD) {
779 tloge("error smc call: ret = %x and cmd.errOrigin=%x\n",
780 cmd->retVal, cmd->errOrigin);
781 #ifdef CONFIG_TEELOG
782 CmdMonitorTaCrash(TYPE_CRASH_TA);
783 #endif
784 } else if (cmd->retVal == TEE_ERROR_AUDIT_FAIL) {
785 tloge("error smc call: ret = %x and cmd.errOrigin=%x\n",
786 cmd->retVal, cmd->errOrigin);
787 #ifdef SECURITY_AUTH_ENHANCE
788 tloge("error smc call: status = %x and cmd.errOrigin=%x\n",
789 cmd->eventindex, cmd->errOrigin);
790 #endif
791 }
792 }
793
static int ShadowWoPm(const void *arg, const WoPmParams *params)
795 {
796 uint32_t r0 = TSP_REQUEST;
797 uint32_t r1 = SMC_OPS_START_SHADOW;
798 uint32_t r2 = OsCurrTaskGet()->taskID;
799 uint32_t r3 = 0;
800 uint32_t r4 = *(uint32_t *)arg;
801
802 if (*(params->exitReason) == SMC_EXIT_PREEMPTED) {
803 r0 = TSP_REQUEST;
804 r1 = SMC_OPS_SCHEDTO;
805 r2 = OsCurrTaskGet()->taskID;
806 r3 = *(params->ta);
807 r4 = *(params->target);
808 } else if (*(params->exitReason) == SMC_EXIT_NORMAL) {
809 r0 = TSP_REQUEST;
810 r1 = SMC_OPS_SCHEDTO;
811 r2 = OsCurrTaskGet()->taskID;
812 r3 = 0;
813 r4 = 0;
814 if (*(params->nIdled) > IDLED_COUNT) {
815 *(params->nIdled) = 0;
816 r1 = SMC_OPS_PROBE_ALIVE;
817 }
818 }
819 ISB;
820 DSB;
821 tlogd("%s: [cpu %d] r0=%x r1=%x r2=%x r3=%x r4=%x\n", __func__,
822 RawSmpProcessorId(), r0, r1, r2, r3, r4);
823 do {
824 __asm__ volatile(
825 "mov r0, %[fid]\n"
826 "mov r1, %[a1]\n"
827 "mov r2, %[a2]\n"
828 "mov r3, %[a3]\n"
829 "mov r4, %[a4]\n"
830 ".arch_extension sec\n"
831 "smc #0\n"
832 "str r0, [%[re0]]\n"
833 "str r1, [%[re1]]\n"
834 "str r2, [%[re2]]\n"
835 "str r3, [%[re3]]\n"
836 :[fid] "+r"(r0), [a1] "+r"(r1), [a2] "+r"(r2),
837 [a3] "+r"(r3), [a4] "+r"(r4)
838 :[re0] "r"(params->ret), [re1] "r"(params->exitReason),
839 [re2] "r"(params->ta), [re3] "r"(params->target)
840 : "r0", "r1", "r2", "r3");
841 } while (0);
842
843 ISB;
844 DSB;
845
846 return 0;
847 }
848
static int CheckShadowParam(UINTPTR arg, int len, struct PendingEntry **pe)
850 {
851 if (arg == 0) {
852 return -ENOMEM;
853 }
854 if (len != sizeof(uint64_t)) {
855 free((void *)arg);
856 return -ENOMEM;
857 }
858
859 *pe = InitPendingEntry(GetCurrentPid());
860 if (*pe == NULL) {
861 tloge("init pending entry failed\n");
862 free((void *)arg);
863 return -ENOMEM;
864 }
865
866 ISB;
867 DSB;
868 return 0;
869 }
870
static int ShadowThreadFn(UINTPTR arg, int len)
872 {
873 uint64_t ret = 0;
874 uint64_t exitReason = SMC_EXIT_MAX;
875 uint64_t ta = 0;
876 uint64_t target = 0;
877 int nPreempted = 0;
878 int nIdled = 0;
879 int retVal;
880 struct PendingEntry *pe = NULL;
881 int rc;
882 WoPmParams params = {&nIdled, &ret, &exitReason, &ta, &target};
883
884 ret = CheckShadowParam(arg, len, &pe);
885 if (ret) {
886 return ret;
887 }
888
889 RETRY_WO_PM:
890 retVal = ShadowWoPm((void *)arg, ¶ms);
891 if (retVal == -1) {
892 goto CLEAN_WO_PM;
893 }
894 tlogd("shadow thread return %lld\n", exitReason);
895 if (exitReason == SMC_EXIT_PREEMPTED) {
896 nIdled = 0;
897 if (++nPreempted > PREEMPT_COUNT) {
898 tlogi("%s: retry 10K times on CPU%d\n", __func__, RawSmpProcessorId());
899 nPreempted = 0;
900 }
901 goto RETRY_WO_PM;
902 } else if (exitReason == SMC_EXIT_NORMAL) {
903 nPreempted = 0;
904 long long timeout = HZ * (long)(HZ_COUNT + ((uint8_t)GetCurrentPid() & LOW_BYTE));
905 rc = wait_event_interruptible_timeout(pe->wq, atomic_read(&pe->run), (long)timeout);
906 if (!rc) {
907 nIdled++;
908 }
909 if (atomic_read(&pe->run) == SHADOW_EXIT_RUN) {
910 tlogd("shadow thread work quit, be killed\n");
911 goto CLEAN_WO_PM;
912 } else {
913 atomic_set(&pe->run, 0);
914 goto RETRY_WO_PM;
915 }
916 } else if (exitReason == SMC_EXIT_SHADOW) {
917 tlogd("shadow thread exit, it self\n");
918 } else {
919 tlogd("shadow thread exit with unknown code %ld\n", (long)exitReason);
920 }
921
922 CLEAN_WO_PM:
923 free((void *)arg);
924 ReleasePendingEntry(pe);
925 return retVal;
926 }
927
static void ShadowWorkFunc(struct work_struct *work)
929 {
930 LosTaskCB *shadowThread = NULL;
931 if (work == NULL) {
932 return;
933 }
934 struct ShadowWork *sWork =
935 container_of(work, struct ShadowWork, work);
936 uint64_t *targetArg = malloc(sizeof(uint64_t));
937
938 if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)targetArg)) {
939 tloge("%s: malloc(8 bytes) failed\n", __func__);
940 return;
941 }
942 if (memset_s(targetArg, sizeof(uint64_t),
943 0, sizeof(uint64_t)) != EOK) {
944 tloge("memset targetArg failed!.\n");
945 free(targetArg);
946 return;
947 }
948 *targetArg = sWork->target;
949
950 char shadowName[OS_TCB_NAME_LEN] = {0};
951 int ret = sprintf_s(shadowName, OS_TCB_NAME_LEN, "shadow_th/%lu", g_shadowThreadId++);
952 if (ret < 0) {
953 free(targetArg);
954 return;
955 }
956 shadowThread = KthreadRun(ShadowThreadFn, targetArg, sizeof(uint64_t), shadowName);
957 if (IS_ERR_OR_NULL(shadowThread)) {
958 free(targetArg);
959 tloge("couldn't create shadowThread %ld\n",
960 PTR_ERR(shadowThread));
961 return;
962 }
963 tlogd("%s: create shadow thread %lu for target %llx\n",
964 __func__, g_shadowThreadId, *targetArg);
965 WakeUpProcess(shadowThread);
966 }
967
static int ProcSmcWakeupCa(pid_t ca, int which)
969 {
970 if (ca == 0) {
971 tlogw("wakeup for ca = 0\n");
972 } else {
973 struct PendingEntry *pe = FindPendingEntry(ca);
974
975 if (pe == NULL) {
976 tloge("invalid ca pid=%d for pending entry\n", (int)ca);
977 return -1;
978 }
979 atomic_set(&pe->run, which);
980 wake_up(&pe->wq);
981 tlogd("wakeup pending thread %ld\n", (long)ca);
982 PutPendingEntry(pe);
983 }
984 return 0;
985 }
986
void WakeupPe(struct PendingEntry *pe)
988 {
989 if (pe != NULL) {
990 atomic_set(&pe->run, 1);
991 wake_up(&pe->wq);
992 }
993 }
994
int SmcWakeupBroadcast(void)
996 {
997 ForeachPendingEntry(WakeupPe);
998 return 0;
999 }
1000
int SmcWakeupCa(pid_t ca)
1002 {
1003 return ProcSmcWakeupCa(ca, 1); // set pe->run to 1
1004 }
1005
int SmcShadowExit(pid_t ca)
1007 {
1008 return ProcSmcWakeupCa(ca, SHADOW_EXIT_RUN);
1009 }
1010
void FiqShadowWorkFunc(uint64_t target)
1012 {
1013 SmcCmdRetT secret = { SMC_EXIT_MAX, 0, target };
1014
1015 (void)SmpSmcSend(TSP_REQUEST, SMC_OPS_START_FIQSHD, GetCurrentPid(),
1016 &secret, false);
1017 return;
1018 }
1019
int SmcQueueShadowWorker(uint64_t target)
1021 {
1022 struct ShadowWork shadowWork;
1023 INIT_WORK_ONSTACK(&shadowWork.work, ShadowWorkFunc);
1024 shadowWork.target = target;
1025
1026 /* Run work on CPU 0 */
1027 queue_work(g_ipiHelperWq, &shadowWork.work);
1028 flush_work(&shadowWork.work);
1029 return 0;
1030 }
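
/*
 * Editorial note: the work item above lives on the stack, so flush_work() must
 * complete before SmcQueueShadowWorker() returns. Queuing on g_ipiHelperWq
 * moves the actual shadow-kthread creation in ShadowWorkFunc() out of the
 * context that requested the worker.
 */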
1031
static int SmcOpsNormal(enum CmdReuse *cmdUsage, int *cmdIndex,
    int *lastIndex, struct PendingEntry *pe, const TcNsSmcCmd *cmd)
1034 {
1035 if (*cmdUsage == RESEND) {
1036 if (ReuseSmcInEntry(*cmdIndex)) {
1037 tloge("reuse smc entry failed\n");
1038 ReleaseSmcEntry(*cmdIndex);
1039 ReleasePendingEntry(pe);
1040 return -1;
1041 }
1042 } else {
1043 *cmdIndex = OccupyFreeSmcInEntry(cmd);
1044 if (*cmdIndex == -1) {
1045 tloge("there's no more smc entry\n");
1046 ReleasePendingEntry(pe);
1047 return -1;
1048 }
1049 }
1050 if (*cmdUsage != CLEAR) {
1051 *cmdIndex = *lastIndex;
1052 *cmdUsage = CLEAR;
1053 } else {
1054 *lastIndex = *cmdIndex;
1055 }
1056 tlogd("submit new cmd: cmd.ca=%u cmd-id=%x ev-nr=%u cmd-index=%u last-index=%d\n",
1057 cmd->caPid, cmd->cmdId,
1058 g_cmdData->in[*cmdIndex].eventNr, *cmdIndex,
1059 *lastIndex);
1060 return 0;
1061 }
1062
static int SmpSmcSendCmdDone(int cmdIndex, TcNsSmcCmd *cmd,
    TcNsSmcCmd *in)
1065 {
1066 CmdResultCheck(cmd);
1067 switch (cmd->retVal) {
1068 case TEEC_PENDING2: {
1069 unsigned int agentId = cmd->agentId;
        /* If the agent does not exist, post
         * the answer right back to the TEE
         */
1073 if (AgentProcessWork(cmd, agentId) != TEEC_SUCCESS) {
1074 tloge("agent process work failed\n");
1075 }
1076 return -1;
1077 }
1078 case TEE_ERROR_TAGET_DEAD:
1079 case TEEC_PENDING:
1080 /* just copy out, and let out to proceed */
1081 default:
1082 if (memcpy_s(in, sizeof(*in),
1083 cmd, sizeof(*cmd)) != EOK) {
1084 tloge("memcpy_s failed,%s line:%d", __func__, __LINE__);
1085 cmd->retVal = -1;
1086 }
1087 break;
1088 }
1089
1090 return 0;
1091 }
1092
1093 #define SYM_NAME_LEN_MAX 16
1094 #define SYM_NAME_LEN_1 7
1095 #define SYM_NAME_LEN_2 4
1096 #define CRASH_REG_NUM 3
1097 #define LOW_FOUR_BITE 4
1098
1099 typedef union {
1100 uint64_t crashReg[CRASH_REG_NUM];
1101 struct {
1102 uint8_t haltReason : LOW_FOUR_BITE;
1103 uint8_t app : LOW_FOUR_BITE;
1104 char symName[SYM_NAME_LEN_1];
1105 uint16_t off;
1106 uint16_t size;
1107 uint32_t far;
1108 uint32_t fault;
1109 union {
1110 char symNameAppend[SYM_NAME_LEN_2];
1111 uint32_t elr;
1112 };
1113 } CrashMsg;
1114 } CrashInf;
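
/*
 * Editorial note: CrashInf overlays the three 64-bit registers returned with
 * TSP_CRASH onto a packed message: on a little-endian build the first eight
 * bytes hold the 4-bit halt reason and app index followed by the start of the
 * symbol name, and the rest carry the offset, size, fault address and elr
 * that PrintCrashMsg() below decodes.
 */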
1115
static void PrintCrashMsg(CrashInf *crashInfo)
1117 {
1118 static const char *teeCriticalApp[] = {
1119 "gtask",
1120 "teesmcmgr",
1121 "hmsysmgr",
1122 "hmfilemgr",
1123 "platdrv",
1124 "kernel"
1125 };
1126 int appNum = sizeof(teeCriticalApp) / sizeof(teeCriticalApp[0]);
1127 const char *crashAppName = "NULL";
1128 uint16_t off = crashInfo->CrashMsg.off;
1129 int appIndex = crashInfo->CrashMsg.app & LOW_BYTE;
1130 int haltReason = crashInfo->CrashMsg.haltReason;
1131 int sret;
1132
1133 crashInfo->CrashMsg.off = 0; // for end of symName
1134
1135 if (appIndex >= 0 && appIndex < appNum) {
1136 crashAppName = teeCriticalApp[appIndex];
1137 } else {
1138 tloge("index error: %x\n", crashInfo->CrashMsg.app);
1139 }
1140
1141 // kernel
1142 if (appIndex == (appNum - 1)) {
1143 tloge("====crash app:%s user_sym:%s kernel crash off/size: <0x%x/0x%x>\n",
1144 crashAppName, crashInfo->CrashMsg.symName,
1145 off, crashInfo->CrashMsg.size);
1146 tloge("====crash haltReason: 0x%x far:0x%x fault:0x%x elr:0x%x (ret_ip: 0x%llx)\n",
1147 haltReason, crashInfo->CrashMsg.far,
1148 crashInfo->CrashMsg.fault, crashInfo->CrashMsg.elr,
1149 crashInfo->crashReg[CRASH_RET_IP]);
1150 } else { // user app
1151 char syms[SYM_NAME_LEN_MAX] = {0};
1152
1153 sret = memcpy_s(syms, SYM_NAME_LEN_MAX,
1154 crashInfo->CrashMsg.symName, SYM_NAME_LEN_1);
1155 if (sret != EOK) {
1156 tloge("memcpy symName failed!\n");
1157 }
1158 sret = memcpy_s(syms + SYM_NAME_LEN_1,
1159 SYM_NAME_LEN_MAX - SYM_NAME_LEN_1,
1160 crashInfo->CrashMsg.symNameAppend, SYM_NAME_LEN_2);
1161 if (sret != EOK) {
1162 tloge("memcpy symNameAppend failed!\n");
1163 }
1164 tloge("====crash app:%s user_sym:%s + <0x%x/0x%x>\n",
1165 crashAppName, syms, off, crashInfo->CrashMsg.size);
1166 tloge("====crash far:0x%x fault:%x\n",
1167 crashInfo->CrashMsg.far, crashInfo->CrashMsg.fault);
1168 }
1169 }
1170
static int SmpSmcSendProcess(TcNsSmcCmd *cmd, uint64_t ops,
    SmcCmdRetT *cmdRet, int cmdIndex)
1173 {
1174 int ret;
1175
1176 ret = SmpSmcSend(TSP_REQUEST, ops, GetCurrentPid(), cmdRet, true);
1177 tlogd("SmpSmcSend ret = %x, cmdRet.exit=%ld, cmdIndex=%d\n",
1178 ret, (long)cmdRet->exit, cmdIndex);
1179 ISB;
1180 DSB;
1181 if (ret == (int)TSP_CRASH) {
1182 CrashInf crashInfo;
1183 crashInfo.crashReg[CRASH_RET_EXIT] = cmdRet->exit;
1184 crashInfo.crashReg[CRASH_RET_TA] = cmdRet->ta;
1185 crashInfo.crashReg[CRASH_RET_IP] = cmdRet->target;
1186
1187 tloge("TEEOS has crashed!\n");
1188 PrintCrashMsg(&crashInfo);
1189
1190 g_sysCrash = 1;
1191 #ifdef CONFIG_TEELOG
1192 CmdMonitorTaCrash(TYPE_CRASH_TEE);
1193 #endif
1194 cmd->retVal = -1;
1195 return -1;
1196 }
1197
1198 return 0;
1199 }
1200
static int InitForSmcSend(TcNsSmcCmd *in, struct PendingEntry **pe,
    TcNsSmcCmd *cmd, bool reuse)
1203 {
1204 if (in == NULL) {
1205 tloge("Bad params\n");
1206 return -1;
1207 }
1208
1209 *pe = InitPendingEntry(GetCurrentPid());
1210 if (*pe == NULL) {
1211 tloge("init pending entry failed\n");
1212 return -ENOMEM;
1213 }
1214 in->caPid = GetCurrentPid();
1215 if (!reuse) {
1216 if (memcpy_s(cmd, sizeof(*cmd), in, sizeof(*in))) {
1217 tloge("memcpy_s failed,%s line:%d", __func__, __LINE__);
1218 ReleasePendingEntry(*pe);
1219 return -1;
1220 }
1221 }
1222 return 0;
1223 }
1224
1225 #define GOTO_RESLEEP 1
1226 #define GOTO_RETRY_WITH_CMD 2
1227 #define GOTO_RETRY 3
1228 #define GOTO_CLEAN 4
1229
static int CheckIsCaKilled(int cmdIndex, uint64_t *ops)
1231 {
1232 /* if CA has not been killed */
1233 if (SigkillPending(OsCurrTaskGet()) == 0) {
1234 if (!IsCmdWorkingDone(cmdIndex)) {
1235 return GOTO_RESLEEP;
1236 } else {
1237 tloge("cmd done, may miss a spi!\n");
1238 ShowCmdBitmapWithLock();
1239 }
1240 } else {
1241 /* if CA killed, send terminate cmd */
1242 *ops = SMC_OPS_TERMINATE;
1243 tloge("CA is killed, send terminate!\n");
1244 return GOTO_RETRY_WITH_CMD;
1245 }
1246 return 0;
1247 }
1248
1249 struct CmdPram {
1250 TcNsSmcCmd *cmd;
1251 int cmdIndex;
1252 enum CmdReuse *cmdUsage;
1253 };
1254
static int CmdDoneProcess(TcNsSmcCmd *in, struct CmdPram *cmdParam, uint64_t *ops)
1256 {
1257 if ((in == NULL) || (cmdParam == NULL) || (ops == NULL)) {
1258 return 0;
1259 }
1260
1261 if (CopySmcOutEntry(cmdParam->cmdIndex, cmdParam->cmd, cmdParam->cmdUsage)) {
1262 cmdParam->cmd->retVal = -1;
1263 return GOTO_CLEAN;
1264 }
1265
1266 if (SmpSmcSendCmdDone(cmdParam->cmdIndex, cmdParam->cmd, in) == -1) {
1267 *ops = SMC_OPS_NORMAL;
1268 /* cmd will be reused */
1269 return GOTO_RETRY;
1270 }
1271
1272 return 0;
1273 }
1274
static int RetryWithFillCmdProcess(TcNsSmcCmd *in, struct CmdPram *cmdParam, struct PendingEntry *pe, uint64_t *ops)
1276 {
1277 SmcCmdRetT cmdRet = {0};
1278
1279 if ((in == NULL) || (cmdParam == NULL) || (pe == NULL) || (ops == NULL)) {
1280 return 0;
1281 }
1282
1283 while (1) {
1284 tlogd("SmpSmcSend start cmdId = %u, ca = %u\n", cmdParam->cmd->cmdId, cmdParam->cmd->caPid);
1285 if (SmpSmcSendProcess(cmdParam->cmd, *ops, &cmdRet, cmdParam->cmdIndex) == -1) {
1286 return GOTO_CLEAN;
1287 }
1288 if (IsCmdWorkingDone(cmdParam->cmdIndex)) {
1289 return CmdDoneProcess(in, cmdParam, ops);
1290 }
1291
1292 if (cmdRet.exit != SMC_EXIT_NORMAL) {
1293 tloge("invalid cmd work state\n");
1294 cmdParam->cmd->retVal = -1;
1295 return GOTO_CLEAN;
1296 }
1297 /* task pending exit */
1298 tlogd("goto sleep, exitReason=%lld\n", cmdRet.exit);
1299 RESLEEP:
1300 if (wait_event_interruptible_timeout(pe->wq, atomic_read(&pe->run),
1301 (long)(RESLEEP_TIMEOUT * HZ)) == 0) {
1302 tlogd("CA wait event for %d s\n", RESLEEP_TIMEOUT);
1303 int ret = CheckIsCaKilled(cmdParam->cmdIndex, ops);
1304 if (ret == GOTO_RESLEEP) {
1305 goto RESLEEP;
1306 } else if (ret == GOTO_RETRY_WITH_CMD) {
1307 continue;
1308 }
1309 }
1310 atomic_set(&pe->run, 0);
1311
1312 if (IsCmdWorkingDone(cmdParam->cmdIndex)) {
1313 tlogd("cmd is done\n");
1314 return CmdDoneProcess(in, cmdParam, ops);
1315 }
1316 *ops = SMC_OPS_SCHEDTO;
1317 }
1318
1319 return 0;
1320 }
1321
static int SmpSmcSendFunc(TcNsSmcCmd *in, uint32_t cmdType,
    bool reuse)
1324 {
1325 int cmdIndex = 0;
1326 int lastIndex = 0;
1327 TcNsSmcCmd cmd = { {0}, 0 };
1328 struct PendingEntry *pe = NULL;
1329 uint64_t ops;
1330 enum CmdReuse cmdUsage = CLEAR;
1331 int ret;
1332 bool check = false;
1333
1334 if (InitForSmcSend(in, &pe, &cmd, reuse) != 0) {
1335 tloge(KERN_INFO "InitForSmcSend fail\n");
1336 return -1;
1337 }
1338 if (reuse) {
1339 lastIndex = cmdIndex = in->eventNr;
1340 cmdUsage = RESEND;
1341 }
1342 ops = SMC_OPS_NORMAL;
1343
1344 RETRY:
1345 if ((ops == SMC_OPS_NORMAL) &&
1346 SmcOpsNormal(&cmdUsage, &cmdIndex, &lastIndex, pe, &cmd) == -1) {
1347 tloge(KERN_INFO "SmcOpsNormal fail\n");
1348 return -1;
1349 }
1350
1351 struct CmdPram cmdParam;
1352 cmdParam.cmd = &cmd;
1353 cmdParam.cmdIndex = cmdIndex;
1354 cmdParam.cmdUsage = &cmdUsage;
1355
1356 ret = RetryWithFillCmdProcess(in, &cmdParam, pe, &ops);
1357 if (ret == GOTO_CLEAN) {
1358 goto CLEAN;
1359 } else if (ret == GOTO_RETRY) {
1360 goto RETRY;
1361 }
1362
1363 CLEAN:
1364 check = (cmdUsage != CLEAR && cmd.retVal != TEEC_PENDING);
1365 if (check == true) {
1366 ReleaseSmcEntry(cmdIndex);
1367 }
1368 ReleasePendingEntry(pe);
1369 return cmd.retVal;
1370 }
1371
static int SmcSvcThreadFn(UINTPTR arg, int len)
1373 {
1374 while (!KthreadShouldStop()) {
1375 TcNsSmcCmd smcCmd = { {0}, 0 };
1376 int ret;
1377
1378 smcCmd.globalCmd = true;
1379 smcCmd.cmdId = GLOBAL_CMD_ID_SET_SERVE_CMD;
1380 ret = SmpSmcSendFunc(&smcCmd,
1381 TC_NS_CMD_TYPE_NS_TO_SECURE, false);
1382 tlogd("smc svc return 0x%x\n", ret);
1383 }
1384 tloge("smc_svc_thread stop ...\n");
1385 return 0;
1386 }
1387
void WakeupTcSiq(void)
1389 {
1390 atomic_set(&g_siqThRun, 1); // init g_siqThRun to 1
1391 wake_up_interruptible(&siqThWait);
1392 }
/*
 * This function first powers on the crypto cell, then sends the smc cmd to
 * trustedcore. After it finishes, the crypto cell is powered off.
 */
static int ProcTcNsSmc(TcNsSmcCmd *cmd, bool reuse)
1398 {
1399 int ret;
1400
1401 if (g_sysCrash) {
1402 tloge("ERROR: sys crash happened!!!\n");
1403 return TEEC_ERROR_GENERIC;
1404 }
1405 if (cmd == NULL) {
1406 tloge("invalid cmd\n");
1407 return TEEC_ERROR_GENERIC;
1408 }
1409 tlogd(KERN_INFO "***TC_NS_SMC call start on cpu %d ***\n",
1410 RawSmpProcessorId());
1411 CmdMonitorLog(cmd);
1412 ret = SmpSmcSendFunc(cmd, TC_NS_CMD_TYPE_NS_TO_SECURE, reuse);
1413 CmdMonitorLogend();
1414 return ret;
1415 }
1416
int TcNsSmc(TcNsSmcCmd *cmd)
1418 {
1419 return ProcTcNsSmc(cmd, false);
1420 }
1421
int TcNsSmcWithNoNr(TcNsSmcCmd *cmd)
1423 {
1424 return ProcTcNsSmc(cmd, true);
1425 }
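
/*
 * Illustrative caller-side sketch (not part of this driver): a client that has
 * filled in a TcNsSmcCmd submits it with TcNsSmc() and inspects the TEE result;
 * TcNsSmcWithNoNr() is the resend variant for a command that already owns an
 * eventNr slot (reuse == true).
 *
 *   TcNsSmcCmd cmd = { {0}, 0 };
 *   cmd.globalCmd = true;
 *   cmd.cmdId = GLOBAL_CMD_ID_SET_SERVE_CMD;  // any valid command id
 *   if (TcNsSmc(&cmd) != TEEC_SUCCESS) {
 *       tloge("smc call failed, origin=0x%x\n", cmd.errOrigin);
 *   }
 */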
1426
static void SmcWorkNoWait(uint32_t type)
1428 {
1429 (void)RawSmcSend(TSP_REQUEST, g_cmdPhys, type, true);
1430 }
1431
static void SmcWorkSetCmdBuffer(struct work_struct *work)
1433 {
1434 (void)work;
1435 SmcWorkNoWait(TC_NS_CMD_TYPE_SECURE_CONFIG);
1436 }
1437
static void SmcWorkInitSecondaryCpus(struct work_struct *work)
1439 {
1440 (void)work;
1441 SmcWorkNoWait(TC_NS_CMD_TYPE_NS_TO_SECURE);
1442 }
1443
static int SmcSetCmdBuffer(void)
1445 {
1446 struct work_struct work;
1447
1448 INIT_WORK_ONSTACK(&work, SmcWorkSetCmdBuffer);
1449 /* Run work on CPU 0 */
1450 ScheduleWorkOn(0, &work);
1451 flush_work(&work);
1452 tlogd("smc set cmd buffer done\n");
1453 return 0;
1454 }
1455
static int SmcInitSecondaryCpus(void)
1457 {
1458 unsigned int i;
1459 struct work_struct work;
1460
1461 INIT_WORK_ONSTACK(&work, SmcWorkInitSecondaryCpus);
1462 /* Run work on all secondary cpus */
1463 for (i = 1; i < LOSCFG_KERNEL_CORE_NUM; i++) {
1464 #if CONFIG_CPU_AFF_NR
1465 if (i >= CONFIG_CPU_AFF_NR) {
1466 break;
1467 }
1468 #endif
1469 ScheduleWorkOn((int)i, &work);
1470 flush_work(&work);
1471 tlogd("init secondary cpu %u done\n", i);
1472 }
1473 return 0;
1474 }
1475
1476 #ifdef SECURITY_AUTH_ENHANCE
1477 #define ALIGN_BIT 0x3
1478
static void FreeRootKey(void)
1480 {
1481 if (memset_s((void *)g_sessionRootKey, sizeof(*g_sessionRootKey),
1482 0, sizeof(*g_sessionRootKey)) != EOK) {
1483 tloge("memset mem failed\n");
1484 }
1485 free(g_sessionRootKey);
1486 g_sessionRootKey = NULL;
1487 }
1488
static int GetSessionRootKey(void)
1490 {
1491 int ret;
1492 uint32_t *buffer = (uint32_t *)(g_cmdData->in);
1493 #ifdef CONFIG_ARM64
1494 if (buffer == NULL || ((uint64_t)(uintptr_t)buffer & ALIGN_BIT)) {
1495 #else
1496 if (buffer == NULL || ((uint32_t)(uintptr_t)buffer & ALIGN_BIT)) {
1497 #endif
1498 tloge("Session root key must be 4bytes aligned\n");
1499 return -EFAULT;
1500 }
1501 g_sessionRootKey = calloc(1, sizeof(*g_sessionRootKey));
1502 if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_sessionRootKey)) {
1503 tloge("No memory to store session root key\n");
1504 return -ENOMEM;
1505 }
1506 if (memcpy_s(g_sessionRootKey, sizeof(*g_sessionRootKey),
1507 (void *)(buffer + 1), sizeof(*g_sessionRootKey))) {
1508 tloge("Copy session root key from TEE failed\n");
1509 ret = -EFAULT;
1510 goto FREE_MEM;
1511 }
1512 if (memset_s((void *)(g_cmdData->in), sizeof(g_cmdData->in),
1513 0, sizeof(g_cmdData->in))) {
1514 tloge("Clean the command buffer failed\n");
1515 ret = -EFAULT;
1516 goto FREE_MEM;
1517 }
1518 return 0;
1519 FREE_MEM:
1520 FreeRootKey();
1521 return ret;
1522 }
1523 #endif
1524
1525 static int SmcInitDataCmdData(void)
1526 {
1527 g_cmdData = (TcNsSmcQueue *)GetPhyPage();
1528 if (g_cmdData == NULL) {
1529 return -ENOMEM;
1530 }
1531 if (memset_s(g_cmdData, sizeof(TcNsSmcQueue), 0, sizeof(TcNsSmcQueue))) {
1532 FreePhyPage(g_cmdData);
1533 g_cmdData = NULL;
1534 return -ENOMEM;
1535 }
1536 g_cmdPhys = LOS_PaddrQuery(g_cmdData);
1537 return 0;
1538 }
1539
1540 int SmcInitData(void)
1541 {
1542 int ret = SmcInitDataCmdData();
1543 if (ret) {
1544 return ret;
1545 }
1546 /* Send the allocated buffer to TrustedCore for init */
1547 if (SmcSetCmdBuffer()) {
1548 ret = -EINVAL;
1549 goto FREE_MEM;
1550 }
1551 if (SmcInitSecondaryCpus()) {
1552 ret = -EINVAL;
1553 goto FREE_MEM;
1554 }
1555 #ifdef SECURITY_AUTH_ENHANCE
1556 if (GetSessionRootKey()) {
1557 ret = -EFAULT;
1558 goto FREE_MEM;
1559 }
1560 #endif
1561
1562 g_siqThread = KthreadRun(SiqThreadFn, NULL, 0, "siqthread/0");
1563 if (unlikely(IS_ERR_OR_NULL(g_siqThread))) {
1564 pr_err("couldn't create siqthread %ld\n",
1565 PTR_ERR(g_siqThread));
1566 ret = (int)PTR_ERR(g_siqThread);
1567 goto FREE_MEM;
1568 }
1569
1570 g_ipiHelperWq = create_workqueue("ipihelper");
1571 if (g_ipiHelperWq == NULL) {
1572 tloge("couldn't create workqueue.\n");
1573 ret = -ENOMEM;
1574 goto FREE_SIQ_WORKER;
1575 }
1576
1577 WakeUpProcess(g_siqThread);
1578 InitCmdMonitor();
1579 INIT_LIST_HEAD(&g_pendingHead);
1580 spin_lock_init(&g_pendLock);
1581 return 0;
1582
1583 FREE_SIQ_WORKER:
1584 KthreadStop(g_siqThread);
1585 g_siqThread = NULL;
1586 FREE_MEM:
1587 FreePhyPage(g_cmdData);
1588 g_cmdData = NULL;
1589 #ifdef SECURITY_AUTH_ENHANCE
1590 if (!IS_ERR_OR_NULL(g_sessionRootKey)) {
1591 FreeRootKey();
1592 }
1593 #endif
1594 return ret;
1595 }
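
/*
 * Editorial summary of SmcInitData() above: allocate and zero the shared
 * command page, publish its physical address to the TEE via SmcSetCmdBuffer(),
 * let the secure side initialize the secondary CPUs, optionally fetch the
 * session root key, then start the SIQ thread and the ipihelper workqueue
 * before wiring up command monitoring and the pending-entry list.
 */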
1596
1597 int InitSmcSvcThread(void)
1598 {
1599 g_smcSvcThread = KthreadRun(SmcSvcThreadFn, NULL, 0, "smc_svc_thread");
1600 if (unlikely(IS_ERR_OR_NULL(g_smcSvcThread))) {
1601 tloge("couldn't create smc_svc_thread %ld\n", PTR_ERR(g_smcSvcThread));
1602 return PTR_ERR(g_smcSvcThread);
1603 }
1604 WakeUpProcess(g_smcSvcThread);
1605 return 0;
1606 }
1607
1608 void SmcFreeData(void)
1609 {
1610 FreePhyPage(g_cmdData);
1611 g_cmdData = NULL;
1612 if (!IS_ERR_OR_NULL(g_smcSvcThread)) {
1613 KthreadStop(g_smcSvcThread);
1614 g_smcSvcThread = NULL;
1615 }
1616 #ifdef SECURITY_AUTH_ENHANCE
1617 if (!IS_ERR_OR_NULL(g_sessionRootKey)) {
1618 FreeRootKey();
1619 }
1620 #endif
1621 }
1622
1623