1 /*
2 * Copyright (C) 2022 Huawei Technologies Co., Ltd.
3 * Licensed under the Mulan PSL v2.
4 * You can use this software according to the terms and conditions of the Mulan PSL v2.
5 * You may obtain a copy of Mulan PSL v2 at:
6 * http://license.coscl.org.cn/MulanPSL2
7 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
8 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
9 * PURPOSE.
10 * See the Mulan PSL v2 for more details.
11 */
12
13 #include <stddef.h>
14 #include <string.h>
15 #include <mem_ops.h>
16 #include <sys/mman.h>
17 #include <dlist.h>
18 #include "tee_log.h"
19 #include "tee_mem_mgmt_api.h"
20 #include "ta_framework.h"
21 #include "gtask_inner.h"
22 #include "mem_manager.h"
23 #include "tee_common.h"
24 #include "tee_ext_api.h"
25 #include "tee_config.h"
26 #include "gtask_config_hal.h"
27 #include "mem_page_ops.h"
28 #include "securec.h"
29
30 #include "gtask_adapt.h"
31 #include "tee_ta2ta.h" /* for smc_operation */
32 #include "gtask_para_config.h"
33
34 #define ADDR_MAX 0xffffffffffffffff /* UINT64_MAX */
35 #define CA_TA_CMD_VALUE_INDEX 1
36 #define CA_TA_CMD_PUBKEY_INDEX 2
37 #define CA_TA_CMD_PKGNAME_INDEX 3
38
39 #define RESERVED_MEM_SECURE 0x1001 /* cmd id for setting reserved memory secure */
40 #define RESERVED_MEM_NONSECURE 0x1000 /* cmd id for setting reserved memory non-secure */
41 #define PG_SIZE_4K 4096
42 #define PG_SIZE_64K 65536
43 #define MAX_MEM_SIZE_1G 1073741824
44
45 extern struct session_struct *g_cur_session;
46 extern struct service_struct *g_cur_service;
47
48 static TEE_Result task_add_mem_region(uint32_t event_nr, uint32_t task_pid, uint64_t addr, uint32_t size, bool ta2ta);
49 static TEE_Result check_operation_params_in_mailbox_range(const tee_operation_gtask *operation);
50
/* Bookkeeping for one memory mapping (NS or ta2ta sharemem) created while
 * handling an smc cmd; queued on the current session's map_mem list, or on
 * the global g_mem_ns list when no session is current. */
struct mem_region_ns {
    uint64_t addr;          /* virtual address the region was mapped at */
    uint32_t size;          /* mapping size in bytes */
    uint32_t event_nr;      /* smc event this mapping belongs to */
    uint32_t task_id;       /* task the mapping was created for (0 = gtask) */
    bool ta2ta;             /* true: ta2ta sharemem; false: NS memory */
    struct dlist_node list; /* linkage on map_mem / g_mem_ns */
};

/* State of a registered NS memory pool (the mailbox): phys range and the
 * gtask virtual address it is mapped at. init flags one-shot registration. */
struct mempool_state {
    bool init;
    paddr_t start;
    uint32_t size;
    uintptr_t va;
};

static struct dlist_node g_mem_ns;                 /* NS mappings made outside any session */
static struct mempool_state g_mb_state;            /* mailbox pool state */
static struct pam_node *g_gt_pam_node = NULL;      /* TEE param copy for the in-flight gtask cmd */
static tee_operation_gtask *g_gt_oper_addr = NULL; /* REE operation addr for the in-flight gtask cmd */
72
/* Read value.a of param[index], honoring the node's 32/64-bit param layout. */
static uint32_t get_index_value_a(const struct pam_node *node, uint32_t index)
{
    if (!node->param_type) {
        tee_param_32 *p32 = node->p_for_ta;
        return p32[index].value.a;
    }

    tee_param_64 *p64 = node->p_for_ta;
    return p64[index].value.a;
}
83
/* Read value.b of param[index], honoring the node's 32/64-bit param layout. */
static uint32_t get_index_value_b(const struct pam_node *node, uint32_t index)
{
    if (!node->param_type) {
        tee_param_32 *p32 = node->p_for_ta;
        return p32[index].value.b;
    }

    tee_param_64 *p64 = node->p_for_ta;
    return p64[index].value.b;
}
94
/* Read memref.size of param[index] as uint32, honoring the 32/64-bit layout. */
static uint32_t get_index_memref_size(const struct pam_node *node, uint32_t index)
{
    if (!node->param_type) {
        tee_param_32 *p32 = node->p_for_ta;
        return p32[index].memref.size;
    }

    tee_param_64 *p64 = node->p_for_ta;
    return (uint32_t)(p64[index].memref.size);
}
105
/* Write value.a of param[index], honoring the node's 32/64-bit param layout. */
static void set_index_value_a(const struct pam_node *node, uint32_t index, uint32_t a)
{
    if (!node->param_type) {
        tee_param_32 *p32 = node->p_for_ta;
        p32[index].value.a = a;
        return;
    }

    tee_param_64 *p64 = node->p_for_ta;
    p64[index].value.a = a;
}
116
/* Write value.b of param[index], honoring the node's 32/64-bit param layout. */
static void set_index_value_b(const struct pam_node *node, uint32_t index, uint32_t b)
{
    if (!node->param_type) {
        tee_param_32 *p32 = node->p_for_ta;
        p32[index].value.b = b;
        return;
    }

    tee_param_64 *p64 = node->p_for_ta;
    p64[index].value.b = b;
}
127
/* Write memref.size of param[index], honoring the 32/64-bit param layout. */
static void set_index_memref_size(const struct pam_node *node, uint32_t index, uint32_t size)
{
    if (!node->param_type) {
        tee_param_32 *p32 = node->p_for_ta;
        p32[index].memref.size = size;
        return;
    }

    tee_param_64 *p64 = node->p_for_ta;
    p64[index].memref.size = size;
}
138
/* Write memref.buffer of param[index]; the 32-bit layout keeps only the
 * low word of the address (gtask-side params are 32-bit). */
static void set_index_memref_buffer(const struct pam_node *node, uint32_t index, uint64_t buf)
{
    if (!node->param_type) {
        tee_param_32 *p32 = node->p_for_ta;
        p32[index].memref.buffer = (uint32_t)buf;
        return;
    }

    tee_param_64 *p64 = node->p_for_ta;
    p64[index].memref.buffer = buf;
}
/* Release a TEE share-mem buffer; NULL is tolerated like free(). */
static void free_tee_mem(const void *addr, uint32_t size)
{
    if (addr != NULL)
        free_sharemem((void *)addr, size);
}
155
copy_from_src(uint32_t task_id,void ** tee_addr,void * ree_addr,uint32_t size,uint32_t type)156 static TEE_Result copy_from_src(uint32_t task_id, void **tee_addr, void *ree_addr, uint32_t size, uint32_t type)
157 {
158 (void)type;
159 if (tee_addr == NULL || ree_addr == NULL || size == 0) {
160 tloge("copy_from_src invalid input\n");
161 return TEE_ERROR_BAD_PARAMETERS;
162 }
163
164 TEE_UUID ta_uuid = {0};
165 int32_t ret = get_ta_info(task_id, NULL, &ta_uuid);
166 if (ret != 0) {
167 tloge("get ta uuid failed\n");
168 return TEE_ERROR_GENERIC;
169 }
170 *tee_addr = alloc_sharemem_aux(&ta_uuid, size + 1);
171 if (*tee_addr == NULL) {
172 tloge("copy tee mem alloc failed, size=0x%x.\n", size);
173 return TEE_ERROR_OUT_OF_MEMORY;
174 }
175
176 if (memcpy_s(*tee_addr, size + 1, ree_addr, size) != EOK) {
177 free_tee_mem(*tee_addr, size + 1);
178 return TEE_ERROR_GENERIC;
179 }
180
181 ((char *)(*tee_addr))[size] = 0;
182
183 return TEE_SUCCESS;
184 }
185
copy_to_src(void * ree_addr,uint32_t ree_size,const void * tee_addr,uint32_t tee_size)186 static TEE_Result copy_to_src(void *ree_addr, uint32_t ree_size, const void *tee_addr, uint32_t tee_size)
187 {
188 /* this condition should never hanppen */
189 if (tee_addr == NULL) {
190 tloge("tee_addr is null\n");
191 return TEE_ERROR_BAD_PARAMETERS;
192 }
193
194 /* this condition should never hanppen */
195 if (ree_addr == NULL) {
196 tloge("ree_addr is null\n");
197 return TEE_ERROR_BAD_PARAMETERS;
198 }
199
200 if (ree_size == 0) {
201 tloge("ree_size is 0\n");
202 return TEE_ERROR_BAD_PARAMETERS;
203 }
204
205 /* this condition is valid */
206 if (tee_size == 0) {
207 tlogd("tee_size is 0\n");
208 return TEE_SUCCESS;
209 }
210
211 if (ree_size < tee_size) {
212 tloge("invalid tee_size:%u\n", tee_size);
213 return TEE_ERROR_BAD_PARAMETERS;
214 }
215 if (memcpy_s(ree_addr, ree_size, tee_addr, tee_size) != EOK)
216 return TEE_ERROR_GENERIC;
217
218 return TEE_SUCCESS;
219 }
220
alloc_pam_node(void)221 static struct pam_node *alloc_pam_node(void)
222 {
223 return TEE_Malloc(sizeof(*g_gt_pam_node), 0);
224 }
225
/*
 * Release a pam_node and everything it owns:
 *  - for each memref param, scrub and free the TEE-side copy created by
 *    copy_from_src (which over-allocates by 1 byte for the NUL terminator,
 *    hence every "+ 1" size below);
 *  - free the per-TA param array (32- or 64-bit layout);
 *  - free the node itself.
 * NULL node is tolerated.
 */
void release_pam_node(struct pam_node *node)
{
    uint32_t i;
    uint32_t size;

    if (node == NULL)
        return;
    /* free param's memref tee mem */
    for (i = 0; i < TEE_PARAM_NUM; i++) {
        uint32_t type = TEE_PARAM_TYPE_GET(node->op.p_type, i);
        switch (type) {
        case TEE_PARAM_TYPE_MEMREF_INPUT:
        case TEE_PARAM_TYPE_MEMREF_OUTPUT:
        case TEE_PARAM_TYPE_MEMREF_INOUT:
            /* sizes must match copy_from_src's "size + 1" allocation.
             * NOTE(review): p_vaddr_gt_tee[i] may be NULL when marshalling
             * failed part-way; free_tee_mem handles NULL and the memset_s
             * result is deliberately ignored — confirm memset_s rejects a
             * NULL destination gracefully. */
            (void)memset_s(node->p_vaddr_gt_tee[i], node->op.p[i].memref.size + 1, 0, node->op.p[i].memref.size + 1);
            free_tee_mem(node->p_vaddr_gt_tee[i], node->op.p[i].memref.size + 1);
            break;
        default:
            /* value/none params own no TEE buffer */
            break;
        }
    }

    /* free the per-TA param array sized by the 32/64-bit layout */
    size = get_tee_param_len(node->param_type) * TEE_PARAM_NUM;
    free_tee_mem(node->p_for_ta, size);
    /* free pam node itself */
    TEE_Free(node);
}
254
copy_pam_from_src(const void * operation,uint32_t operation_size,struct pam_node ** pam_node)255 static TEE_Result copy_pam_from_src(const void *operation, uint32_t operation_size, struct pam_node **pam_node)
256 {
257 if (operation == NULL || pam_node == NULL)
258 return TEE_ERROR_BAD_PARAMETERS;
259
260 /* copy the ns shared mem into tee */
261 struct pam_node *n_tee = alloc_pam_node();
262 if (n_tee == NULL) {
263 tloge("operation in use mem alloc failed.\n");
264 return TEE_ERROR_OUT_OF_MEMORY;
265 }
266
267 /* back up the mem size of input params using n_tee->op.p */
268 if (memcpy_s(n_tee, sizeof(*n_tee), operation, operation_size) != EOK) {
269 TEE_Free(n_tee);
270 return TEE_ERROR_GENERIC;
271 }
272
273 /* alloc mem later */
274 n_tee->p_for_ta = NULL;
275
276 *pam_node = n_tee;
277
278 return TEE_SUCCESS;
279 }
280
/*
 * Copy OUTPUT/INOUT params from the TEE-side copy (n_tee) back into the
 * REE operation buffer (p_ree):
 *  - value params: a/b copied straight back;
 *  - memref params: data is copied only when the TA's reported size fits
 *    the REE-supplied size; a larger size reports the needed size and
 *    returns TEE_ERROR_SHORT_BUFFER without copying data.
 * Returns the first fatal error, else TEE_ERROR_SHORT_BUFFER if any memref
 * was too small, else TEE_SUCCESS.
 */
static TEE_Result copy_params_back_to_ree(const struct pam_node *n_tee, tee_operation_gtask *p_ree)
{
    TEE_Result ret = TEE_SUCCESS;
    TEE_Result e_ret;
    uint32_t i;

    for (i = 0; i < TEE_PARAM_NUM; i++) {
        uint32_t type = TEE_PARAM_TYPE_GET(n_tee->op.p_type, i);
        switch (type) {
        case TEE_PARAM_TYPE_VALUE_OUTPUT:
        case TEE_PARAM_TYPE_VALUE_INOUT:
            p_ree->p[i].value.a = get_index_value_a(n_tee, i);
            p_ree->p[i].value.b = get_index_value_b(n_tee, i);
            break;
        case TEE_PARAM_TYPE_MEMREF_OUTPUT:
        case TEE_PARAM_TYPE_MEMREF_INOUT:
            /* this condition should not happen: n_tee->op.p holds the
             * snapshot of the original REE sizes (copy_pam_from_src), so a
             * mismatch means the REE buffer changed under us */
            if (n_tee->op.p[i].memref.size != p_ree->p[i].memref.size) {
                tloge("ERROR:memref size is wrong:%u %u %u\n", n_tee->op.p[i].memref.size, p_ree->p[i].memref.size,
                      get_index_memref_size(n_tee, i));
                ret = TEE_ERROR_BAD_PARAMETERS;
                return ret;
            }

            /*
             * for tee size > ree size condition,no buffer copy,
             * just return tee size and short buffer error
             */
            if (n_tee->op.p[i].memref.size < get_index_memref_size(n_tee, i)) {
                tloge("ERROR:short memref size:%u/%u\n", n_tee->op.p[i].memref.size, get_index_memref_size(n_tee, i));
                p_ree->p[i].memref.size = get_index_memref_size(n_tee, i);
                ret = TEE_ERROR_SHORT_BUFFER;
                break;
            }

            /* tee memref buffer -> ree memref buffer */
            e_ret = copy_to_src(n_tee->p_vaddr_gt_ree[i], p_ree->p[i].memref.size, n_tee->p_vaddr_gt_tee[i],
                                get_index_memref_size(n_tee, i));
            if (e_ret != TEE_SUCCESS) {
                tloge("copy to ree p_%u failed:0x%x\n", i, e_ret);
                return e_ret;
            }

            /* tee memref size -> ree memref size */
            p_ree->p[i].memref.size = get_index_memref_size(n_tee, i);
            break;
        default:
            /* NONE and INPUT-only params need no copy back */
            break;
        }
    }

    return ret;
}
334
copy_pam_to_src(uint32_t cmd_id,bool ta2ta)335 TEE_Result copy_pam_to_src(uint32_t cmd_id, bool ta2ta)
336 {
337 TEE_Result ret;
338
339 bool is_global = false;
340 struct pam_node *n_tee = NULL;
341 tee_operation_gtask *p_ree = NULL;
342 if (g_cur_session == NULL)
343 is_global = true;
344
345 /* global handled cmds */
346 if (is_global == true) {
347 n_tee = g_gt_pam_node;
348 p_ree = g_gt_oper_addr;
349 /* ta handled cmds */
350 } else {
351 n_tee = g_cur_session->pam_node;
352 p_ree = g_cur_session->oper_addr;
353 }
354
355 /* some cases there is no params input */
356 if (n_tee == NULL || p_ree == NULL) {
357 tlogd("n_tee or p_ree is null\n");
358 return TEE_SUCCESS;
359 }
360
361 /* special case for load TA cmd */
362 if (is_global == true && !ta2ta && cmd_id == GLOBAL_CMD_ID_LOAD_SECURE_APP) {
363 p_ree->p[CA_TA_CMD_VALUE_INDEX].value.a = get_index_value_a(n_tee, CA_TA_CMD_VALUE_INDEX);
364 p_ree->p[CA_TA_CMD_VALUE_INDEX].value.b = get_index_value_b(n_tee, CA_TA_CMD_VALUE_INDEX);
365
366 if (p_ree->p[CA_TA_CMD_PUBKEY_INDEX].memref.size == get_index_memref_size(n_tee, CA_TA_CMD_PUBKEY_INDEX))
367 copy_to_src(n_tee->p_vaddr_gt_ree[CA_TA_CMD_PUBKEY_INDEX],
368 p_ree->p[CA_TA_CMD_PUBKEY_INDEX].memref.size,
369 n_tee->p_vaddr_gt_tee[CA_TA_CMD_PUBKEY_INDEX],
370 get_index_memref_size(n_tee, CA_TA_CMD_PUBKEY_INDEX));
371
372 release_pam_node(n_tee);
373 g_gt_pam_node = NULL;
374 g_gt_oper_addr = NULL;
375 return TEE_SUCCESS;
376 }
377
378 /* copy params back to ree */
379 ret = copy_params_back_to_ree(n_tee, p_ree);
380
381 release_pam_node(n_tee);
382 if (is_global == true) {
383 g_gt_pam_node = NULL;
384 g_gt_oper_addr = NULL;
385 } else {
386 g_cur_session->pam_node = NULL;
387 g_cur_session->oper_addr = NULL;
388 }
389
390 return ret;
391 }
392
/* One-time init: set up the global list that tracks NS mappings created
 * while no session is current (see task_add_mem_region). */
void mem_manager_init(void)
{
    dlist_init(&g_mem_ns);
}
397
store_s_cmd(const smc_cmd_t * cmd)398 TEE_Result store_s_cmd(const smc_cmd_t *cmd)
399 {
400 if (g_cur_session != NULL && cmd != NULL) {
401 if (memcpy_s(&g_cur_session->cmd_in, sizeof(smc_cmd_t), cmd, sizeof(smc_cmd_t)) != EOK) {
402 tloge("memcpy_s cmd_in failed\n");
403 return TEE_ERROR_GENERIC;
404 }
405 g_cur_session->cmd = &g_cur_session->cmd_in;
406 } else {
407 tloge("ta2ta target ta agent request error: g_cur_session or cmd is null\n");
408 return TEE_ERROR_GENERIC;
409 }
410
411 return TEE_SUCCESS;
412 }
413
414 /*
415 * task_id=0, params are copy to gtask -- 32 bit
416 * no need to change
417 */
cmd_global_ns_get_params(const smc_cmd_t * cmd,uint32_t * param_type,TEE_Param ** params)418 TEE_Result cmd_global_ns_get_params(const smc_cmd_t *cmd, uint32_t *param_type, TEE_Param **params)
419 {
420 TEE_Result ret;
421 uint64_t gtask_param = 0;
422
423 if (cmd == NULL || param_type == NULL || params == NULL)
424 return TEE_ERROR_BAD_PARAMETERS;
425
426 ret = cmd_ns_get_params(0, cmd, param_type, >ask_param);
427 *params = (TEE_Param *)(uintptr_t)gtask_param;
428
429 return ret;
430 }
431
is_opensession_cmd(const smc_cmd_t * cmd)432 bool is_opensession_cmd(const smc_cmd_t *cmd)
433 {
434 if (cmd == NULL)
435 return false;
436
437 return ((cmd->cmd_type == CMD_TYPE_GLOBAL) && (cmd->cmd_id == GLOBAL_CMD_ID_OPEN_SESSION));
438 }
439
/*
 * Make a memref param's source buffer addressable by gtask.
 * ta2ta: share-map the caller TA's buffer and record the mapping via
 * task_add_mem_region so it is released when the cmd completes.
 * CA: the buffer must live in the mailbox pool, so a phys->virt
 * translation suffices (no new mapping is created, nothing to track).
 * On success *ree_addr holds the gtask-visible address.
 */
static TEE_Result map_memref_for_gtask(bool ta2ta, const smc_cmd_t *cmd, tee_param_32 p,
                                       paddr_t buffer_h_addr, uint32_t type, void **ree_addr)
{
    (void)type;
    if (ta2ta) {
        uint64_t tmp_addr;
        /* rebuild the full phys address from the 32-bit low part + high word */
        if (map_sharemem(cmd->uid, (uint32_t)p.memref.buffer | (buffer_h_addr << SHIFT_OFFSET),
                         p.memref.size, &tmp_addr) != 0) {
            tloge("ta2ta map smc cmd operation failed\n");
            return TEE_ERROR_GENERIC;
        }

        /* gtask is 32bit */
        *ree_addr = (void *)(uintptr_t)tmp_addr;
        /* track the mapping so unmap_secure_operation can release it later */
        TEE_Result ret = task_add_mem_region(cmd->event_nr, 0, (uint64_t)(uintptr_t)(*ree_addr),
                                             p.memref.size, ta2ta);
        if (ret != TEE_SUCCESS) {
            tloge("failed to refcount the memory\n");
            /* roll back the mapping created just above */
            if (unmap_sharemem(*ree_addr, p.memref.size) != 0)
                tloge("munmap ree_addr failed\n");
            return ret;
        }
    } else {
        paddr_t tmp_addr = (paddr_t)p.memref.buffer | (buffer_h_addr << SHIFT_OFFSET);
        *ree_addr = mailbox_phys_to_virt(tmp_addr);
        if (*ree_addr == NULL) {
            tloge("buffer addr value invalid\n");
            return TEE_ERROR_BAD_PARAMETERS;
        }
    }

    return TEE_SUCCESS;
}
473
params_map_for_ta_memref(uint32_t i,struct pam_node * node,uint32_t task_id,const smc_cmd_t * cmd,bool ta2ta,uint32_t type)474 static TEE_Result params_map_for_ta_memref(uint32_t i, struct pam_node *node, uint32_t task_id,
475 const smc_cmd_t *cmd, bool ta2ta, uint32_t type)
476 {
477 void *ree_addr = NULL;
478 void *tee_addr = NULL;
479 uint32_t *buffer_h_addr = node->op.p_h_addr;
480 tee_param_32 *p = node->op.p;
481
482 set_index_memref_size(node, i, (unsigned int)p[i].memref.size);
483 /*
484 * copy memref from ree to tee
485 * 1.first map memref for global_task
486 */
487 if (map_memref_for_gtask(ta2ta, cmd, p[i], (paddr_t)buffer_h_addr[i], type, &ree_addr) != TEE_SUCCESS) {
488 tloge("%u map memref failed\n", task_id);
489 return TEE_ERROR_GENERIC;
490 }
491
492 /* skip the ta_load cmd, because it will do copy itself */
493 bool is_ta_load = ((cmd->cmd_id == GLOBAL_CMD_ID_LOAD_SECURE_APP) && (task_id == 0) && (!ta2ta) && (i == 0));
494 if (is_ta_load) {
495 set_index_memref_buffer(node, i, (uint64_t)(uintptr_t)ree_addr);
496 return TEE_SUCCESS;
497 }
498
499 /*
500 * 2.copy param memref ree->tee, if type is reserved memory,
501 * we do not copy, just set tee addr same to ree addr.
502 */
503 TEE_Result ret = copy_from_src(task_id, &tee_addr, ree_addr, p[i].memref.size, type);
504 if (ret != TEE_SUCCESS) {
505 tloge("p[%u] copy from ree failed:0x%x and cmdid=%x\n", i, ret, cmd->cmd_id);
506 return ret;
507 }
508
509 /* 3.save the params member's virt addr */
510 node->p_vaddr_gt_tee[i] = tee_addr;
511 node->p_vaddr_gt_ree[i] = ree_addr;
512
513 /* 4.map tee mem addr for target ta */
514 if (task_id == 0) {
515 /* for global_task use tee_addr directly */
516 set_index_memref_buffer(node, i, (uint64_t)(uintptr_t)tee_addr);
517 return TEE_SUCCESS;
518 }
519
520 set_index_memref_buffer(node, i, (uint64_t)(uintptr_t)tee_addr);
521 return TEE_SUCCESS;
522 }
523
set_params_for_ta(uint32_t task_id,const smc_cmd_t * cmd,struct pam_node * node,bool ta2ta,uint32_t index)524 static TEE_Result set_params_for_ta(uint32_t task_id, const smc_cmd_t *cmd, struct pam_node *node,
525 bool ta2ta, uint32_t index)
526 {
527 TEE_Result ret = TEE_SUCCESS;
528 tee_param_32 *p = node->op.p;
529 uint32_t type = TEE_PARAM_TYPE_GET(node->op.p_type, index);
530
531 switch (type) {
532 case TEE_PARAM_TYPE_NONE:
533 break;
534 case TEE_PARAM_TYPE_VALUE_INPUT:
535 case TEE_PARAM_TYPE_VALUE_OUTPUT:
536 case TEE_PARAM_TYPE_VALUE_INOUT:
537 set_index_value_a(node, index, p[index].value.a);
538 set_index_value_b(node, index, p[index].value.b);
539 break;
540 case TEE_PARAM_TYPE_MEMREF_INPUT:
541 case TEE_PARAM_TYPE_MEMREF_OUTPUT:
542 case TEE_PARAM_TYPE_MEMREF_INOUT:
543 ret = params_map_for_ta_memref(index, node, task_id, cmd, ta2ta, type);
544 break;
545 default:
546 tloge("invalid param type %u\n", type);
547 ret = TEE_ERROR_GENERIC;
548 }
549
550 return ret;
551 }
552
params_map_for_ta(uint32_t task_id,const smc_cmd_t * cmd,struct pam_node * node,bool ta2ta)553 static TEE_Result params_map_for_ta(uint32_t task_id, const smc_cmd_t *cmd, struct pam_node *node, bool ta2ta)
554 {
555 TEE_Result ret = alloc_tee_param_for_ta(task_id, node);
556 if (ret != TEE_SUCCESS)
557 return ret;
558
559 /* map ns smc cmd operation buffer to secure os */
560 for (uint32_t i = 0; i < TEE_PARAM_NUM; i++) {
561 ret = set_params_for_ta(task_id, cmd, node, ta2ta, i);
562 if (ret != TEE_SUCCESS)
563 return ret;
564 }
565
566 return TEE_SUCCESS;
567 }
568
/* Resolve a CA operation buffer to a gtask virtual address. Once the
 * mailbox pool is registered this is a pure translation; before that an
 * explicit NS mapping is created and *mapped is set so the caller can
 * track (and later undo) it.
 * NOTE(review): the double-underscore name is reserved to the
 * implementation; kept for compatibility with existing callers. */
static void *__operation_map_for_gt(paddr_t phys, uint32_t size, bool *mapped)
{
    uint64_t vaddr = 0;

    if (g_mb_state.init)
        return mailbox_phys_to_virt(phys);

    /* Before mailbox initialized, we still need map the operation. */
    if (task_map_ns_phy_mem(0, (uint64_t)phys, size, &vaddr) != 0) {
        tloge("2map smc cmd operation failed\n");
        return NULL;
    }

    *mapped = true;
    return (void *)(uintptr_t)vaddr;
}
586
/*
 * Map the smc cmd's operation buffer into gtask.
 * ta2ta: share-map the caller TA's smc_operation.
 * CA: translate via the mailbox when initialized, otherwise create a
 * temporary NS mapping (__operation_map_for_gt sets 'mapped').
 * Any mapping created here is recorded with task_add_mem_region so the
 * unmap_* paths can release it; if recording fails the mapping is rolled
 * back before returning.
 */
static TEE_Result map_cmd_to_operation(bool ta2ta, uint32_t *operation_size, const smc_cmd_t *cmd,
                                       void **operation)
{
    TEE_Result ret;
    bool mapped = false;

    if (ta2ta) {
        uint64_t tmp_operation;

        *operation_size = sizeof(struct smc_operation);
        /* rebuild the 64-bit operation phys addr from low/high words */
        if (map_sharemem(cmd->uid, cmd->operation_phys | ((paddr_t)cmd->operation_h_phys << SHIFT_OFFSET),
                         *operation_size, &tmp_operation) != 0) {
            tloge("ta2ta mode map smc cmd operation failed\n");
            return TEE_ERROR_GENERIC;
        }

        *operation = (void *)(uintptr_t)tmp_operation;
        mapped = true;
    } else {
        paddr_t tmp_addr = cmd->operation_phys | ((paddr_t)cmd->operation_h_phys << SHIFT_OFFSET);

        *operation_size = sizeof(uint32_t) * PARAM_CNT + TEE_PARAM_NUM * sizeof(tee_param_32);
        *operation = __operation_map_for_gt(tmp_addr, *operation_size, &mapped);
        if (*operation == NULL) {
            tloge("operation map for gt failed\n");
            return TEE_ERROR_GENERIC;
        }
    }

    if (mapped) {
        /* track the new mapping so cmd completion can release it */
        ret = task_add_mem_region(cmd->event_nr, 0, (uint64_t)(uintptr_t)(*operation), *operation_size, ta2ta);
        if (ret != TEE_SUCCESS) {
            /* roll back the mapping created above, path-specific */
            if (ta2ta) {
                if (unmap_sharemem(*operation, *operation_size) != 0)
                    tloge("unmap sharemem operation failed\n");
            } else {
                if (task_unmap(0, (uint64_t)*operation, *operation_size) != 0)
                    tloge("task unmap operation failed\n");
            }
            return ret;
        }
    }

    return TEE_SUCCESS;
}
632
/*
 * Core param marshalling for one smc cmd:
 *  1. map the REE/TA operation buffer;
 *  2. snapshot it into a TEE-owned pam_node (defends against REE-side
 *     modification after validation);
 *  3. for CA cmds, verify every memref lies in the mailbox pool;
 *  4. copy/map each param for the target task;
 *  5. stash pam_node + operation addr for copy_pam_to_src's copy-back and
 *     cleanup (globals for gtask cmds, session fields otherwise).
 * NOTE(review): the task_id != 0 branch dereferences g_cur_session without
 * a NULL check — callers appear to guarantee a current session; confirm.
 */
static TEE_Result operation_map_for_gt(uint32_t task_id, const smc_cmd_t *cmd, uint32_t *param_type, uint64_t *params,
                                       bool ta2ta)
{
    TEE_Result ret;
    void *operation = NULL;
    struct pam_node *pam_node = NULL;
    uint32_t operation_size;

    ret = map_cmd_to_operation(ta2ta, &operation_size, cmd, &operation);
    if (ret != TEE_SUCCESS)
        return ret;

    /* copy the ns shared mem into tee */
    ret = copy_pam_from_src(operation, operation_size, &pam_node);
    if (ret != 0) {
        tloge("copy pam from ree failed.\n");
        return ret;
    }

    if (!ta2ta) {
        /* CA memref buffers must come from the mailbox pool */
        ret = check_operation_params_in_mailbox_range(&(pam_node->op));
        if (ret != 0) {
            tloge("operation buffer is not in mailbox\n");
            release_pam_node(pam_node);
            return ret;
        }
    }

    ret = params_map_for_ta(task_id, cmd, pam_node, ta2ta);
    if (ret != 0) {
        tloge("operation map for ta failed:%x\n", ret);
        release_pam_node(pam_node);
        return ret;
    }

    if (task_id == 0) {
        bool reset_flag = false;

        *params = (uintptr_t)pam_node->p_for_ta;

        /* a leftover node means a previous gtask cmd never copied back */
        reset_flag = (g_gt_pam_node != NULL || g_gt_oper_addr != NULL);
        if (reset_flag)
            tloge("ERROR: g_gt_pam_node is not null\n");

        g_gt_pam_node = pam_node;
        g_gt_oper_addr = operation;
    } else {
        /* map virt addr of param for task_id */
        *params = (uint64_t)(uintptr_t)pam_node->p_for_ta;

        g_cur_session->pam_node = pam_node;
        g_cur_session->oper_addr = operation;
    }

    *param_type = pam_node->op.p_type;
    return ret;
}
690
cmd_ns_get_params(uint32_t task_id,const smc_cmd_t * cmd,uint32_t * param_type,uint64_t * params)691 TEE_Result cmd_ns_get_params(uint32_t task_id, const smc_cmd_t *cmd, uint32_t *param_type, uint64_t *params)
692 {
693 TEE_Result ret;
694 paddr_t tmp_operation_addr;
695
696 if (cmd == NULL || param_type == NULL || params == NULL)
697 return TEE_ERROR_BAD_PARAMETERS;
698
699 tmp_operation_addr = cmd->operation_phys | (((paddr_t)cmd->operation_h_phys) << SHIFT_OFFSET);
700
701 *param_type = 0;
702 if (tmp_operation_addr != 0) {
703 ret = operation_map_for_gt(task_id, cmd, param_type, params, false);
704 if (ret != 0) {
705 tloge("operation ns map for gt failed:%x\n", ret);
706 return ret;
707 }
708 }
709
710 return TEE_SUCCESS;
711 }
712
cmd_secure_get_params(uint32_t task_id,const smc_cmd_t * cmd,uint32_t * param_type,uint64_t * params)713 TEE_Result cmd_secure_get_params(uint32_t task_id, const smc_cmd_t *cmd, uint32_t *param_type, uint64_t *params)
714 {
715 TEE_Result ret;
716 paddr_t tmp_operation_addr;
717
718 if (cmd == NULL || param_type == NULL || params == NULL)
719 return TEE_ERROR_BAD_PARAMETERS;
720
721 if (cmd->uid == task_id)
722 return TEE_SUCCESS; /* Nothing to do, no need to remap */
723
724 tmp_operation_addr = cmd->operation_phys | (((paddr_t)cmd->operation_h_phys) << SHIFT_OFFSET);
725 *param_type = 0;
726 *params = 0;
727 if (tmp_operation_addr != 0) {
728 ret = operation_map_for_gt(task_id, cmd, param_type, params, true);
729 if (ret != 0) {
730 tloge("operation ns map for gt failed:%x\n", ret);
731 return ret;
732 }
733 }
734
735 return TEE_SUCCESS;
736 }
737
task_add_mem_region(uint32_t event_nr,uint32_t task_pid,uint64_t addr,uint32_t size,bool ta2ta)738 static TEE_Result task_add_mem_region(uint32_t event_nr, uint32_t task_pid, uint64_t addr, uint32_t size, bool ta2ta)
739 {
740 struct mem_region_ns *mem = NULL;
741
742 mem = (struct mem_region_ns *)TEE_Malloc(sizeof(struct mem_region_ns), 0);
743 if (mem == NULL)
744 return TEE_ERROR_OUT_OF_MEMORY;
745
746 mem->task_id = task_pid;
747 mem->addr = addr;
748 mem->size = size;
749 mem->event_nr = event_nr;
750 mem->ta2ta = ta2ta;
751
752 if (g_cur_session != NULL) {
753 dlist_insert_tail(&mem->list, &(g_cur_session->map_mem));
754 } else {
755 dlist_insert_tail(&mem->list, &g_mem_ns);
756 }
757
758 return TEE_SUCCESS;
759 }
760
/* Drop every region on mem_list: unmap it (skipped when the owning service
 * is already dead) and free the bookkeeping entry. */
void task_del_mem_region(struct dlist_node *mem_list, bool is_service_dead)
{
    struct mem_region_ns *cur = NULL;
    struct mem_region_ns *next = NULL;

    dlist_for_each_entry_safe(cur, next, mem_list, struct mem_region_ns, list) {
        if (!is_service_dead) {
            if (task_unmap(cur->task_id, cur->addr, cur->size) != 0)
                tloge("unmap mem addr failed, id=0x%x\n", cur->task_id);
        }
        dlist_delete(&cur->list);
        TEE_Free(cur);
    }
}
774
/* Map the non-secure smc cmd queues into gtask; NULL on failure. */
void *map_ns_cmd(paddr_t cmd_phy)
{
    uint64_t vaddr = 0;

    if (task_map_ns_phy_mem(0, (uint64_t)cmd_phy, GT_SHARED_CMD_QUEUES_SIZE, &vaddr) != 0) {
        tloge("map smc cmd failed\n");
        return NULL;
    }

    return (void *)(uintptr_t)vaddr;
}
786
map_secure_operation(uint64_t tacmd,smc_cmd_t * out_cmd,uint32_t task_id)787 TEE_Result map_secure_operation(uint64_t tacmd, smc_cmd_t *out_cmd, uint32_t task_id)
788 {
789 smc_cmd_t *cmd = NULL;
790 TEE_Result ret = TEE_SUCCESS;
791 uint64_t tmp_cmd;
792
793 if (out_cmd == NULL) {
794 tloge("map smc cmd failed\n");
795 return TEE_ERROR_GENERIC;
796 }
797
798 /* 1. do cmd copy */
799 if (map_sharemem(task_id, tacmd, sizeof(smc_cmd_t), &tmp_cmd) != 0) {
800 tloge("map smc cmd failed\n");
801 return TEE_ERROR_GENERIC;
802 }
803
804 cmd = (smc_cmd_t *)(uintptr_t)tmp_cmd;
805 cmd->uid = task_id;
806
807 if (memcpy_s(out_cmd, sizeof(*out_cmd), cmd, sizeof(*cmd)) != EOK) {
808 tloge("copy ta2ta out cmd failed\n");
809 ret = TEE_ERROR_GENERIC;
810 }
811
812 if (unmap_sharemem(cmd, sizeof(smc_cmd_t)) != 0)
813 tloge("unmap cmd failed\n");
814 return ret;
815 }
816
/*
 * Release every ta2ta sharemem mapping recorded for this cmd's event_nr on
 * the current session, then clear the session's cmd pointer. NS mappings
 * of the same event are handled by unmap_ns_operation instead.
 * No current session means nothing to do.
 */
TEE_Result unmap_secure_operation(const smc_cmd_t *cmd)
{
    struct mem_region_ns *mem = NULL;
    struct mem_region_ns *tmp = NULL;

    if (cmd == NULL) {
        tloge("map smc cmd failed\n");
        return TEE_ERROR_GENERIC;
    }

    if (g_cur_session == NULL) {
        tlogd("g_cur_session is null\n");
        return TEE_SUCCESS;
    }

    /* only ta2ta entries matching this event are released here */
    dlist_for_each_entry_safe(mem, tmp, &(g_cur_session->map_mem), struct mem_region_ns, list) {
        if (cmd->event_nr == mem->event_nr && mem->ta2ta) {
            if (unmap_sharemem((void *)(uintptr_t)mem->addr, mem->size) != 0)
                tloge("s unmap failed\n");
            dlist_delete(&mem->list);
            TEE_Free(mem);
            mem = NULL;
        }
    }
    g_cur_session->cmd = NULL;

    return TEE_SUCCESS;
}
845
/* Unmap all NS memory related to the smc_cmd */
/*
 * Walk the relevant region list (session list when a session is current,
 * else the global list) and unmap every non-ta2ta entry recorded for this
 * cmd's event_nr, then zero the cmd's operation phys addr so it cannot be
 * reused. All matching entries are attempted even if one unmap fails;
 * TEE_ERROR_GENERIC is returned if any failed.
 */
TEE_Result unmap_ns_operation(smc_cmd_t *cmd)
{
    struct mem_region_ns *mem = NULL;
    struct mem_region_ns *tmp = NULL;
    uint32_t error_flag = 0;
    struct dlist_node *mem_list = NULL;

    if (cmd == NULL)
        return TEE_ERROR_BAD_PARAMETERS;

    tlogd("unmap_ns_operation: cmd->event_nr is %x\n", cmd->event_nr);

    if (g_cur_session != NULL) {
        mem_list = &(g_cur_session->map_mem);
    } else {
        mem_list = &g_mem_ns;
    }
    dlist_for_each_entry_safe(mem, tmp, mem_list, struct mem_region_ns, list) {
        if (cmd->event_nr == mem->event_nr && !mem->ta2ta) {
            if (task_unmap(mem->task_id, mem->addr, mem->size) != 0) {
                tloge("ns unmap mem addr failed\n");
                error_flag = 1;
            }
            dlist_delete(&mem->list);
            TEE_Free(mem);
            mem = NULL;
        }
    }

    /* invalidate the operation address on the cmd itself */
    cmd->operation_phys = 0x0;
    cmd->operation_h_phys = 0x0;

    if (error_flag != 0)
        return TEE_ERROR_GENERIC;

    return TEE_SUCCESS;
}
884
/*
 * Register a non-secure memory pool (e.g. the mailbox) described by the
 * cmd's params: p[0].value.a/.b = pool phys addr low/high words,
 * p[1].value.a = pool size (must equal the expected pool_size).
 * Maps the pool into gtask and fills *state. Registration is one-shot;
 * a second attempt is refused with TEE_ERROR_ACCESS_DENIED.
 */
static TEE_Result register_mempool(const smc_cmd_t *cmd, struct mempool_state *state, uint32_t pool_size)
{
    uint32_t param_type = 0;
    TEE_Param *param = NULL;
    uint64_t vaddr;

    if (state->init) {
        tloge("mem pool has registered\n");
        return TEE_ERROR_ACCESS_DENIED;
    }

    if (cmd == NULL)
        return TEE_ERROR_BAD_PARAMETERS;

    if (cmd_global_ns_get_params(cmd, &param_type, &param) != TEE_SUCCESS)
        return TEE_ERROR_GENERIC;

    /* check params types */
    if ((TEE_PARAM_TYPE_GET(param_type, 0) != TEE_PARAM_TYPE_VALUE_INPUT) ||
        (TEE_PARAM_TYPE_GET(param_type, 1) != TEE_PARAM_TYPE_VALUE_INPUT)) {
        tloge("Bad expected parameter types.\n");
        return TEE_ERROR_BAD_PARAMETERS;
    }
    /* this condition should never happen here */
    if (param == NULL)
        return TEE_ERROR_BAD_PARAMETERS;

    /* rebuild the 64-bit phys addr from the two 32-bit value halves */
    state->start = (paddr_t)(param[0].value.a | ((paddr_t)param[0].value.b << SHIFT_OFFSET));
    state->size = param[1].value.a;
    /* reject address wrap-around and an unexpected pool size */
    if (state->start > ADDR_MAX - state->size || state->size != pool_size) {
        tloge("mem pool addr is invalid\n");
        return TEE_ERROR_BAD_PARAMETERS;
    }

    if (task_map_ns_phy_mem(0, (uint64_t)state->start, state->size, &vaddr)) {
        tloge("map mem pool failed\n");
        return TEE_ERROR_GENERIC;
    }

    state->va = (uintptr_t)vaddr;
    state->init = true;
    return TEE_SUCCESS;
}
928
register_mailbox(const smc_cmd_t * cmd)929 TEE_Result register_mailbox(const smc_cmd_t *cmd)
930 {
931 uint32_t pool_size = get_mailbox_size();
932 return register_mempool(cmd, &g_mb_state, pool_size);
933 }
934
in_mailbox_range(paddr_t addr,uint32_t size)935 bool in_mailbox_range(paddr_t addr, uint32_t size)
936 {
937 if (g_mb_state.init == false) {
938 tlogd("mailbox is not initialized\n");
939 return true;
940 }
941
942 if (addr > ADDR_MAX - size || (addr < g_mb_state.start || addr >= (g_mb_state.start + g_mb_state.size)) ||
943 ((addr + size) < g_mb_state.start || (addr + size) > (g_mb_state.start + g_mb_state.size))) {
944 tloge("ns addr is illegal\n");
945 return false;
946 }
947
948 return true;
949 }
950
/* Translate a mailbox physical address to its gtask virtual address.
 * Non-zero phys is always pre-validated by in_mailbox_range(), so only 0
 * and an uninitialized pool need rejecting here. */
void *mailbox_phys_to_virt(paddr_t phys)
{
    if (phys == 0 || !g_mb_state.init)
        return NULL;

    uintptr_t offset = (uintptr_t)(phys - g_mb_state.start);
    return (void *)(g_mb_state.va + offset);
}
961
/*
 * Verify every memref buffer in a CA operation lies inside the mailbox
 * pool; value/none params are skipped and NULL buffers are allowed.
 * Unknown param types are only logged here — they are rejected later by
 * set_params_for_ta. Returns TEE_ERROR_BAD_PARAMETERS if any buffer is
 * outside the pool.
 */
static TEE_Result check_operation_params_in_mailbox_range(const tee_operation_gtask *operation)
{
    TEE_Result ret = TEE_SUCCESS;
    paddr_t buffer_addr;

    if (operation == NULL)
        return TEE_ERROR_BAD_PARAMETERS;

    for (uint32_t i = 0; i < TEE_PARAM_NUM; i++) {
        uint32_t type = TEE_PARAM_TYPE_GET(operation->p_type, i);
        switch (type) {
        case TEE_PARAM_TYPE_NONE:
        case TEE_PARAM_TYPE_VALUE_INPUT:
        case TEE_PARAM_TYPE_VALUE_OUTPUT:
        case TEE_PARAM_TYPE_VALUE_INOUT:
            break;
        case TEE_PARAM_TYPE_MEMREF_INPUT:
        case TEE_PARAM_TYPE_MEMREF_OUTPUT:
        case TEE_PARAM_TYPE_MEMREF_INOUT:
            /* rebuild the 64-bit buffer phys addr from low/high words */
            buffer_addr = (paddr_t)((uint32_t)operation->p[i].memref.buffer |
                                    ((paddr_t)operation->p_h_addr[i] << SHIFT_OFFSET));
            if (buffer_addr && !in_mailbox_range(buffer_addr, operation->p[i].memref.size)) {
                tloge("buffer[%u] is not in mailbox\n", i);
                ret = TEE_ERROR_BAD_PARAMETERS;
            }
            break;
        default:
            /* logged only; rejected later in set_params_for_ta */
            tloge("invalid param type %u operation->p_type : %x\n", type, operation->p_type);
            break;
        }
    }

    return ret;
}
996
check_cmd_in_mailbox_range(const smc_cmd_t * cmd)997 TEE_Result check_cmd_in_mailbox_range(const smc_cmd_t *cmd)
998 {
999 paddr_t operation_addr, login_data_addr;
1000
1001 if (cmd == NULL) {
1002 tloge("cmd is null\n");
1003 return TEE_ERROR_BAD_PARAMETERS;
1004 }
1005
1006 if (g_mb_state.init == false)
1007 return TEE_SUCCESS;
1008
1009 operation_addr = (paddr_t)(cmd->operation_phys | ((paddr_t)cmd->operation_h_phys << SHIFT_OFFSET));
1010 if (operation_addr != 0 &&
1011 !in_mailbox_range(operation_addr, sizeof(uint32_t) * PARAM_CNT + TEE_PARAM_NUM * sizeof(TEE_Param))) {
1012 tloge("operation is not in mailbox\n");
1013 return TEE_ERROR_BAD_PARAMETERS;
1014 }
1015
1016 login_data_addr = (paddr_t)(cmd->login_data_phy | ((paddr_t)cmd->login_data_h_phy << SHIFT_OFFSET));
1017 if (login_data_addr != 0 && !in_mailbox_range(login_data_addr, cmd->login_data_len)) {
1018 tloge("login data is not in mailbox\n");
1019 return TEE_ERROR_BAD_PARAMETERS;
1020 }
1021
1022 return TEE_SUCCESS;
1023 }
1024
check_param_stat(uint32_t param_types,const TEE_Param * tee_param)1025 static TEE_Result check_param_stat(uint32_t param_types, const TEE_Param *tee_param)
1026 {
1027 if (TEE_PARAM_TYPE_GET(param_types, 0) != TEE_PARAM_TYPE_MEMREF_INOUT) {
1028 tloge("Bad expected parameter types.\n");
1029 return TEE_ERROR_BAD_PARAMETERS;
1030 }
1031 if (TEE_PARAM_TYPE_GET(param_types, 1) != TEE_PARAM_TYPE_VALUE_INPUT) {
1032 tloge("Bad expected parameter types.\n");
1033 return TEE_ERROR_BAD_PARAMETERS;
1034 }
1035
1036 if (tee_param == NULL) {
1037 tloge("Something wrong happen, tee_param is wrong.\n");
1038 return TEE_ERROR_BAD_PARAMETERS;
1039 }
1040 if (tee_param[0].memref.size < sizeof(struct stat_mem_info)) {
1041 tloge("Bad size \n");
1042 return TEE_ERROR_BAD_PARAMETERS;
1043 }
1044 return TEE_SUCCESS;
1045 }
1046
dump_statmeminfo(const smc_cmd_t * cmd)1047 TEE_Result dump_statmeminfo(const smc_cmd_t *cmd)
1048 {
1049 TEE_Param *tee_param = NULL;
1050 uint32_t param_types = 0;
1051 TEE_Result ret;
1052 int dump_stat;
1053 int print_history;
1054
1055 if (cmd == NULL) {
1056 tloge("CAUTION!!! invalid cmd, please check.\n");
1057 return TEE_ERROR_BAD_PARAMETERS;
1058 }
1059
1060 if (cmd_global_ns_get_params(cmd, ¶m_types, &tee_param) != TEE_SUCCESS) {
1061 tloge("failed to map operation!\n");
1062 return TEE_ERROR_GENERIC;
1063 }
1064
1065 ret = check_param_stat(param_types, tee_param);
1066 if (ret != TEE_SUCCESS) {
1067 tloge("Bad expected parameter types.\n");
1068 return ret;
1069 }
1070
1071 print_history = (int32_t)tee_param[1].value.b;
1072 if (tee_param[1].value.a == 0) {
1073 struct stat_mem_info *meminfo = (struct stat_mem_info *)tee_param[0].memref.buffer;
1074 dump_stat = dump_mem_info(meminfo, print_history);
1075 if (meminfo != NULL && dump_stat == 0) {
1076 tlogd("total=%u,pmem=%u,free=%u,lowest=%u\n", meminfo->total_mem, meminfo->mem, meminfo->free_mem,
1077 meminfo->free_mem_min);
1078 for (uint32_t i = 0; i < meminfo->proc_num; i++)
1079 tlogd("i=%u,name=%s,mem=%u,memmax=%u,memlimit=%u\n", i, meminfo->proc_mem[i].name,
1080 meminfo->proc_mem[i].mem, meminfo->proc_mem[i].mem_max, meminfo->proc_mem[i].mem_limit);
1081 } else {
1082 tlogd("meminfo is NULL or dump mem info failed.\n");
1083 }
1084 } else {
1085 dump_stat = dump_mem_info(NULL, print_history);
1086 }
1087 if (dump_stat == 0)
1088 return TEE_SUCCESS;
1089 else
1090 return TEE_ERROR_GENERIC;
1091 }
1092
check_short_buffer(void)1093 bool check_short_buffer(void)
1094 {
1095 struct pam_node *n_tee = NULL;
1096 uint32_t i;
1097
1098 if (g_cur_session == NULL) {
1099 tloge("cur session is NULL, this never happen\n");
1100 return false;
1101 }
1102
1103 n_tee = g_cur_session->pam_node;
1104 if (n_tee == NULL) {
1105 tlogd("no pam node\n");
1106 return false;
1107 }
1108
1109 for (i = 0; i < TEE_PARAM_NUM; i++) {
1110 uint32_t type = TEE_PARAM_TYPE_GET(n_tee->op.p_type, i);
1111 if (type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
1112 type != TEE_PARAM_TYPE_MEMREF_INOUT)
1113 continue;
1114 /* size changed by ta is bigger than size given by ca */
1115 if (n_tee->op.p[i].memref.size < get_index_memref_size(n_tee, i)) {
1116 tloge("short buffer happen\n");
1117 return true;
1118 }
1119 }
1120 return false;
1121 }
1122