/*
 * Copyright (C) 2022 Huawei Technologies Co., Ltd.
 * Description: alloc global operation and pass params to TEE.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "gp_ops.h"
#include <linux/uaccess.h>
#include <linux/uidgid.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/memory.h>
#include <securec.h>
#include "teek_client_constants.h"
#include "tc_ns_client.h"
#include "agent.h"
#include "tc_ns_log.h"
#include "smc_smp.h"
#include "mem.h"
#include "mailbox_mempool.h"
#include "tc_client_driver.h"
#include "internal_functions.h"
#include "reserved_mempool.h"
#include "tlogger.h"
#include "dynamic_ion_mem.h"

#define MAX_SHARED_SIZE 0x100000	  /* 1 MiB */

static void free_operation(const struct tc_call_params *call_params,
	struct tc_op_params *op_params);

/* dir: 0 - include input, 1 - include output, 2 - both */
#define INPUT  0
#define OUTPUT 1
#define INOUT  2

static inline bool is_input_type(int dir)
{
	if (dir == INPUT || dir == INOUT)
		return true;

	return false;
}

static inline bool is_output_type(int dir)
{
	if (dir == OUTPUT || dir == INOUT)
		return true;

	return false;
}

static inline bool teec_value_type(unsigned int type, int dir)
{
	return ((is_input_type(dir) && type == TEEC_VALUE_INPUT) ||
		(is_output_type(dir) && type == TEEC_VALUE_OUTPUT) ||
		type == TEEC_VALUE_INOUT) ? true : false;
}

static inline bool teec_tmpmem_type(unsigned int type, int dir)
{
	return ((is_input_type(dir) && type == TEEC_MEMREF_TEMP_INPUT) ||
		(is_output_type(dir) && type == TEEC_MEMREF_TEMP_OUTPUT) ||
		type == TEEC_MEMREF_TEMP_INOUT) ? true : false;
}

static inline bool teec_memref_type(unsigned int type, int dir)
{
	return ((is_input_type(dir) && type == TEEC_MEMREF_PARTIAL_INPUT) ||
		(is_output_type(dir) && type == TEEC_MEMREF_PARTIAL_OUTPUT) ||
		type == TEEC_MEMREF_PARTIAL_INOUT) ? true : false;
}

static int check_user_param(const struct tc_ns_client_context *client_context,
	unsigned int index)
{
	if (!client_context) {
		tloge("client_context is null\n");
		return -EINVAL;
	}

	if (index >= PARAM_NUM) {
		tloge("index is invalid, index:%x\n", index);
		return -EINVAL;
	}
	return 0;
}

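/*
 * Helpers that classify a client parameter type:
 * temporary memref, registered (partial) memref, or value/ION input.
 */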
bool is_tmp_mem(uint32_t param_type)
{
	if (param_type == TEEC_MEMREF_TEMP_INPUT ||
		param_type == TEEC_MEMREF_TEMP_OUTPUT ||
		param_type == TEEC_MEMREF_TEMP_INOUT)
		return true;

	return false;
}

bool is_ref_mem(uint32_t param_type)
{
	if (param_type == TEEC_MEMREF_PARTIAL_INPUT ||
		param_type == TEEC_MEMREF_PARTIAL_OUTPUT ||
		param_type == TEEC_MEMREF_PARTIAL_INOUT)
		return true;

	return false;
}

bool is_val_param(uint32_t param_type)
{
	if (param_type == TEEC_VALUE_INPUT ||
		param_type == TEEC_VALUE_OUTPUT ||
		param_type == TEEC_VALUE_INOUT ||
		param_type == TEEC_ION_INPUT ||
		param_type == TEEC_ION_SGLIST_INPUT)
		return true;

	return false;
}

static bool is_mem_param(uint32_t param_type)
{
	if (is_tmp_mem(param_type) || is_ref_mem(param_type))
		return true;

	return false;
}

/* Check that the size and buffer addresses are valid userspace addresses */
static bool is_usr_refmem_valid(const union tc_ns_client_param *client_param)
{
	uint32_t size = 0;
	uint64_t size_addr = client_param->memref.size_addr |
		((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
	uint64_t buffer_addr = client_param->memref.buffer |
		((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \
	LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71))
	if (access_ok(VERIFY_READ, (void *)(uintptr_t)size_addr, sizeof(uint32_t)) == 0)
#else
	if (access_ok((void *)(uintptr_t)size_addr, sizeof(uint32_t)) == 0)
#endif
		return false;

	get_user(size, (uint32_t *)(uintptr_t)size_addr);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \
	LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71))
	if (access_ok(VERIFY_READ, (void *)(uintptr_t)buffer_addr, size) == 0)
#else
	if (access_ok((void *)(uintptr_t)buffer_addr, size) == 0)
#endif
		return false;

	return true;
}

static bool is_usr_valmem_valid(const union tc_ns_client_param *client_param)
{
	uint64_t a_addr = client_param->value.a_addr |
		((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM);
	uint64_t b_addr = client_param->value.b_addr |
		((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \
	LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71))
	if (access_ok(VERIFY_READ, (void *)(uintptr_t)a_addr, sizeof(uint32_t)) == 0)
#else
	if (access_ok((void *)(uintptr_t)a_addr, sizeof(uint32_t)) == 0)
#endif
		return false;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \
	LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71))
	if (access_ok(VERIFY_READ, (void *)(uintptr_t)b_addr, sizeof(uint32_t)) == 0)
#else
	if (access_ok((void *)(uintptr_t)b_addr, sizeof(uint32_t)) == 0)
#endif
		return false;

	return true;
}

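/*
 * Check that the user-supplied addresses recorded in params[index]
 * (memref size/buffer or value a/b) are accessible before the driver
 * dereferences them.
 */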
bool tc_user_param_valid(struct tc_ns_client_context *client_context,
	unsigned int index)
{
	union tc_ns_client_param *client_param = NULL;
	unsigned int param_type;

	if (check_user_param(client_context, index) != 0)
		return false;

	client_param = &(client_context->params[index]);
	param_type = teec_param_type_get(client_context->param_types, index);
	tlogd("param %u type is %x\n", index, param_type);
	if (param_type == TEEC_NONE) {
		tlogd("param type is TEEC_NONE\n");
		return true;
	}

	if (is_mem_param(param_type)) {
		if (!is_usr_refmem_valid(client_param))
			return false;
	} else if (is_val_param(param_type)) {
		if (!is_usr_valmem_valid(client_param))
			return false;
	} else {
		tloge("param type is not supported\n");
		return false;
	}

	return true;
}

/*
 * These functions handle reads from the client. Because the client can be
 * a kernel client or a user-space client, the proper copy routine must be used.
 */
int read_from_client(void *dest, size_t dest_size,
	const void __user *src, size_t size, uint8_t kernel_api)
{
	int ret;

	if (!dest || !src) {
		tloge("src or dest is NULL input buffer\n");
		return -EINVAL;
	}

	if (size > dest_size) {
		tloge("size is larger than dest_size\n");
		return -EINVAL;
	}
	if (size == 0)
		return 0;

	if (kernel_api != 0) {
		ret = memcpy_s(dest, dest_size, src, size);
		if (ret != EOK) {
			tloge("memcpy fail. line=%d, s_ret=%d\n",
				__LINE__, ret);
			return ret;
		}
		return ret;
	}
	/* buffer is in user space (CA calls the TEE API) */
	if (copy_from_user(dest, src, size) != 0) {
		tloge("copy from user failed\n");
		return -EFAULT;
	}

	return 0;
}

int write_to_client(void __user *dest, size_t dest_size,
	const void *src, size_t size, uint8_t kernel_api)
{
	int ret;

	if (!dest || !src) {
		tloge("src or dest is NULL input buffer\n");
		return -EINVAL;
	}

	if (size > dest_size) {
		tloge("size is larger than dest_size\n");
		return -EINVAL;
	}

	if (size == 0)
		return 0;

	if (kernel_api != 0) {
		ret = memcpy_s(dest, dest_size, src, size);
		if (ret != EOK) {
			tloge("write to client fail. line=%d, ret=%d\n",
				  __LINE__, ret);
			return ret;
		}
		return ret;
	}

	/* buffer is in user space (CA calls the TEE API) */
	if (copy_to_user(dest, src, size) != 0) {
		tloge("copy to user failed\n");
		return -EFAULT;
	}
	return 0;
}

static bool is_input_tempmem(unsigned int param_type)
{
	if (param_type == TEEC_MEMREF_TEMP_INPUT ||
		param_type == TEEC_MEMREF_TEMP_INOUT)
		return true;

	return false;
}

static int update_input_data(const union tc_ns_client_param *client_param,
	uint32_t buffer_size, void *temp_buf,
	unsigned int param_type, uint8_t kernel_params)
{
	uint64_t buffer_addr;
	if (!is_input_tempmem(param_type))
		return 0;

	buffer_addr = client_param->memref.buffer |
		((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM);
	if (read_from_client(temp_buf, buffer_size,
		(void *)(uintptr_t)buffer_addr,
		buffer_size, kernel_params) != 0) {
		tloge("copy memref buffer failed\n");
		return -EFAULT;
	}
	return 0;
}

/*
 * Temp buffers that we need to allocate/deallocate
 * for every operation.
 */
static int alloc_for_tmp_mem(const struct tc_call_params *call_params,
	struct tc_op_params *op_params, uint8_t kernel_params,
	uint32_t param_type, unsigned int index)
{
	union tc_ns_client_param *client_param = NULL;
	void *temp_buf = NULL;
	uint32_t buffer_size = 0;
	uint64_t size_addr;

	/* this never happens */
	if (index >= TEE_PARAM_NUM)
		return -EINVAL;

	/* For compatibility's sake we assume the buffer size to be 32 bits */
	client_param = &(call_params->context->params[index]);
	size_addr = client_param->memref.size_addr |
		((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
	if (read_from_client(&buffer_size, sizeof(buffer_size),
		(uint32_t __user *)(uintptr_t)size_addr,
		sizeof(uint32_t), kernel_params) != 0) {
		tloge("copy memref.size_addr failed\n");
		return -EFAULT;
	}

	if (buffer_size > MAX_SHARED_SIZE) {
		tloge("buffer size %u from user is too large\n", buffer_size);
		return -EFAULT;
	}

	op_params->mb_pack->operation.params[index].memref.size = buffer_size;
	/* TEEC_MEMREF_TEMP_INPUT is equal to TEE_PARAM_TYPE_MEMREF_INPUT */
	op_params->trans_paramtype[index] = param_type;

	if (buffer_size == 0) {
		op_params->local_tmpbuf[index].temp_buffer = NULL;
		op_params->local_tmpbuf[index].size = 0;
		op_params->mb_pack->operation.params[index].memref.buffer = 0;
		op_params->mb_pack->operation.buffer_h_addr[index] = 0;
		return 0;
	}

	temp_buf = mailbox_alloc(buffer_size, MB_FLAG_ZERO);
	if (!temp_buf) {
		tloge("temp buf malloc failed, i = %u\n", index);
		return -ENOMEM;
	}
	op_params->local_tmpbuf[index].temp_buffer = temp_buf;
	op_params->local_tmpbuf[index].size = buffer_size;

	if (update_input_data(client_param, buffer_size, temp_buf,
		param_type, kernel_params) != 0)
		return -EFAULT;

	op_params->mb_pack->operation.params[index].memref.buffer =
		mailbox_virt_to_phys((uintptr_t)temp_buf);
	op_params->mb_pack->operation.buffer_h_addr[index] =
		(unsigned int)(mailbox_virt_to_phys((uintptr_t)temp_buf) >> ADDR_TRANS_NUM);

	return 0;
}

static int check_buffer_for_ref(uint32_t *buffer_size,
	const union tc_ns_client_param *client_param, uint8_t kernel_params)
{
	uint64_t size_addr = client_param->memref.size_addr |
		((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
	if (read_from_client(buffer_size, sizeof(*buffer_size),
		(uint32_t __user *)(uintptr_t)size_addr,
		sizeof(uint32_t), kernel_params) != 0) {
		tloge("copy memref.size_addr failed\n");
		return -EFAULT;
	}
	if (*buffer_size == 0) {
		tloge("buffer_size from user is 0\n");
		return -ENOMEM;
	}
	return 0;
}

static bool is_refmem_offset_valid(const struct tc_ns_shared_mem *shared_mem,
	const union tc_ns_client_param *client_param, uint32_t buffer_size)
{
	/*
	 * An arbitrary CA can control the offset via ioctl, so the offset
	 * must be checked here to avoid integer overflow.
	 */
	if (((shared_mem->len - client_param->memref.offset) >= buffer_size) &&
		(shared_mem->len > client_param->memref.offset))
		return true;
	tloge("Unexpected size %u vs %u\n", shared_mem->len, buffer_size);
	return false;
}

static bool is_phyaddr_valid(const struct tc_ns_operation *operation, int index)
{
	/*
	 * On a device with 8 GB of physical memory there is a chance that
	 * operation->params[i].memref.buffer is all 0, but buffer_h_addr
	 * cannot be 0 at the same time.
	 */
	if ((operation->params[index].memref.buffer == 0) &&
		(operation->buffer_h_addr[index]) == 0) {
		tloge("can not find shared buffer, exit\n");
		return false;
	}

	return true;
}

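/*
 * For reserved shared memory, pass the buffer's physical address directly;
 * otherwise copy the data into a newly allocated mailbox buffer and pass
 * that buffer's physical address to the TEE.
 */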
static int set_operation_buffer(const struct tc_ns_shared_mem *shared_mem, void *buffer_addr,
	uint32_t buffer_size, unsigned int index, struct tc_op_params *op_params)
{
	if (shared_mem->mem_type == RESERVED_TYPE) {
		/* no copy to mailbox */
		op_params->mb_pack->operation.mb_buffer[index] = buffer_addr;
		op_params->mb_pack->operation.params[index].memref.buffer =
			res_mem_virt_to_phys((uintptr_t)buffer_addr);
		op_params->mb_pack->operation.buffer_h_addr[index] =
			res_mem_virt_to_phys((uintptr_t)buffer_addr) >> ADDR_TRANS_NUM;
	} else {
		void *tmp_buffer_addr = mailbox_copy_alloc(buffer_addr, buffer_size);
		if (tmp_buffer_addr == NULL)
			return -ENOMEM;

		op_params->mb_pack->operation.mb_buffer[index] = tmp_buffer_addr;
		op_params->mb_pack->operation.params[index].memref.buffer =
			(unsigned int)mailbox_virt_to_phys((uintptr_t)tmp_buffer_addr);
		op_params->mb_pack->operation.buffer_h_addr[index] =
			(unsigned int)((uint64_t)mailbox_virt_to_phys((uintptr_t)tmp_buffer_addr) >> ADDR_TRANS_NUM);
	}
	return 0;
}

/*
 * MEMREF_PARTIAL buffers are already allocated, so we just
 * need to search for the shared_mem reference.
 * For interface compatibility we assume the buffer size to be 32 bits.
 */
static int alloc_for_ref_mem(const struct tc_call_params *call_params,
	struct tc_op_params *op_params, uint8_t kernel_params,
	uint32_t param_type, unsigned int index)
{
	union tc_ns_client_param *client_param = NULL;
	struct tc_ns_shared_mem *shared_mem = NULL;
	uint32_t buffer_size = 0;
	void *buffer_addr = NULL;
	int ret = 0;

	/* this never happens */
	if (index >= TEE_PARAM_NUM)
		return -EINVAL;

	client_param = &(call_params->context->params[index]);
	if (check_buffer_for_ref(&buffer_size, client_param, kernel_params) != 0)
		return -EINVAL;

	op_params->mb_pack->operation.params[index].memref.buffer = 0;

	mutex_lock(&call_params->dev->shared_mem_lock);
	list_for_each_entry(shared_mem,
		&call_params->dev->shared_mem_list, head) {
		buffer_addr = (void *)(uintptr_t)(client_param->memref.buffer |
			((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM));
		if (shared_mem->user_addr != buffer_addr)
			continue;
		if (!is_refmem_offset_valid(shared_mem, client_param,
			buffer_size)) {
			break;
		}
		buffer_addr = (void *)(uintptr_t)(
			(uintptr_t)shared_mem->kernel_addr +
			client_param->memref.offset);

		ret = set_operation_buffer(shared_mem, buffer_addr, buffer_size, index, op_params);
		if (ret != 0) {
			tloge("set operation buffer failed\n");
			break;
		}
		op_params->mb_pack->operation.sharemem[index] = shared_mem;
		get_sharemem_struct(shared_mem);
		break;
	}
	mutex_unlock(&call_params->dev->shared_mem_lock);
	if (ret != 0)
		return ret;

	if (!is_phyaddr_valid(&op_params->mb_pack->operation, index))
		return -EINVAL;

	op_params->mb_pack->operation.params[index].memref.size = buffer_size;
	/* Change TEEC_MEMREF_PARTIAL_XXXXX to TEE_PARAM_TYPE_MEMREF_XXXXX */
	op_params->trans_paramtype[index] = param_type -
		(TEEC_MEMREF_PARTIAL_INPUT - TEE_PARAM_TYPE_MEMREF_INPUT);

	if (shared_mem->mem_type == RESERVED_TYPE)
		op_params->trans_paramtype[index] +=
			(TEE_PARAM_TYPE_RESMEM_INPUT - TEE_PARAM_TYPE_MEMREF_INPUT);
	return ret;
}

#ifdef CONFIG_NOCOPY_SHAREDMEM
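/*
 * Pin the CA's pages with get_user_pages() and record a pagelist_info
 * header plus each page's physical address into the mailbox buffer @buff.
 */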
static int fill_shared_mem_info(void *start_vaddr, uint32_t pages_no, uint32_t offset, uint32_t buffer_size, void *buff)
{
	struct pagelist_info *page_info = NULL;
	struct page **pages = NULL;
	uint64_t *phys_addr = NULL;
	uint32_t page_num;
	uint32_t i;
	if (pages_no == 0)
		return -EFAULT;
	pages = (struct page **)vmalloc(pages_no * sizeof(uint64_t));
	if (pages == NULL)
		return -EFAULT;
	down_read(&mm_sem_lock(current->mm));
	page_num = get_user_pages((uintptr_t)start_vaddr, pages_no, FOLL_WRITE, pages, NULL);
	up_read(&mm_sem_lock(current->mm));
	if (page_num != pages_no) {
		tloge("get page phy addr failed\n");
		if (page_num > 0)
			release_pages(pages, page_num);
		vfree(pages);
		return -EFAULT;
	}
	page_info = buff;
	page_info->page_num = pages_no;
	page_info->page_size = PAGE_SIZE;
	page_info->sharedmem_offset = offset;
	page_info->sharedmem_size = buffer_size;
	phys_addr = (uint64_t *)buff + (sizeof(*page_info) / sizeof(uint64_t));
	for (i = 0; i < pages_no; i++) {
		struct page *page = pages[i];
		if (page == NULL) {
			release_pages(pages, page_num);
			vfree(pages);
			return -EFAULT;
		}
		phys_addr[i] = (uintptr_t)page_to_phys(page);
	}
	vfree(pages);
	return 0;
}

static int check_buffer_for_sharedmem(uint32_t *buffer_size,
	const union tc_ns_client_param *client_param, uint8_t kernel_params)
{
	uint64_t size_addr = client_param->memref.size_addr |
		((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
	uint64_t buffer_addr = client_param->memref.buffer |
		((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM);
	if (read_from_client(buffer_size, sizeof(*buffer_size),
		(uint32_t __user *)(uintptr_t)size_addr,
		sizeof(uint32_t), kernel_params)) {
		tloge("copy size_addr failed\n");
		return -EFAULT;
	}

	if (*buffer_size == 0 || *buffer_size > SZ_256M) {
		tloge("invalid buffer size\n");
		return -ENOMEM;
	}

	if ((client_param->memref.offset >= SZ_256M) ||
		(UINT64_MAX - buffer_addr <= client_param->memref.offset)) {
		tloge("invalid buff or offset\n");
		return -EFAULT;
	}
	return 0;
}

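/*
 * Build a page list describing the CA's shared buffer so the TEE can map
 * it directly instead of copying the data through the mailbox.
 */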
static int transfer_shared_mem(const struct tc_call_params *call_params,
	struct tc_op_params *op_params, uint8_t kernel_params,
	uint32_t param_type, unsigned int index)
{
	void *buff = NULL;
	void *start_vaddr = NULL;
	union tc_ns_client_param *client_param = NULL;
	uint32_t buffer_size;
	uint32_t pages_no;
	uint32_t offset;
	uint32_t buff_len;
	uint64_t buffer_addr;

	if (index >= TEE_PARAM_NUM)
		return -EINVAL;

	client_param = &(call_params->context->params[index]);
	if (check_buffer_for_sharedmem(&buffer_size, client_param, kernel_params))
		return -EINVAL;

	buffer_addr = client_param->memref.buffer |
		((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM);
	buff = (void *)(uint64_t)(buffer_addr + client_param->memref.offset);
	start_vaddr = (void *)(((uint64_t)buff) & PAGE_MASK);
	offset = ((uint32_t)(uintptr_t)buff) & (~PAGE_MASK);
	pages_no = PAGE_ALIGN(offset + buffer_size) / PAGE_SIZE;

	buff_len = sizeof(struct pagelist_info) + (sizeof(uint64_t) * pages_no);
	buff = mailbox_alloc(buff_len, MB_FLAG_ZERO);
	if (buff == NULL)
		return -EFAULT;

	if (fill_shared_mem_info(start_vaddr, pages_no, offset, buffer_size, buff)) {
		mailbox_free(buff);
		return -EFAULT;
	}

	op_params->local_tmpbuf[index].temp_buffer = buff;
	op_params->local_tmpbuf[index].size = buff_len;

	op_params->mb_pack->operation.params[index].memref.buffer = mailbox_virt_to_phys((uintptr_t)buff);
	op_params->mb_pack->operation.buffer_h_addr[index] = (uint64_t)mailbox_virt_to_phys((uintptr_t)buff) >> ADDR_TRANS_NUM;
	op_params->mb_pack->operation.params[index].memref.size = buff_len;
	op_params->trans_paramtype[index] = param_type;
	return 0;
}
#else
static int transfer_shared_mem(const struct tc_call_params *call_params,
	const struct tc_op_params *op_params, uint8_t kernel_params,
	uint32_t param_type, unsigned int index)
{
	(void)call_params;
	(void)op_params;
	(void)kernel_params;
	(void)param_type;
	(void)index;
	tloge("invalid shared mem type\n");
	return -1;
}
#endif

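/*
 * Copy value.a and value.b from the client (user or kernel space)
 * into the mailbox operation.
 */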
static int transfer_client_value(const struct tc_call_params *call_params,
	struct tc_op_params *op_params, uint8_t kernel_params,
	uint32_t param_type, unsigned int index)
{
	struct tc_ns_operation *operation = &op_params->mb_pack->operation;
	union tc_ns_client_param *client_param = NULL;
	uint64_t a_addr, b_addr;

	/* this never happens */
	if (index >= TEE_PARAM_NUM)
		return -EINVAL;

	client_param = &(call_params->context->params[index]);
	a_addr = client_param->value.a_addr |
		((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM);
	b_addr = client_param->value.b_addr |
		((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM);

	if (read_from_client(&operation->params[index].value.a,
		sizeof(operation->params[index].value.a),
		(void *)(uintptr_t)a_addr,
		sizeof(operation->params[index].value.a),
		kernel_params) != 0) {
		tloge("copy value a failed\n");
		return -EFAULT;
	}
	if (read_from_client(&operation->params[index].value.b,
		sizeof(operation->params[index].value.b),
		(void *)(uintptr_t)b_addr,
		sizeof(operation->params[index].value.b),
		kernel_params) != 0) {
		tloge("copy value b failed\n");
		return -EFAULT;
	}

	/* TEEC_VALUE_INPUT is equal to TEE_PARAM_TYPE_VALUE_INPUT */
	op_params->trans_paramtype[index] = param_type;
	return 0;
}

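/*
 * Walk the four GP parameters of the client context, convert each one into
 * its mailbox representation according to its type, then pack the translated
 * parameter types into the operation.
 */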
static int alloc_operation(const struct tc_call_params *call_params,
	struct tc_op_params *op_params)
{
	int ret = 0;
	uint32_t index;
	uint8_t kernel_params;
	uint32_t param_type;

	kernel_params = call_params->dev->kernel_api;
	for (index = 0; index < TEE_PARAM_NUM; index++) {
		/*
		 * Normally kernel_params equals kernel_api.
		 * But for TC_CALL_LOGIN, params 2/3 are filled by the kernel,
		 * so under this circumstance params 2/3 have to be set to
		 * kernel mode, while params 0/1 keep the same mode as kernel_api.
		 */
		if ((call_params->flags & TC_CALL_LOGIN) && (index >= 2))
			kernel_params = TEE_REQ_FROM_KERNEL_MODE;
		param_type = teec_param_type_get(
			call_params->context->param_types, index);

		tlogd("param %u type is %x\n", index, param_type);
		if (teec_tmpmem_type(param_type, INOUT))
			ret = alloc_for_tmp_mem(call_params, op_params,
				kernel_params, param_type, index);
		else if (teec_memref_type(param_type, INOUT))
			ret = alloc_for_ref_mem(call_params, op_params,
				kernel_params, param_type, index);
		else if (teec_value_type(param_type, INOUT))
			ret = transfer_client_value(call_params, op_params,
				kernel_params, param_type, index);
		else if (param_type == TEEC_ION_INPUT)
			ret = alloc_for_ion(call_params, op_params,
				kernel_params, param_type, index);
		else if (param_type == TEEC_ION_SGLIST_INPUT)
			ret = alloc_for_ion_sglist(call_params, op_params,
				kernel_params, param_type, index);
		else if (param_type == TEEC_MEMREF_SHARED_INOUT)
			ret = transfer_shared_mem(call_params, op_params,
				kernel_params, param_type, index);
		else
			tlogd("param type = TEEC_NONE\n");

		if (ret != 0)
			break;
	}
	if (ret != 0) {
		free_operation(call_params, op_params);
		return ret;
	}
	op_params->mb_pack->operation.paramtypes =
		teec_param_types(op_params->trans_paramtype[0],
		op_params->trans_paramtype[1],
		op_params->trans_paramtype[2],
		op_params->trans_paramtype[3]);
	op_params->op_inited = true;

	return ret;
}

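/*
 * Write the returned memref size back to the client and, in the complete
 * case with a valid size, copy the temp buffer content back as well.
 */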
static int update_tmp_mem(const struct tc_call_params *call_params,
	const struct tc_op_params *op_params, unsigned int index, bool is_complete)
{
	union tc_ns_client_param *client_param = NULL;
	uint32_t buffer_size;
	struct tc_ns_operation *operation = &op_params->mb_pack->operation;
	uint64_t size_addr, buffer_addr;

	if (index >= TEE_PARAM_NUM) {
		tloge("tmp buf size or index is invalid\n");
		return -EFAULT;
	}

	buffer_size = operation->params[index].memref.size;
	client_param = &(call_params->context->params[index]);
	size_addr = client_param->memref.size_addr |
		((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
	buffer_addr = client_param->memref.buffer |
		((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM);
	/* Size is updated all the time */
	if (write_to_client((void *)(uintptr_t)size_addr,
		sizeof(buffer_size),
		&buffer_size, sizeof(buffer_size),
		call_params->dev->kernel_api) != 0) {
		tloge("copy tempbuf size failed\n");
		return -EFAULT;
	}
	if (buffer_size > op_params->local_tmpbuf[index].size) {
		/* incomplete case: when the buffer size is invalid, see the next param */
		if (!is_complete)
			return 0;
		/*
		 * complete case: the operation is allocated from the mailbox
		 * and shared with gtask, so it may have been changed
		 */
		tloge("memref.size has been changed larger than the initial\n");
		return -EFAULT;
	}
	if (buffer_size == 0)
		return 0;
	/* Only update the buffer when the buffer size is valid in the complete case */
	if (write_to_client((void *)(uintptr_t)buffer_addr,
		operation->params[index].memref.size,
		op_params->local_tmpbuf[index].temp_buffer,
		operation->params[index].memref.size,
		call_params->dev->kernel_api) != 0) {
		tloge("copy tempbuf failed\n");
		return -ENOMEM;
	}
	return 0;
}

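/*
 * Write the returned size back to the client; for non-reserved shared memory
 * also copy the mailbox buffer back into the registered shared buffer when
 * the original size can hold it.
 */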
static int update_for_ref_mem(const struct tc_call_params *call_params,
	const struct tc_op_params *op_params, unsigned int index)
{
	union tc_ns_client_param *client_param = NULL;
	uint32_t buffer_size;
	unsigned int orig_size = 0;
	struct tc_ns_operation *operation = &op_params->mb_pack->operation;
	uint64_t size_addr;

	if (index >= TEE_PARAM_NUM) {
		tloge("index is invalid\n");
		return -EFAULT;
	}

	/* update size */
	buffer_size = operation->params[index].memref.size;
	client_param = &(call_params->context->params[index]);
	size_addr = client_param->memref.size_addr |
		((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);

	if (read_from_client(&orig_size,
		sizeof(orig_size),
		(uint32_t __user *)(uintptr_t)size_addr,
		sizeof(orig_size), call_params->dev->kernel_api) != 0) {
		tloge("copy orig memref.size_addr failed\n");
		return -EFAULT;
	}

	if (write_to_client((void *)(uintptr_t)size_addr,
		sizeof(buffer_size),
		&buffer_size, sizeof(buffer_size),
		call_params->dev->kernel_api) != 0) {
		tloge("copy buf size failed\n");
		return -EFAULT;
	}

	/* reserved memory does not need to be copied back */
	if (operation->sharemem[index]->mem_type == RESERVED_TYPE)
		return 0;
	/* copy from mb_buffer to sharemem */
	if (operation->mb_buffer[index] && orig_size >= buffer_size) {
		void *buffer_addr =
			(void *)(uintptr_t)((uintptr_t)
			operation->sharemem[index]->kernel_addr +
			client_param->memref.offset);
		if (memcpy_s(buffer_addr,
			operation->sharemem[index]->len -
			client_param->memref.offset,
			operation->mb_buffer[index], buffer_size) != 0) {
			tloge("copy to sharemem failed\n");
			return -EFAULT;
		}
	}
	return 0;
}

static int update_for_value(const struct tc_call_params *call_params,
	const struct tc_op_params *op_params, unsigned int index)
{
	union tc_ns_client_param *client_param = NULL;
	struct tc_ns_operation *operation = &op_params->mb_pack->operation;
	uint64_t a_addr, b_addr;

	if (index >= TEE_PARAM_NUM) {
		tloge("index is invalid\n");
		return -EFAULT;
	}
	client_param = &(call_params->context->params[index]);
	a_addr = client_param->value.a_addr |
		((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM);
	b_addr = client_param->value.b_addr |
		((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM);

	if (write_to_client((void *)(uintptr_t)a_addr,
		sizeof(operation->params[index].value.a),
		&operation->params[index].value.a,
		sizeof(operation->params[index].value.a),
		call_params->dev->kernel_api) != 0) {
		tloge("inc copy value.a_addr failed\n");
		return -EFAULT;
	}
	if (write_to_client((void *)(uintptr_t)b_addr,
		sizeof(operation->params[index].value.b),
		&operation->params[index].value.b,
		sizeof(operation->params[index].value.b),
		call_params->dev->kernel_api) != 0) {
		tloge("inc copy value.b_addr failed\n");
		return -EFAULT;
	}
	return 0;
}

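/*
 * After the SMC returns, walk the output-direction parameters and propagate
 * sizes, buffers and values back to the client. When is_complete is false
 * (e.g. on TEEC_ERROR_SHORT_BUFFER), only the sizes are updated.
 */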
static int update_client_operation(const struct tc_call_params *call_params,
	const struct tc_op_params *op_params, bool is_complete)
{
	int ret = 0;
	uint32_t param_type;
	uint32_t index;

	if (!op_params->op_inited)
		return 0;

	/* if param_types is 0, no need to update */
	if (call_params->context->param_types == 0)
		return 0;

	for (index = 0; index < TEE_PARAM_NUM; index++) {
		param_type = teec_param_type_get(
			call_params->context->param_types, index);
		if (teec_tmpmem_type(param_type, OUTPUT))
			ret = update_tmp_mem(call_params, op_params,
				index, is_complete);
		else if (teec_memref_type(param_type, OUTPUT))
			ret = update_for_ref_mem(call_params,
				op_params, index);
		else if (is_complete && teec_value_type(param_type, OUTPUT))
			ret = update_for_value(call_params, op_params, index);
		else
			tlogd("param_type:%u does not need to be updated\n", param_type);
		if (ret != 0)
			break;
	}
	return ret;
}

#ifdef CONFIG_NOCOPY_SHAREDMEM
static void release_page(void *buf)
{
	uint32_t i;
	uint64_t *phys_addr = NULL;
	struct pagelist_info *page_info = NULL;
	struct page *page = NULL;

	page_info = buf;
	phys_addr = (uint64_t *)buf + (sizeof(*page_info) / sizeof(uint64_t));
	for (i = 0; i < page_info->page_num; i++) {
		page = (struct page *)(uintptr_t)phys_to_page(phys_addr[i]);
		if (page == NULL)
			continue;
		set_bit(PG_dirty, &page->flags);
		put_page(page);
	}
}
#endif
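/*
 * Release the per-parameter resources recorded in op_params: mailbox temp
 * buffers, shared-memory references and copies, ION sglist buffers, and the
 * pinned pages used for nocopy shared memory.
 */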
static void free_operation(const struct tc_call_params *call_params, struct tc_op_params *op_params)
{
	uint32_t param_type;
	uint32_t index;
	void *temp_buf = NULL;
	struct tc_ns_temp_buf *local_tmpbuf = op_params->local_tmpbuf;
	struct tc_ns_operation *operation = &op_params->mb_pack->operation;

	for (index = 0; index < TEE_PARAM_NUM; index++) {
		param_type = teec_param_type_get(call_params->context->param_types, index);
		if (is_tmp_mem(param_type)) {
			/* free temp buffer */
			temp_buf = local_tmpbuf[index].temp_buffer;
			tlogd("free temp buf, i = %u\n", index);
#if (!defined(CONFIG_LIBLINUX)) && (!defined(CONFIG_SHARED_MEM_RESERVED))
			/* if temp_buf from iomap instead of page_alloc, virt_addr_valid will return false */
			if (!virt_addr_valid((unsigned long)(uintptr_t)temp_buf))
				continue;
#endif
			if (!ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)temp_buf)) {
				mailbox_free(temp_buf);
				temp_buf = NULL;
			}
		} else if (is_ref_mem(param_type)) {
			struct tc_ns_shared_mem *shm = operation->sharemem[index];
			if (shm != NULL && shm->mem_type == RESERVED_TYPE) {
				put_sharemem_struct(operation->sharemem[index]);
				continue;
			}
			put_sharemem_struct(operation->sharemem[index]);
			if (operation->mb_buffer[index])
				mailbox_free(operation->mb_buffer[index]);
		} else if (param_type == TEEC_ION_SGLIST_INPUT) {
			temp_buf = local_tmpbuf[index].temp_buffer;
			tlogd("free ion sglist buf, i = %u\n", index);
#if (!defined(CONFIG_LIBLINUX)) && (!defined(CONFIG_SHARED_MEM_RESERVED))
			/* if temp_buf from iomap instead of page_alloc, virt_addr_valid will return false */
			if (!virt_addr_valid((uint64_t)(uintptr_t)temp_buf))
				continue;
#endif
			if (!ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)temp_buf)) {
				mailbox_free(temp_buf);
				temp_buf = NULL;
			}
		} else if (param_type == TEEC_MEMREF_SHARED_INOUT) {
#ifdef CONFIG_NOCOPY_SHAREDMEM
			temp_buf = local_tmpbuf[index].temp_buffer;
			if (temp_buf != NULL) {
				release_page(temp_buf);
				mailbox_free(temp_buf);
			}
#endif
		}
	}
}

static bool is_clicall_params_vaild(const struct tc_call_params *call_params)
{
	if (!call_params) {
		tloge("call param is null");
		return false;
	}

	if (!call_params->dev) {
		tloge("dev file is null");
		return false;
	}

	if (!call_params->context) {
		tloge("client context is null");
		return false;
	}

	return true;
}

static int alloc_for_client_call(struct tc_op_params *op_params)
{
	op_params->smc_cmd = kzalloc(sizeof(*(op_params->smc_cmd)),
		GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)(op_params->smc_cmd))) {
		tloge("smc cmd malloc failed\n");
		return -ENOMEM;
	}

	op_params->mb_pack = mailbox_alloc_cmd_pack();
	if (!op_params->mb_pack) {
		kfree(op_params->smc_cmd);
		op_params->smc_cmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

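/*
 * Fill the SMC command from the client context: uuid, command and session
 * ids, caller pid/tgid, the physical address of the mailbox operation, and
 * the login method.
 */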
static int init_smc_cmd(const struct tc_call_params *call_params,
	struct tc_op_params *op_params)
{
	struct tc_ns_smc_cmd *smc_cmd = op_params->smc_cmd;
	struct tc_ns_client_context *context = call_params->context;
	struct tc_ns_operation *operation = &op_params->mb_pack->operation;
	bool global = call_params->flags & TC_CALL_GLOBAL;

	smc_cmd->cmd_type = global ? CMD_TYPE_GLOBAL : CMD_TYPE_TA;
	if (memcpy_s(smc_cmd->uuid, sizeof(smc_cmd->uuid),
		context->uuid, UUID_LEN) != 0) {
		tloge("memcpy uuid error\n");
		return -EFAULT;
	}
	smc_cmd->cmd_id = context->cmd_id;
	smc_cmd->dev_file_id = call_params->dev->dev_file_id;
	smc_cmd->context_id = context->session_id;
	smc_cmd->err_origin = context->returns.origin;
	smc_cmd->started = context->started;
	smc_cmd->ca_pid = current->pid;
	smc_cmd->pid = current->tgid;

	tlogv("current uid is %u\n", smc_cmd->uid);
	if (context->param_types != 0) {
		smc_cmd->operation_phys =
			mailbox_virt_to_phys((uintptr_t)operation);
		smc_cmd->operation_h_phys =
			(uint64_t)mailbox_virt_to_phys((uintptr_t)operation) >> ADDR_TRANS_NUM;
	} else {
		smc_cmd->operation_phys = 0;
		smc_cmd->operation_h_phys = 0;
	}
	smc_cmd->login_method = context->login.method;

	/* if smc from kernel CA, set login_method to TEEK_LOGIN_IDENTIFY */
	if (call_params->dev->kernel_api == TEE_REQ_FROM_KERNEL_MODE)
		smc_cmd->login_method = TEEK_LOGIN_IDENTIFY;

	return 0;
}

static bool need_check_login(const struct tc_call_params *call_params,
	const struct tc_op_params *op_params)
{
	if (call_params->dev->pub_key_len == sizeof(uint32_t) &&
		op_params->smc_cmd->cmd_id == GLOBAL_CMD_ID_OPEN_SESSION &&
		current->mm && ((call_params->flags & TC_CALL_GLOBAL) != 0))
		return true;

	return false;
}

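/*
 * When the open-session command needs login data, copy the session auth hash
 * into the mailbox login data and record its physical address in the SMC
 * command; otherwise clear the login fields.
 */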
static int check_login_for_encrypt(const struct tc_call_params *call_params,
	struct tc_op_params *op_params)
{
	struct tc_ns_session *sess = call_params->sess;
	struct tc_ns_smc_cmd *smc_cmd = op_params->smc_cmd;
	struct mb_cmd_pack *mb_pack = op_params->mb_pack;

	if (need_check_login(call_params, op_params) && sess) {
		if (memcpy_s(mb_pack->login_data, sizeof(mb_pack->login_data),
			sess->auth_hash_buf,
			sizeof(sess->auth_hash_buf)) != 0) {
			tloge("copy login data failed\n");
			return -EFAULT;
		}
		smc_cmd->login_data_phy = mailbox_virt_to_phys((uintptr_t)mb_pack->login_data);
		smc_cmd->login_data_h_addr =
			(uint64_t)mailbox_virt_to_phys((uintptr_t)mb_pack->login_data) >> ADDR_TRANS_NUM;
		smc_cmd->login_data_len = MAX_SHA_256_SZ * (NUM_OF_SO + 1);
	} else {
		smc_cmd->login_data_phy = 0;
		smc_cmd->login_data_h_addr = 0;
		smc_cmd->login_data_len = 0;
	}
	return 0;
}

static uint32_t get_uid_for_cmd(void)
{
	kuid_t kuid;

	kuid = current_uid();
	return kuid.val;
}

static void reset_session_id(const struct tc_call_params *call_params,
	const struct tc_op_params *op_params, int tee_ret)
{
	bool need_reset = false;

	call_params->context->session_id = op_params->smc_cmd->context_id;
	/*
	 * if tee_ret is an error other than TEEC_PENDING
	 * but context_id has been set, it needs to be reset to 0
	 */
	need_reset = ((call_params->flags & TC_CALL_GLOBAL) &&
		call_params->context->cmd_id == GLOBAL_CMD_ID_OPEN_SESSION &&
		tee_ret && tee_ret != (int)TEEC_PENDING);
	if (need_reset)
		call_params->context->session_id = 0;
	return;
}

static void pend_ca_thread(struct tc_ns_session *session,
	const struct tc_ns_smc_cmd *smc_cmd)
{
	struct tc_wait_data *wq = NULL;

	if (session)
		wq = &session->wait_data;

	if (wq) {
		tlogv("before wait event\n");
		/*
		 * use wait_event instead of wait_event_interruptible so
		 * that ap suspend will not wake up the TEE wait call
		 */
		wait_event(wq->send_cmd_wq, wq->send_wait_flag != 0);
		wq->send_wait_flag = 0;
	}
	tlogv("operation start is :%d\n", smc_cmd->started);
	return;
}

static void release_tc_call_resource(const struct tc_call_params *call_params,
	struct tc_op_params *op_params, int tee_ret)
{
	/* kfree(NULL) is safe and this check is probably not required */
	call_params->context->returns.code = tee_ret;
	call_params->context->returns.origin = op_params->smc_cmd->err_origin;

	/*
	 * 1. when a CA invokes a command and crashes, gtask releases the service
	 * node and the ion deletion won't be triggered, so the tzdriver needs to
	 * kill the ion here;
	 * 2. when a TA crashes, the tzdriver also needs to kill the ion;
	 */
	if (tee_ret == (int)TEE_ERROR_TAGET_DEAD || tee_ret == (int)TEEC_ERROR_GENERIC)
		kill_ion_by_uuid((struct tc_uuid *)op_params->smc_cmd->uuid);

	if (op_params->op_inited)
		free_operation(call_params, op_params);

	kfree(op_params->smc_cmd);
	mailbox_free(op_params->mb_pack);
}

static int config_smc_cmd_context(const struct tc_call_params *call_params,
	struct tc_op_params *op_params)
{
	int ret;

	ret = init_smc_cmd(call_params, op_params);
	if (ret != 0)
		return ret;

	ret = check_login_for_encrypt(call_params, op_params);

	return ret;
}

static int handle_ta_pending(const struct tc_call_params *call_params,
	struct tc_op_params *op_params, int *tee_ret)
{
	if (*tee_ret != (int)TEEC_PENDING)
		return 0;

	while (*tee_ret == (int)TEEC_PENDING) {
		pend_ca_thread(call_params->sess, op_params->smc_cmd);
		*tee_ret = tc_ns_smc_with_no_nr(op_params->smc_cmd);
	}

	return 0;
}

static int post_proc_smc_return(const struct tc_call_params *call_params,
	struct tc_op_params *op_params, int tee_ret)
{
	int ret;

	if (tee_ret != 0) {
		tloge("smc call ret 0x%x, cmd ret val 0x%x, origin %u\n", tee_ret,
			op_params->smc_cmd->ret_val, op_params->smc_cmd->err_origin);
		/* same as libteec_vendor, err from TEE, set ret positive */
		ret = EFAULT;
		if (tee_ret == (int)TEEC_CLIENT_INTR)
			ret = -ERESTARTSYS;

		if (tee_ret == (int)TEEC_ERROR_SHORT_BUFFER)
			(void)update_client_operation(call_params, op_params, false);
	} else {
		tz_log_write();
		ret = update_client_operation(call_params, op_params, true);
	}

	return ret;
}

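/*
 * Entry point for a client call: validate the parameters, allocate the SMC
 * command and mailbox pack, convert the client parameters into the mailbox
 * operation, issue the SMC, re-issue it while the TA reports TEEC_PENDING,
 * propagate the results back to the client, and release all resources.
 */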
int tc_client_call(const struct tc_call_params *call_params)
{
	int ret;
	int tee_ret = 0;
	struct tc_op_params op_params = { NULL, NULL, {{0}}, {0}, false };

	if (!is_clicall_params_vaild(call_params))
		return -EINVAL;

	if (alloc_for_client_call(&op_params) != 0)
		return -ENOMEM;

	op_params.smc_cmd->err_origin = TEEC_ORIGIN_COMMS;
	op_params.smc_cmd->uid = get_uid_for_cmd();
	if (call_params->context->param_types != 0) {
		ret = alloc_operation(call_params, &op_params);
		if (ret != 0)
			goto free_src;
	}

	ret = config_smc_cmd_context(call_params, &op_params);
	if (ret != 0)
		goto free_src;

	tee_ret = tc_ns_smc(op_params.smc_cmd);

	reset_session_id(call_params, &op_params, tee_ret);

	ret = handle_ta_pending(call_params, &op_params, &tee_ret);
	if (ret != 0)
		goto free_src;

	ret = post_proc_smc_return(call_params, &op_params, tee_ret);

free_src:
	if (ret < 0) /* if ret > 0, means err from TEE */
		op_params.smc_cmd->err_origin = TEEC_ORIGIN_COMMS;
	release_tc_call_resource(call_params, &op_params, tee_ret);
	return ret;
}