// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "optee_private.h"
#include "optee_smc.h"

struct wq_entry {
	struct list_head link;
	struct completion c;
	u32 key;
};

void optee_wait_queue_init(struct optee_wait_queue *priv)
{
	mutex_init(&priv->mu);
	INIT_LIST_HEAD(&priv->db);
}

void optee_wait_queue_exit(struct optee_wait_queue *priv)
{
	mutex_destroy(&priv->mu);
}

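/* Return the current REE time to secure world in the single value output parameter */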
static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
{
	struct timespec64 ts;

	if (arg->num_params != 1)
		goto bad;
	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
		goto bad;

	ktime_get_real_ts64(&ts);
	arg->params[0].u.value.a = ts.tv_sec;
	arg->params[0].u.value.b = ts.tv_nsec;

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

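/*
 * Perform an I2C transfer on behalf of secure world. params[0] carries the
 * transfer mode, bus number and chip address, params[1] the addressing
 * flags, params[2] the data buffer and params[3] returns the number of
 * bytes transferred.
 */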
#if IS_REACHABLE(CONFIG_I2C)
static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
					     struct optee_msg_arg *arg)
{
	struct tee_param *params;
	struct i2c_adapter *adapter;
	struct i2c_msg msg = { };
	size_t i;
	int ret = -EOPNOTSUPP;
	u8 attr[] = {
		TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT,
		TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT,
		TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
		TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT,
	};

	if (arg->num_params != ARRAY_SIZE(attr)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
			       GFP_KERNEL);
	if (!params) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (optee_from_msg_param(params, arg->num_params, arg->params))
		goto bad;

	for (i = 0; i < arg->num_params; i++) {
		if (params[i].attr != attr[i])
			goto bad;
	}

	adapter = i2c_get_adapter(params[0].u.value.b);
	if (!adapter)
		goto bad;

	if (params[1].u.value.a & OPTEE_MSG_RPC_CMD_I2C_FLAGS_TEN_BIT) {
		if (!i2c_check_functionality(adapter,
					     I2C_FUNC_10BIT_ADDR)) {
			i2c_put_adapter(adapter);
			goto bad;
		}

		msg.flags = I2C_M_TEN;
	}

	msg.addr = params[0].u.value.c;
	msg.buf  = params[2].u.memref.shm->kaddr;
	msg.len  = params[2].u.memref.size;

	switch (params[0].u.value.a) {
	case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD:
		msg.flags |= I2C_M_RD;
		break;
	case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_WR:
		break;
	default:
		i2c_put_adapter(adapter);
		goto bad;
	}

	ret = i2c_transfer(adapter, &msg, 1);

	if (ret < 0) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
	} else {
		params[3].u.value.a = msg.len;
		if (optee_to_msg_param(arg->params, arg->num_params, params))
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		else
			arg->ret = TEEC_SUCCESS;
	}

	i2c_put_adapter(adapter);
	kfree(params);
	return;
bad:
	kfree(params);
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}
#else
static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
					     struct optee_msg_arg *arg)
{
	arg->ret = TEEC_ERROR_NOT_SUPPORTED;
}
#endif

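/* Look up the wait queue entry matching @key, creating it if it doesn't exist yet */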
static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w;

	mutex_lock(&wq->mu);

	list_for_each_entry(w, &wq->db, link)
		if (w->key == key)
			goto out;

	w = kmalloc(sizeof(*w), GFP_KERNEL);
	if (w) {
		init_completion(&w->c);
		w->key = key;
		list_add_tail(&w->link, &wq->db);
	}
out:
	mutex_unlock(&wq->mu);
	return w;
}

static void wq_sleep(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w = wq_entry_get(wq, key);

	if (w) {
		wait_for_completion(&w->c);
		mutex_lock(&wq->mu);
		list_del(&w->link);
		mutex_unlock(&wq->mu);
		kfree(w);
	}
}

static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w = wq_entry_get(wq, key);

	if (w)
		complete(&w->c);
}

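/* Handle OPTEE_MSG_RPC_CMD_WAIT_QUEUE: sleep on or wake up the given key */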
static void handle_rpc_func_cmd_wq(struct optee *optee,
				   struct optee_msg_arg *arg)
{
	if (arg->num_params != 1)
		goto bad;

	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
		goto bad;

	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
		wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
		break;
	case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
		wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
		break;
	default:
		goto bad;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

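/* Handle OPTEE_MSG_RPC_CMD_SUSPEND: sleep for the requested number of milliseconds */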
static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
{
	u32 msec_to_wait;

	if (arg->num_params != 1)
		goto bad;

	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
		goto bad;

	msec_to_wait = arg->params[0].u.value.a;

	/* Go to interruptible sleep */
	msleep_interruptible(msec_to_wait);

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

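/* Forward an RPC the kernel can't serve itself to the tee-supplicant daemon */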
static void handle_rpc_supp_cmd(struct tee_context *ctx,
				struct optee_msg_arg *arg)
{
	struct tee_param *params;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
			       GFP_KERNEL);
	if (!params) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (optee_from_msg_param(params, arg->num_params, arg->params)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto out;
	}

	arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);

	if (optee_to_msg_param(arg->params, arg->num_params, params))
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
out:
	kfree(params);
}

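/* Ask tee-supplicant to allocate a shared memory buffer of @sz bytes */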
static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
{
	u32 ret;
	struct tee_param param;
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct tee_shm *shm;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
	param.u.value.b = sz;
	param.u.value.c = 0;

	ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &param);
	if (ret)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&optee->supp.mutex);
	/* Increases count as secure world doesn't have a reference */
	shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
	mutex_unlock(&optee->supp.mutex);
	return shm;
}

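/*
 * Allocate shared memory on behalf of secure world, either via the
 * supplicant (application memory) or directly from the kernel, and
 * describe it back to secure world in the first message parameter.
 */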
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
		shm = cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_registered(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the low bits of u.tmem.buf_ptr we store the buffer
		 * offset from the 4k page, as described in the OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}

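/* Ask tee-supplicant to free a buffer previously allocated by cmd_alloc_suppl() */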
static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_param param;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
	param.u.value.b = tee_shm_get_id(shm);
	param.u.value.c = 0;

	/*
	 * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
	 * world has released its reference.
	 *
	 * It's better to do this before sending the request to the
	 * supplicant, as we'd like the process that did the initial
	 * allocation to release the last reference too, to avoid stacking
	 * many pending fput() on the client process. This could otherwise
	 * happen if secure world does many allocate and free in a single
	 * invoke.
	 */
	tee_shm_put(shm);

	optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &param);
}

static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
		cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
	}
	arg->ret = TEEC_SUCCESS;
}

static void free_pages_list(struct optee_call_ctx *call_ctx)
{
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
	}
}

void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}

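/*
 * Dispatch an OPTEE_SMC_RPC_FUNC_CMD request to the matching handler above,
 * falling back to the tee-supplicant for commands the kernel doesn't handle.
 */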
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm,
				struct optee_call_ctx *call_ctx)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_MSG_RPC_CMD_GET_TIME:
		handle_rpc_func_cmd_get_time(arg);
		break;
	case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
		handle_rpc_func_cmd_wq(optee, arg);
		break;
	case OPTEE_MSG_RPC_CMD_SUSPEND:
		handle_rpc_func_cmd_wait(arg);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	case OPTEE_MSG_RPC_CMD_I2C_TRANSFER:
		handle_rpc_func_cmd_i2c_transfer(ctx, arg);
		break;
	default:
		handle_rpc_supp_cmd(ctx, arg);
	}
}

/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
		      struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc(ctx, param->a1,
				    TEE_SHM_MAPPED | TEE_SHM_PRIV);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing. Since such interrupts are handled in Linux,
		 * a dummy RPC is performed to let Linux take the interrupt
		 * through the normal vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}
541