1 // SPDX-License-Identifier: GPL-2.0-only
2 // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 /*
4  * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/module.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
12 
13 #include <crypto/aead.h>
14 #include <crypto/aes.h>
15 #include <crypto/engine.h>
16 #include <crypto/gcm.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/xts.h>
19 #include <crypto/internal/aead.h>
20 #include <crypto/internal/hash.h>
21 #include <crypto/internal/skcipher.h>
22 
23 #include "tegra-se.h"
24 
25 struct tegra_aes_ctx {
26 	struct tegra_se *se;
27 	u32 alg;
28 	u32 ivsize;
29 	u32 key1_id;
30 	u32 key2_id;
31 };
32 
33 struct tegra_aes_reqctx {
34 	struct tegra_se_datbuf datbuf;
35 	bool encrypt;
36 	u32 config;
37 	u32 crypto_config;
38 	u32 len;
39 	u32 *iv;
40 };
41 
42 struct tegra_aead_ctx {
43 	struct tegra_se *se;
44 	unsigned int authsize;
45 	u32 alg;
46 	u32 keylen;
47 	u32 key_id;
48 };
49 
50 struct tegra_aead_reqctx {
51 	struct tegra_se_datbuf inbuf;
52 	struct tegra_se_datbuf outbuf;
53 	struct scatterlist *src_sg;
54 	struct scatterlist *dst_sg;
55 	unsigned int assoclen;
56 	unsigned int cryptlen;
57 	unsigned int authsize;
58 	bool encrypt;
59 	u32 config;
60 	u32 crypto_config;
61 	u32 key_id;
62 	u32 iv[4];
63 	u8 authdata[16];
64 };
65 
66 struct tegra_cmac_ctx {
67 	struct tegra_se *se;
68 	unsigned int alg;
69 	u32 key_id;
70 	struct crypto_shash *fallback_tfm;
71 };
72 
73 struct tegra_cmac_reqctx {
74 	struct scatterlist *src_sg;
75 	struct tegra_se_datbuf datbuf;
76 	struct tegra_se_datbuf residue;
77 	unsigned int total_len;
78 	unsigned int blk_size;
79 	unsigned int task;
80 	u32 crypto_config;
81 	u32 config;
82 	u32 key_id;
83 	u32 *iv;
84 	u32 result[CMAC_RESULT_REG_COUNT];
85 };
86 
87 /* increment counter (128-bit int) */
88 static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
89 {
90 	do {
91 		--bits;
92 		nums += counter[bits];
93 		counter[bits] = nums & 0xff;
94 		nums >>= 8;
95 	} while (bits && nums);
96 }
97 
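/*
 * Copy the last ciphertext block back into req->iv so that a chained
 * CBC request continues from the correct IV: on encryption it is taken
 * from the bounce buffer holding the output, on decryption from the
 * original source scatterlist.
 */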
98 static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
99 {
100 	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
101 	unsigned int offset;
102 
103 	offset = req->cryptlen - ctx->ivsize;
104 
105 	if (rctx->encrypt)
106 		memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
107 	else
108 		scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
109 }
110 
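/*
 * Refresh req->iv once the operation has completed so that chained
 * requests continue correctly: CBC copies back the last ciphertext
 * block, CTR advances the counter by the number of blocks processed.
 * ECB and XTS need no IV update here.
 */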
111 static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
112 {
113 	int num;
114 
115 	if (ctx->alg == SE_ALG_CBC) {
116 		tegra_cbc_iv_copyback(req, ctx);
117 	} else if (ctx->alg == SE_ALG_CTR) {
118 		num = req->cryptlen / ctx->ivsize;
119 		if (req->cryptlen % ctx->ivsize)
120 			num++;
121 
122 		ctr_iv_inc(req->iv, ctx->ivsize, num);
123 	}
124 }
125 
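/*
 * Map the algorithm ID and direction to the CRYPTO_CONFIG register
 * value of the Tegra234 Security Engine. CMAC, GMAC, GCM and GCM_FINAL
 * carry no additional crypto-config bits and return 0.
 */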
126 static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
127 {
128 	switch (alg) {
129 	case SE_ALG_CMAC:
130 	case SE_ALG_GMAC:
131 	case SE_ALG_GCM:
132 	case SE_ALG_GCM_FINAL:
133 		return 0;
134 	case SE_ALG_CBC:
135 		if (encrypt)
136 			return SE_CRYPTO_CFG_CBC_ENCRYPT;
137 		else
138 			return SE_CRYPTO_CFG_CBC_DECRYPT;
139 	case SE_ALG_ECB:
140 		if (encrypt)
141 			return SE_CRYPTO_CFG_ECB_ENCRYPT;
142 		else
143 			return SE_CRYPTO_CFG_ECB_DECRYPT;
144 	case SE_ALG_XTS:
145 		if (encrypt)
146 			return SE_CRYPTO_CFG_XTS_ENCRYPT;
147 		else
148 			return SE_CRYPTO_CFG_XTS_DECRYPT;
149 
150 	case SE_ALG_CTR:
151 		return SE_CRYPTO_CFG_CTR;
152 	case SE_ALG_CBC_MAC:
153 		return SE_CRYPTO_CFG_CBC_MAC;
154 
155 	default:
156 		break;
157 	}
158 
159 	return -EINVAL;
160 }
161 
162 static int tegra234_aes_cfg(u32 alg, bool encrypt)
163 {
164 	switch (alg) {
165 	case SE_ALG_CBC:
166 	case SE_ALG_ECB:
167 	case SE_ALG_XTS:
168 	case SE_ALG_CTR:
169 		if (encrypt)
170 			return SE_CFG_AES_ENCRYPT;
171 		else
172 			return SE_CFG_AES_DECRYPT;
173 
174 	case SE_ALG_GMAC:
175 		if (encrypt)
176 			return SE_CFG_GMAC_ENCRYPT;
177 		else
178 			return SE_CFG_GMAC_DECRYPT;
179 
180 	case SE_ALG_GCM:
181 		if (encrypt)
182 			return SE_CFG_GCM_ENCRYPT;
183 		else
184 			return SE_CFG_GCM_DECRYPT;
185 
186 	case SE_ALG_GCM_FINAL:
187 		if (encrypt)
188 			return SE_CFG_GCM_FINAL_ENCRYPT;
189 		else
190 			return SE_CFG_GCM_FINAL_DECRYPT;
191 
192 	case SE_ALG_CMAC:
193 		return SE_CFG_CMAC;
194 
195 	case SE_ALG_CBC_MAC:
196 		return SE_AES_ENC_ALG_AES_ENC |
197 		       SE_AES_DST_HASH_REG;
198 	}
199 	return -EINVAL;
200 }
201 
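/*
 * Build the host1x command stream for one AES operation: load the
 * IV/counter registers when needed, program the last-block and
 * residual-bit counts, write the config and crypto-config registers,
 * point source and destination at the bounce buffer, start the
 * operation and request a syncpoint increment on completion. Returns
 * the number of words written to the command buffer.
 */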
202 static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
203 				       struct tegra_aes_reqctx *rctx)
204 {
205 	unsigned int data_count, res_bits, i = 0, j;
206 	struct tegra_se *se = ctx->se;
207 	u32 *cpuvaddr = se->cmdbuf->addr;
208 	dma_addr_t addr = rctx->datbuf.addr;
209 
210 	data_count = rctx->len / AES_BLOCK_SIZE;
211 	res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;
212 
213 	/*
214 	 * Hardware processes data_count + 1 blocks.
215 	 * Subtract one block if there is no residue.
216 	 */
217 	if (!res_bits)
218 		data_count--;
219 
220 	if (rctx->iv) {
221 		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
222 		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
223 		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
224 			cpuvaddr[i++] = rctx->iv[j];
225 	}
226 
227 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
228 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
229 			SE_LAST_BLOCK_RES_BITS(res_bits);
230 
231 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
232 	cpuvaddr[i++] = rctx->config;
233 	cpuvaddr[i++] = rctx->crypto_config;
234 
235 	/* Source address setting */
236 	cpuvaddr[i++] = lower_32_bits(addr);
237 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);
238 
239 	/* Destination address setting */
240 	cpuvaddr[i++] = lower_32_bits(addr);
241 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
242 			SE_ADDR_HI_SZ(rctx->len);
243 
244 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
245 	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
246 			SE_AES_OP_START;
247 
248 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
249 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
250 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
251 
252 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
253 
254 	return i;
255 }
256 
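/*
 * crypto_engine callback for AES skcipher requests: the source data is
 * copied into a DMA-coherent bounce buffer (padded to the AES block
 * size for all modes except XTS), the command stream is submitted, the
 * IV is updated for chaining and the result is copied back to the
 * destination before the request is finalized.
 */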
257 static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
258 {
259 	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
260 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
261 	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
262 	struct tegra_se *se = ctx->se;
263 	unsigned int cmdlen;
264 	int ret;
265 
266 	rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
267 	rctx->len = req->cryptlen;
268 
269 	/* Pad input to AES Block size */
270 	if (ctx->alg != SE_ALG_XTS) {
271 		if (rctx->len % AES_BLOCK_SIZE)
272 			rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
273 	}
274 
275 	rctx->datbuf.size = rctx->len;
276 	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
277 					      &rctx->datbuf.addr, GFP_KERNEL);
278 	if (!rctx->datbuf.buf)
279 		return -ENOMEM;
280 
281 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
282 
283 	/* Prepare the command and submit for execution */
284 	cmdlen = tegra_aes_prep_cmd(ctx, rctx);
285 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
286 
287 	/* Copy the result */
288 	tegra_aes_update_iv(req, ctx);
289 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
290 
291 	/* Free the buffer */
292 	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
293 			  rctx->datbuf.buf, rctx->datbuf.addr);
294 
295 	crypto_finalize_skcipher_request(se->engine, req, ret);
296 
297 	return 0;
298 }
299 
300 static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
301 {
302 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
303 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
304 	struct tegra_se_alg *se_alg;
305 	const char *algname;
306 	int ret;
307 
308 	se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);
309 
310 	crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));
311 
312 	ctx->ivsize = crypto_skcipher_ivsize(tfm);
313 	ctx->se = se_alg->se_dev;
314 	ctx->key1_id = 0;
315 	ctx->key2_id = 0;
316 
317 	algname = crypto_tfm_alg_name(&tfm->base);
318 	ret = se_algname_to_algid(algname);
319 	if (ret < 0) {
320 		dev_err(ctx->se->dev, "invalid algorithm\n");
321 		return ret;
322 	}
323 
324 	ctx->alg = ret;
325 
326 	return 0;
327 }
328 
329 static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
330 {
331 	struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);
332 
333 	if (ctx->key1_id)
334 		tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);
335 
336 	if (ctx->key2_id)
337 		tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
338 }
339 
340 static int tegra_aes_setkey(struct crypto_skcipher *tfm,
341 			    const u8 *key, u32 keylen)
342 {
343 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
344 
345 	if (aes_check_keylen(keylen)) {
346 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
347 		return -EINVAL;
348 	}
349 
350 	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
351 }
352 
353 static int tegra_xts_setkey(struct crypto_skcipher *tfm,
354 			    const u8 *key, u32 keylen)
355 {
356 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
357 	u32 len = keylen / 2;
358 	int ret;
359 
360 	ret = xts_verify_key(tfm, key, keylen);
361 	if (ret || aes_check_keylen(len)) {
362 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
363 		return -EINVAL;
364 	}
365 
366 	ret = tegra_key_submit(ctx->se, key, len,
367 			       ctx->alg, &ctx->key1_id);
368 	if (ret)
369 		return ret;
370 
371 	return tegra_key_submit(ctx->se, key + len, len,
372 			       ctx->alg, &ctx->key2_id);
375 }
376 
377 static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
378 {
379 	int manifest;
380 
381 	manifest = SE_KAC_USER_NS;
382 
383 	switch (alg) {
384 	case SE_ALG_CBC:
385 	case SE_ALG_ECB:
386 	case SE_ALG_CTR:
387 		manifest |= SE_KAC_ENC;
388 		break;
389 	case SE_ALG_XTS:
390 		manifest |= SE_KAC_XTS;
391 		break;
392 	case SE_ALG_GCM:
393 		manifest |= SE_KAC_GCM;
394 		break;
395 	case SE_ALG_CMAC:
396 		manifest |= SE_KAC_CMAC;
397 		break;
398 	case SE_ALG_CBC_MAC:
399 		manifest |= SE_KAC_ENC;
400 		break;
401 	default:
402 		return -EINVAL;
403 	}
404 
405 	switch (keylen) {
406 	case AES_KEYSIZE_128:
407 		manifest |= SE_KAC_SIZE_128;
408 		break;
409 	case AES_KEYSIZE_192:
410 		manifest |= SE_KAC_SIZE_192;
411 		break;
412 	case AES_KEYSIZE_256:
413 		manifest |= SE_KAC_SIZE_256;
414 		break;
415 	default:
416 		return -EINVAL;
417 	}
418 
419 	return manifest;
420 }
421 
422 static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
423 
424 {
425 	struct crypto_skcipher *tfm;
426 	struct tegra_aes_ctx *ctx;
427 	struct tegra_aes_reqctx *rctx;
428 
429 	tfm = crypto_skcipher_reqtfm(req);
430 	ctx  = crypto_skcipher_ctx(tfm);
431 	rctx = skcipher_request_ctx(req);
432 
433 	if (ctx->alg != SE_ALG_XTS) {
434 		if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
435 			dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
436 			return -EINVAL;
437 		}
438 	} else if (req->cryptlen < XTS_BLOCK_SIZE) {
439 		dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
440 		return -EINVAL;
441 	}
442 
443 	if (!req->cryptlen)
444 		return 0;
445 
446 	rctx->encrypt = encrypt;
447 	rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
448 	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
449 	rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
450 
451 	if (ctx->key2_id)
452 		rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
453 
454 	return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
455 }
456 
457 static int tegra_aes_encrypt(struct skcipher_request *req)
458 {
459 	return tegra_aes_crypt(req, true);
460 }
461 
462 static int tegra_aes_decrypt(struct skcipher_request *req)
463 {
464 	return tegra_aes_crypt(req, false);
465 }
466 
467 static struct tegra_se_alg tegra_aes_algs[] = {
468 	{
469 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
470 		.alg.skcipher.base = {
471 			.init = tegra_aes_cra_init,
472 			.exit = tegra_aes_cra_exit,
473 			.setkey	= tegra_aes_setkey,
474 			.encrypt = tegra_aes_encrypt,
475 			.decrypt = tegra_aes_decrypt,
476 			.min_keysize = AES_MIN_KEY_SIZE,
477 			.max_keysize = AES_MAX_KEY_SIZE,
478 			.ivsize	= AES_BLOCK_SIZE,
479 			.base = {
480 				.cra_name = "cbc(aes)",
481 				.cra_driver_name = "cbc-aes-tegra",
482 				.cra_priority = 500,
483 				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
484 				.cra_blocksize = AES_BLOCK_SIZE,
485 				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
486 				.cra_alignmask = 0xf,
487 				.cra_module = THIS_MODULE,
488 			},
489 		}
490 	}, {
491 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
492 		.alg.skcipher.base = {
493 			.init = tegra_aes_cra_init,
494 			.exit = tegra_aes_cra_exit,
495 			.setkey	= tegra_aes_setkey,
496 			.encrypt = tegra_aes_encrypt,
497 			.decrypt = tegra_aes_decrypt,
498 			.min_keysize = AES_MIN_KEY_SIZE,
499 			.max_keysize = AES_MAX_KEY_SIZE,
500 			.base = {
501 				.cra_name = "ecb(aes)",
502 				.cra_driver_name = "ecb-aes-tegra",
503 				.cra_priority = 500,
504 				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
505 				.cra_blocksize = AES_BLOCK_SIZE,
506 				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
507 				.cra_alignmask = 0xf,
508 				.cra_module = THIS_MODULE,
509 			},
510 		}
511 	}, {
512 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
513 		.alg.skcipher.base = {
514 			.init = tegra_aes_cra_init,
515 			.exit = tegra_aes_cra_exit,
516 			.setkey = tegra_aes_setkey,
517 			.encrypt = tegra_aes_encrypt,
518 			.decrypt = tegra_aes_decrypt,
519 			.min_keysize = AES_MIN_KEY_SIZE,
520 			.max_keysize = AES_MAX_KEY_SIZE,
521 			.ivsize	= AES_BLOCK_SIZE,
522 			.base = {
523 				.cra_name = "ctr(aes)",
524 				.cra_driver_name = "ctr-aes-tegra",
525 				.cra_priority = 500,
526 				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
527 				.cra_blocksize = 1,
528 				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
529 				.cra_alignmask = 0xf,
530 				.cra_module = THIS_MODULE,
531 			},
532 		}
533 	}, {
534 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
535 		.alg.skcipher.base = {
536 			.init = tegra_aes_cra_init,
537 			.exit = tegra_aes_cra_exit,
538 			.setkey	= tegra_xts_setkey,
539 			.encrypt = tegra_aes_encrypt,
540 			.decrypt = tegra_aes_decrypt,
541 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
542 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
543 			.ivsize	= AES_BLOCK_SIZE,
544 			.base = {
545 				.cra_name = "xts(aes)",
546 				.cra_driver_name = "xts-aes-tegra",
547 				.cra_priority = 500,
548 				.cra_blocksize = AES_BLOCK_SIZE,
549 				.cra_ctxsize	   = sizeof(struct tegra_aes_ctx),
550 				.cra_alignmask	   = (__alignof__(u64) - 1),
551 				.cra_module	   = THIS_MODULE,
552 			},
553 		}
554 	},
555 };
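/*
 * The skcipher implementations above are reached through the generic
 * kernel crypto API rather than called directly. A minimal,
 * illustrative sketch (not part of this driver; key, iv, src_sg,
 * dst_sg and len are the caller's) of how the "cbc(aes)" algorithm
 * registered here would be exercised:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * Error handling is omitted; the crypto core dispatches the request to
 * this driver through the crypto_engine do_one_request hooks.
 */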
556 
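/*
 * Build the command stream for the GMAC pass, which hashes only the
 * associated data. INIT and FINAL are set in the same operation since
 * the associated data is consumed in a single submission.
 */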
557 static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
558 					struct tegra_aead_reqctx *rctx)
559 {
560 	unsigned int data_count, res_bits, i = 0;
561 	struct tegra_se *se = ctx->se;
562 	u32 *cpuvaddr = se->cmdbuf->addr;
563 
564 	data_count = (rctx->assoclen / AES_BLOCK_SIZE);
565 	res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;
566 
567 	/*
568 	 * Hardware processes data_count + 1 blocks.
569 	 * Subtract one block if there is no residue.
570 	 */
571 	if (!res_bits)
572 		data_count--;
573 
574 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
575 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
576 			SE_LAST_BLOCK_RES_BITS(res_bits);
577 
578 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
579 	cpuvaddr[i++] = rctx->config;
580 	cpuvaddr[i++] = rctx->crypto_config;
581 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
582 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
583 			SE_ADDR_HI_SZ(rctx->assoclen);
584 
585 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
586 	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
587 			SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
588 			SE_AES_OP_START;
589 
590 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
591 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
592 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
593 
594 	return i;
595 }
596 
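/*
 * Build the command stream for the GCM encrypt/decrypt pass over the
 * payload. When there is no associated data this submission also
 * carries the INIT flag; otherwise the preceding GMAC pass has already
 * initialized the hardware state.
 */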
597 static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
598 					     struct tegra_aead_reqctx *rctx)
599 {
600 	unsigned int data_count, res_bits, i = 0, j;
601 	struct tegra_se *se = ctx->se;
602 	u32 *cpuvaddr = se->cmdbuf->addr, op;
603 
604 	data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
605 	res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
606 	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
607 	     SE_AES_OP_LASTBUF | SE_AES_OP_START;
608 
609 	/*
610 	 * If there is no assoc data,
611 	 * this will be the init command
612 	 */
613 	if (!rctx->assoclen)
614 		op |= SE_AES_OP_INIT;
615 
616 	/*
617 	 * Hardware processes data_count + 1 blocks.
618 	 * Subtract one block if there is no residue.
619 	 */
620 	if (!res_bits)
621 		data_count--;
622 
623 	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
624 	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
625 	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
626 		cpuvaddr[i++] = rctx->iv[j];
627 
628 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
629 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
630 			SE_LAST_BLOCK_RES_BITS(res_bits);
631 
632 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
633 	cpuvaddr[i++] = rctx->config;
634 	cpuvaddr[i++] = rctx->crypto_config;
635 
636 	/* Source Address */
637 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
638 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
639 			SE_ADDR_HI_SZ(rctx->cryptlen);
640 
641 	/* Destination Address */
642 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
643 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
644 			SE_ADDR_HI_SZ(rctx->cryptlen);
645 
646 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
647 	cpuvaddr[i++] = op;
648 
649 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
650 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
651 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
652 
653 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
654 	return i;
655 }
656 
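/*
 * Build the command stream for GCM_FINAL: program the associated-data
 * and message lengths in bits, reload the counter and have the engine
 * write the 128-bit authentication tag to the output buffer.
 */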
657 static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
658 				    struct tegra_aead_reqctx *rctx)
659 {
660 	unsigned int i = 0, j;
661 	u32 op;
662 
663 	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
664 	     SE_AES_OP_LASTBUF | SE_AES_OP_START;
665 
666 	/*
667 	 * Set init for zero sized vector
668 	 */
669 	if (!rctx->assoclen && !rctx->cryptlen)
670 		op |= SE_AES_OP_INIT;
671 
672 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
673 	cpuvaddr[i++] = rctx->assoclen * 8;
674 	cpuvaddr[i++] = 0;
675 
676 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
677 	cpuvaddr[i++] = rctx->cryptlen * 8;
678 	cpuvaddr[i++] = 0;
679 
680 	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
681 	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
682 	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
683 		cpuvaddr[i++] = rctx->iv[j];
684 
685 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
686 	cpuvaddr[i++] = rctx->config;
687 	cpuvaddr[i++] = rctx->crypto_config;
688 	cpuvaddr[i++] = 0;
689 	cpuvaddr[i++] = 0;
690 
691 	/* Destination Address */
692 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
693 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
694 			SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */
695 
696 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
697 	cpuvaddr[i++] = op;
698 
699 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
700 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
701 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
702 
703 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
704 
705 	return i;
706 }
707 
708 static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
709 {
710 	struct tegra_se *se = ctx->se;
711 	unsigned int cmdlen;
712 
713 	scatterwalk_map_and_copy(rctx->inbuf.buf,
714 				 rctx->src_sg, 0, rctx->assoclen, 0);
715 
716 	rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
717 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
718 			      SE_AES_KEY_INDEX(ctx->key_id);
719 
720 	cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
721 
722 	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
723 }
724 
725 static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
726 {
727 	struct tegra_se *se = ctx->se;
728 	int cmdlen, ret;
729 
730 	scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
731 				 rctx->assoclen, rctx->cryptlen, 0);
732 
733 	rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
734 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
735 			      SE_AES_KEY_INDEX(ctx->key_id);
736 
737 	/* Prepare command and submit */
738 	cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
739 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
740 	if (ret)
741 		return ret;
742 
743 	/* Copy the result */
744 	scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
745 				 rctx->assoclen, rctx->cryptlen, 1);
746 
747 	return 0;
748 }
749 
750 static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
751 {
752 	struct tegra_se *se = ctx->se;
753 	u32 *cpuvaddr = se->cmdbuf->addr;
754 	int cmdlen, ret, offset;
755 
756 	rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
757 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
758 			      SE_AES_KEY_INDEX(ctx->key_id);
759 
760 	/* Prepare command and submit */
761 	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
762 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
763 	if (ret)
764 		return ret;
765 
766 	if (rctx->encrypt) {
767 		/* Copy the result */
768 		offset = rctx->assoclen + rctx->cryptlen;
769 		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
770 					 offset, rctx->authsize, 1);
771 	}
772 
773 	return 0;
774 }
775 
776 static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
777 {
778 	unsigned int offset;
779 	u8 mac[16];
780 
781 	offset = rctx->assoclen + rctx->cryptlen;
782 	scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);
783 
784 	if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
785 		return -EBADMSG;
786 
787 	return 0;
788 }
789 
790 static inline int tegra_ccm_check_iv(const u8 *iv)
791 {
792 	/* iv[0] gives value of q-1
793 	 * 2 <= q <= 8 as per NIST 800-38C notation
794 	 * 2 <= L <= 8, so 1 <= L' <= 7. as per rfc 3610 notation
795 	 */
796 	if (iv[0] < 1 || iv[0] > 7) {
797 		pr_debug("ccm_check_iv failed %d\n", iv[0]);
798 		return -EINVAL;
799 	}
800 
801 	return 0;
802 }
803 
804 static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
805 					  struct tegra_aead_reqctx *rctx)
806 {
807 	unsigned int data_count, i = 0;
808 	struct tegra_se *se = ctx->se;
809 	u32 *cpuvaddr = se->cmdbuf->addr;
810 
811 	data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
812 
813 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
814 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);
815 
816 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
817 	cpuvaddr[i++] = rctx->config;
818 	cpuvaddr[i++] = rctx->crypto_config;
819 
820 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
821 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
822 			SE_ADDR_HI_SZ(rctx->inbuf.size);
823 
824 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
825 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
826 			SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */
827 
828 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
829 	cpuvaddr[i++] = SE_AES_OP_WRSTALL |
830 			SE_AES_OP_LASTBUF | SE_AES_OP_START;
831 
832 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
833 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
834 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
835 
836 	return i;
837 }
838 
839 static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
840 				       struct tegra_aead_reqctx *rctx)
841 {
842 	unsigned int i = 0, j;
843 	struct tegra_se *se = ctx->se;
844 	u32 *cpuvaddr = se->cmdbuf->addr;
845 
846 	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
847 	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
848 	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
849 		cpuvaddr[i++] = rctx->iv[j];
850 
851 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
852 	cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
853 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
854 	cpuvaddr[i++] = rctx->config;
855 	cpuvaddr[i++] = rctx->crypto_config;
856 
857 	/* Source address setting */
858 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
859 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
860 			SE_ADDR_HI_SZ(rctx->inbuf.size);
861 
862 	/* Destination address setting */
863 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
864 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
865 			SE_ADDR_HI_SZ(rctx->inbuf.size);
866 
867 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
868 	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
869 			SE_AES_OP_START;
870 
871 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
872 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
873 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
874 
875 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
876 		rctx->config, rctx->crypto_config);
877 
878 	return i;
879 }
880 
881 static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
882 {
883 	struct tegra_se *se = ctx->se;
884 	int cmdlen;
885 
886 	rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
887 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
888 						      rctx->encrypt) |
889 						      SE_AES_KEY_INDEX(ctx->key_id);
890 
891 	/* Prepare command and submit */
892 	cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
893 
894 	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
895 }
896 
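/*
 * Encode the message length into the last csize octets of the first
 * block (B_0), as described in NIST SP 800-38C / RFC 3610. Returns
 * -EOVERFLOW if the length does not fit in the available octets.
 */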
897 static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
898 {
899 	__be32 data;
900 
901 	memset(block, 0, csize);
902 	block += csize;
903 
904 	if (csize >= 4)
905 		csize = 4;
906 	else if (msglen > (1 << (8 * csize)))
907 		return -EOVERFLOW;
908 
909 	data = cpu_to_be32(msglen);
910 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
911 
912 	return 0;
913 }
914 
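/*
 * Construct the CCM B_0 block from the request IV: the flags octet
 * encodes the MAC length and whether associated data is present, and
 * the trailing q octets carry the message length.
 */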
915 static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
916 {
917 	unsigned int q, t;
918 	u8 *q_ptr, *iv = (u8 *)rctx->iv;
919 
920 	memcpy(nonce, rctx->iv, 16);
921 
922 	/*** 1. Prepare Flags Octet ***/
923 
924 	/* Encode t (mac length) */
925 	t = rctx->authsize;
926 	nonce[0] |= (((t - 2) / 2) << 3);
927 
928 	/* Adata */
929 	if (rctx->assoclen)
930 		nonce[0] |= (1 << 6);
931 
932 	/*** Encode Q - message length ***/
933 	q = iv[0] + 1;
934 	q_ptr = nonce + 16 - q;
935 
936 	return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
937 }
938 
939 static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
940 {
941 	int len = 0;
942 
943 	/* add control info for associated data
944 	 * RFC 3610 and NIST Special Publication 800-38C
945 	 */
946 	if (a < 65280) {
947 		*(__be16 *)adata = cpu_to_be16(a);
948 		len = 2;
949 	} else	{
950 		*(__be16 *)adata = cpu_to_be16(0xfffe);
951 		*(__be32 *)&adata[2] = cpu_to_be32(a);
952 		len = 6;
953 	}
954 
955 	return len;
956 }
957 
958 static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
959 {
960 	unsigned int padlen = 16 - (len % 16);
961 	u8 padding[16] = {0};
962 
963 	if (padlen == 16)
964 		return 0;
965 
966 	memcpy(buf, padding, padlen);
967 
968 	return padlen;
969 }
970 
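/*
 * Lay out the start of the CBC-MAC input: B_0 first, then (if present)
 * the encoded associated-data length followed by the associated data
 * itself, padded to the AES block size. Returns the number of bytes
 * written to the input buffer.
 */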
971 static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
972 {
973 	unsigned int alen = 0, offset = 0;
974 	u8 nonce[16], adata[16];
975 	int ret;
976 
977 	ret = tegra_ccm_format_nonce(rctx, nonce);
978 	if (ret)
979 		return ret;
980 
981 	memcpy(rctx->inbuf.buf, nonce, 16);
982 	offset = 16;
983 
984 	if (rctx->assoclen) {
985 		alen = tegra_ccm_format_adata(adata, rctx->assoclen);
986 		memcpy(rctx->inbuf.buf + offset, adata, alen);
987 		offset += alen;
988 
989 		scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
990 					 rctx->src_sg, 0, rctx->assoclen, 0);
991 
992 		offset += rctx->assoclen;
993 		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
994 					 rctx->assoclen + alen);
995 	}
996 
997 	return offset;
998 }
999 
1000 static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
1001 {
1002 	u32 result[16];
1003 	int i, ret;
1004 
1005 	/* Read and clear Result */
1006 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1007 		result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1008 
1009 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1010 		writel(0, se->base + se->hw->regs->result + (i * 4));
1011 
1012 	if (rctx->encrypt) {
1013 		memcpy(rctx->authdata, result, rctx->authsize);
1014 	} else {
1015 		ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
1016 		if (ret)
1017 			return -EBADMSG;
1018 	}
1019 
1020 	return 0;
1021 }
1022 
1023 static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
1024 {
1025 	/* Copy result */
1026 	scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
1027 				 rctx->assoclen, rctx->cryptlen, 1);
1028 
1029 	if (rctx->encrypt)
1030 		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
1031 					 rctx->assoclen + rctx->cryptlen,
1032 					 rctx->authsize, 1);
1033 	else
1034 		memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);
1035 
1036 	return 0;
1037 }
1038 
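/*
 * Run the CBC-MAC half of CCM: append the (padded) payload to the
 * formatted header blocks, submit the CBC-MAC operation and read back
 * the MAC. On decryption the computed MAC is compared against the tag
 * recovered by the CTR pass.
 */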
1039 static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
1040 {
1041 	struct tegra_se *se = ctx->se;
1042 	struct scatterlist *sg;
1043 	int offset, ret;
1044 
1045 	offset = tegra_ccm_format_blocks(rctx);
1046 	if (offset < 0)
1047 		return -EINVAL;
1048 
1049 	/* Copy plain text to the buffer */
1050 	sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;
1051 
1052 	scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
1053 				 sg, rctx->assoclen,
1054 				 rctx->cryptlen, 0);
1055 	offset += rctx->cryptlen;
1056 	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
1057 
1058 	rctx->inbuf.size = offset;
1059 
1060 	ret = tegra_ccm_do_cbcmac(ctx, rctx);
1061 	if (ret)
1062 		return ret;
1063 
1064 	return tegra_ccm_mac_result(se, rctx);
1065 }
1066 
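/*
 * Run the CTR half of CCM. The computed MAC (or the received tag on
 * decryption) occupies the first block of the input so that it is
 * processed with the initial counter block, followed by the payload.
 */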
1067 static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
1068 {
1069 	struct tegra_se *se = ctx->se;
1070 	unsigned int cmdlen, offset = 0;
1071 	struct scatterlist *sg = rctx->src_sg;
1072 	int ret;
1073 
1074 	rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
1075 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
1076 			      SE_AES_KEY_INDEX(ctx->key_id);
1077 
1078 	/* Copy the MAC (encrypt) or the received tag (decrypt) to the start of the buffer */
1079 	if (rctx->encrypt)
1080 		memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
1081 	else
1082 		scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
1083 					 rctx->assoclen + rctx->cryptlen,
1084 					 rctx->authsize, 0);
1085 
1086 	offset += rctx->authsize;
1087 	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);
1088 
1089 	/* If there is no cryptlen, proceed to submit the task */
1090 	if (rctx->cryptlen) {
1091 		scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
1092 					 rctx->assoclen, rctx->cryptlen, 0);
1093 		offset += rctx->cryptlen;
1094 		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
1095 	}
1096 
1097 	rctx->inbuf.size = offset;
1098 
1099 	/* Prepare command and submit */
1100 	cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
1101 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
1102 	if (ret)
1103 		return ret;
1104 
1105 	return tegra_ccm_ctr_result(se, rctx);
1106 }
1107 
1108 static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
1109 				struct tegra_aead_reqctx *rctx)
1110 {
1111 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1112 	u8 *iv = (u8 *)rctx->iv;
1113 	int ret, i;
1114 
1115 	rctx->src_sg = req->src;
1116 	rctx->dst_sg = req->dst;
1117 	rctx->assoclen = req->assoclen;
1118 	rctx->authsize = crypto_aead_authsize(tfm);
1119 
1120 	if (rctx->encrypt)
1121 		rctx->cryptlen = req->cryptlen;
1122 	else
1123 		rctx->cryptlen = req->cryptlen - rctx->authsize;
1124 
1125 	memcpy(iv, req->iv, 16);
1126 
1127 	ret = tegra_ccm_check_iv(iv);
1128 	if (ret)
1129 		return ret;
1130 
1131 	/* Note: rfc 3610 and NIST 800-38C require counter (ctr_0) of
1132 	 * zero to encrypt auth tag.
1133 	 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
1134 	 */
1135 	memset(iv + 15 - iv[0], 0, iv[0] + 1);
1136 
1137 	/* Clear any previous result */
1138 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1139 		writel(0, se->base + se->hw->regs->result + (i * 4));
1140 
1141 	return 0;
1142 }
1143 
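/*
 * crypto_engine callback for CCM requests. Encryption runs CBC-MAC
 * first and then CTR; decryption runs CTR first to recover plaintext
 * and tag, then CBC-MAC to verify the tag. Bounce buffers are
 * allocated per request and freed before the request is finalized.
 */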
1144 static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
1145 {
1146 	struct aead_request *req = container_of(areq, struct aead_request, base);
1147 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1148 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1149 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1150 	struct tegra_se *se = ctx->se;
1151 	int ret;
1152 
1153 	ret = tegra_ccm_crypt_init(req, se, rctx);
1154 	if (ret)
1155 		return ret;
1156 
1157 	/* Allocate buffers required */
1158 	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
1159 	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
1160 					     &rctx->inbuf.addr, GFP_KERNEL);
1161 	if (!rctx->inbuf.buf)
1162 		return -ENOMEM;
1163 
1164 	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
1165 	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
1166 					      &rctx->outbuf.addr, GFP_KERNEL);
1167 	if (!rctx->outbuf.buf) {
1168 		ret = -ENOMEM;
1169 		goto outbuf_err;
1170 	}
1171 
1172 	if (rctx->encrypt) {
1173 		/* CBC MAC Operation */
1174 		ret = tegra_ccm_compute_auth(ctx, rctx);
1175 		if (ret)
1176 			goto out;
1177 
1178 		/* CTR operation */
1179 		ret = tegra_ccm_do_ctr(ctx, rctx);
1180 		if (ret)
1181 			goto out;
1182 	} else {
1183 		/* CTR operation */
1184 		ret = tegra_ccm_do_ctr(ctx, rctx);
1185 		if (ret)
1186 			goto out;
1187 
1188 		/* CBC MAC Operation */
1189 		ret = tegra_ccm_compute_auth(ctx, rctx);
1190 		if (ret)
1191 			goto out;
1192 	}
1193 
1194 out:
1195 	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
1196 			  rctx->outbuf.buf, rctx->outbuf.addr);
1197 
1198 outbuf_err:
1199 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
1200 			  rctx->inbuf.buf, rctx->inbuf.addr);
1201 
1202 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
1203 
1204 	return 0;
1205 }
1206 
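/*
 * crypto_engine callback for GCM requests: an optional GMAC pass over
 * the associated data, an encrypt/decrypt pass over the payload, a
 * GCM_FINAL pass producing the tag, and a tag comparison on the
 * decryption path.
 */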
1207 static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
1208 {
1209 	struct aead_request *req = container_of(areq, struct aead_request, base);
1210 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1211 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1212 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1213 	int ret;
1214 
1215 	rctx->src_sg = req->src;
1216 	rctx->dst_sg = req->dst;
1217 	rctx->assoclen = req->assoclen;
1218 	rctx->authsize = crypto_aead_authsize(tfm);
1219 
1220 	if (rctx->encrypt)
1221 		rctx->cryptlen = req->cryptlen;
1222 	else
1223 		rctx->cryptlen = req->cryptlen - ctx->authsize;
1224 
1225 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
1226 	rctx->iv[3] = (1 << 24);
1227 
1228 	/* Allocate buffers required */
1229 	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
1230 	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
1231 					     &rctx->inbuf.addr, GFP_KERNEL);
1232 	if (!rctx->inbuf.buf)
1233 		return -ENOMEM;
1234 
1235 	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
1236 	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
1237 					      &rctx->outbuf.addr, GFP_KERNEL);
1238 	if (!rctx->outbuf.buf) {
1239 		ret = -ENOMEM;
1240 		goto outbuf_err;
1241 	}
1242 
1243 	/* If there is associated data perform GMAC operation */
1244 	if (rctx->assoclen) {
1245 		ret = tegra_gcm_do_gmac(ctx, rctx);
1246 		if (ret)
1247 			goto out;
1248 	}
1249 
1250 	/* GCM Encryption/Decryption operation */
1251 	if (rctx->cryptlen) {
1252 		ret = tegra_gcm_do_crypt(ctx, rctx);
1253 		if (ret)
1254 			goto out;
1255 	}
1256 
1257 	/* GCM_FINAL operation */
1258 	ret = tegra_gcm_do_final(ctx, rctx);
1259 	if (ret)
1260 		goto out;
1261 
1262 	if (!rctx->encrypt)
1263 		ret = tegra_gcm_do_verify(ctx->se, rctx);
1264 
1265 out:
1266 	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
1267 			  rctx->outbuf.buf, rctx->outbuf.addr);
1268 
1269 outbuf_err:
1270 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
1271 			  rctx->inbuf.buf, rctx->inbuf.addr);
1272 
1273 	/* Finalize the request with the operation status */
1274 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
1275 
1276 	return 0;
1277 }
1278 
1279 static int tegra_aead_cra_init(struct crypto_aead *tfm)
1280 {
1281 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1282 	struct aead_alg *alg = crypto_aead_alg(tfm);
1283 	struct tegra_se_alg *se_alg;
1284 	const char *algname;
1285 	int ret;
1286 
1287 	algname = crypto_tfm_alg_name(&tfm->base);
1288 
1289 	se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
1290 
1291 	crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
1292 
1293 	ctx->se = se_alg->se_dev;
1294 	ctx->key_id = 0;
1295 
1296 	ret = se_algname_to_algid(algname);
1297 	if (ret < 0) {
1298 		dev_err(ctx->se->dev, "invalid algorithm\n");
1299 		return ret;
1300 	}
1301 
1302 	ctx->alg = ret;
1303 
1304 	return 0;
1305 }
1306 
1307 static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1308 {
1309 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1310 
1311 	switch (authsize) {
1312 	case 4:
1313 	case 6:
1314 	case 8:
1315 	case 10:
1316 	case 12:
1317 	case 14:
1318 	case 16:
1319 		break;
1320 	default:
1321 		return -EINVAL;
1322 	}
1323 
1324 	ctx->authsize = authsize;
1325 
1326 	return 0;
1327 }
1328 
1329 static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1330 {
1331 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1332 	int ret;
1333 
1334 	ret = crypto_gcm_check_authsize(authsize);
1335 	if (ret)
1336 		return ret;
1337 
1338 	ctx->authsize = authsize;
1339 
1340 	return 0;
1341 }
1342 
1343 static void tegra_aead_cra_exit(struct crypto_aead *tfm)
1344 {
1345 	struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);
1346 
1347 	if (ctx->key_id)
1348 		tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1349 }
1350 
1351 static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
1352 {
1353 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1354 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1355 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1356 
1357 	rctx->encrypt = encrypt;
1358 
1359 	return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
1360 }
1361 
1362 static int tegra_aead_encrypt(struct aead_request *req)
1363 {
1364 	return tegra_aead_crypt(req, true);
1365 }
1366 
1367 static int tegra_aead_decrypt(struct aead_request *req)
1368 {
1369 	return tegra_aead_crypt(req, false);
1370 }
1371 
1372 static int tegra_aead_setkey(struct crypto_aead *tfm,
1373 			     const u8 *key, u32 keylen)
1374 {
1375 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1376 
1377 	if (aes_check_keylen(keylen)) {
1378 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
1379 		return -EINVAL;
1380 	}
1381 
1382 	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
1383 }
1384 
1385 static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
1386 					struct tegra_cmac_reqctx *rctx)
1387 {
1388 	unsigned int data_count, res_bits = 0, i = 0, j;
1389 	struct tegra_se *se = ctx->se;
1390 	u32 *cpuvaddr = se->cmdbuf->addr, op;
1391 
1392 	data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);
1393 
1394 	op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;
1395 
1396 	if (!(rctx->task & SHA_UPDATE)) {
1397 		op |= SE_AES_OP_FINAL;
1398 		res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
1399 	}
1400 
1401 	if (!res_bits && data_count)
1402 		data_count--;
1403 
1404 	if (rctx->task & SHA_FIRST) {
1405 		rctx->task &= ~SHA_FIRST;
1406 
1407 		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
1408 		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
1409 		/* Load 0 IV */
1410 		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
1411 			cpuvaddr[i++] = 0;
1412 	}
1413 
1414 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
1415 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
1416 			SE_LAST_BLOCK_RES_BITS(res_bits);
1417 
1418 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
1419 	cpuvaddr[i++] = rctx->config;
1420 	cpuvaddr[i++] = rctx->crypto_config;
1421 
1422 	/* Source Address */
1423 	cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
1424 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
1425 			SE_ADDR_HI_SZ(rctx->datbuf.size);
1426 	cpuvaddr[i++] = 0;
1427 	cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);
1428 
1429 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
1430 	cpuvaddr[i++] = op;
1431 
1432 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
1433 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
1434 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
1435 
1436 	return i;
1437 }
1438 
1439 static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1440 {
1441 	int i;
1442 
1443 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1444 		rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1445 }
1446 
1447 static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1448 {
1449 	int i;
1450 
1451 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1452 		writel(rctx->result[i],
1453 		       se->base + se->hw->regs->result + (i * 4));
1454 }
1455 
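/*
 * Handle an update: the new data is gathered into a DMA bounce buffer
 * together with any previously buffered residue, while the last block
 * and any partial block are held back in the residue buffer for
 * final(). Intermediate results are saved from and restored to the
 * hardware result registers between submissions.
 */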
1456 static int tegra_cmac_do_update(struct ahash_request *req)
1457 {
1458 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1459 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1460 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1461 	struct tegra_se *se = ctx->se;
1462 	unsigned int nblks, nresidue, cmdlen;
1463 	int ret;
1464 
1465 	if (!req->nbytes)
1466 		return 0;
1467 
1468 	nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
1469 	nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
1470 
1471 	/*
1472 	 * Reserve the last block as residue, to be processed in final().
1473 	 */
1474 	if (!nresidue && nblks) {
1475 		nresidue += rctx->blk_size;
1476 		nblks--;
1477 	}
1478 
1479 	rctx->src_sg = req->src;
1480 	rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
1481 	rctx->total_len += rctx->datbuf.size;
1482 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
1483 	rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
1484 
1485 	/*
1486 	 * If there is at most one block of data in total, keep it in the
1487 	 * residue buffer and return; it will be processed in final().
1488 	 */
1489 	if (nblks < 1) {
1490 		scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
1491 					 rctx->src_sg, 0, req->nbytes, 0);
1492 
1493 		rctx->residue.size += req->nbytes;
1494 		return 0;
1495 	}
1496 
1497 	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
1498 					      &rctx->datbuf.addr, GFP_KERNEL);
1499 	if (!rctx->datbuf.buf)
1500 		return -ENOMEM;
1501 
1502 	/* Copy the previous residue first */
1503 	if (rctx->residue.size)
1504 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
1505 
1506 	scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
1507 				 rctx->src_sg, 0, req->nbytes - nresidue, 0);
1508 
1509 	scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
1510 				 req->nbytes - nresidue, nresidue, 0);
1511 
1512 	/* Update residue value with the residue after current block */
1513 	rctx->residue.size = nresidue;
1514 
1515 	/*
1516 	 * If this is not the first task, paste the previous copied
1517 	 * intermediate result back to the registers so that it is picked up.
1518 	 */
1519 	if (!(rctx->task & SHA_FIRST))
1520 		tegra_cmac_paste_result(ctx->se, rctx);
1521 
1522 	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
1523 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
1524 
1525 	tegra_cmac_copy_result(ctx->se, rctx);
1526 
1527 	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
1528 			  rctx->datbuf.buf, rctx->datbuf.addr);
1529 
1530 	return ret;
1531 }
1532 
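/*
 * Process the buffered residue as the final block and read the CMAC
 * value from the hardware result registers. A zero-length message is
 * handed to the software fallback when one is available.
 */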
1533 static int tegra_cmac_do_final(struct ahash_request *req)
1534 {
1535 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1536 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1537 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1538 	struct tegra_se *se = ctx->se;
1539 	u32 *result = (u32 *)req->result;
1540 	int ret = 0, i, cmdlen;
1541 
1542 	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
1543 		ret = crypto_shash_tfm_digest(ctx->fallback_tfm, NULL, 0, req->result);
1544 		goto out_free;
1545 	}
1546 
1547 	if (rctx->residue.size) {
1548 		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
1549 						      &rctx->datbuf.addr, GFP_KERNEL);
1550 		if (!rctx->datbuf.buf) {
1551 			ret = -ENOMEM;
1552 			goto out_free;
1553 		}
1554 
1555 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
1556 	}
1557 
1558 	rctx->datbuf.size = rctx->residue.size;
1559 	rctx->total_len += rctx->residue.size;
1560 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
1561 
1562 	/*
1563 	 * If this is not the first task, paste the previous copied
1564 	 * intermediate result back to the registers so that it is picked up.
1565 	 */
1566 	if (!(rctx->task & SHA_FIRST))
1567 		tegra_cmac_paste_result(ctx->se, rctx);
1568 
1569 	/* Prepare command and submit */
1570 	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
1571 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
1572 	if (ret)
1573 		goto out;
1574 
1575 	/* Read and clear Result register */
1576 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1577 		result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1578 
1579 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1580 		writel(0, se->base + se->hw->regs->result + (i * 4));
1581 
1582 out:
1583 	if (rctx->residue.size)
1584 		dma_free_coherent(se->dev, rctx->datbuf.size,
1585 				  rctx->datbuf.buf, rctx->datbuf.addr);
1586 out_free:
1587 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
1588 			  rctx->residue.buf, rctx->residue.addr);
1589 	return ret;
1590 }
1591 
1592 static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
1593 {
1594 	struct ahash_request *req = ahash_request_cast(areq);
1595 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1596 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1597 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1598 	struct tegra_se *se = ctx->se;
1599 	int ret = 0;
1600 
1601 	if (rctx->task & SHA_UPDATE) {
1602 		ret = tegra_cmac_do_update(req);
1603 		if (ret)
1604 			goto out;
1605 
1606 		rctx->task &= ~SHA_UPDATE;
1607 	}
1608 
1609 	if (rctx->task & SHA_FINAL) {
1610 		ret = tegra_cmac_do_final(req);
1611 		if (ret)
1612 			goto out;
1613 
1614 		rctx->task &= ~SHA_FINAL;
1615 	}
1616 out:
1617 	crypto_finalize_hash_request(se->engine, req, ret);
1618 
1619 	return 0;
1620 }
1621 
1622 static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
1623 				     const char *algname)
1624 {
1625 	unsigned int statesize;
1626 
1627 	ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);
1628 
1629 	if (IS_ERR(ctx->fallback_tfm)) {
1630 		dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
1631 		ctx->fallback_tfm = NULL;
1632 		return;
1633 	}
1634 
1635 	statesize = crypto_shash_statesize(ctx->fallback_tfm);
1636 
1637 	if (statesize > sizeof(struct tegra_cmac_reqctx))
1638 		crypto_ahash_set_statesize(tfm, statesize);
1639 }
1640 
1641 static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
1642 {
1643 	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
1644 	struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
1645 	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
1646 	struct tegra_se_alg *se_alg;
1647 	const char *algname;
1648 	int ret;
1649 
1650 	algname = crypto_tfm_alg_name(tfm);
1651 	se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
1652 
1653 	crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));
1654 
1655 	ctx->se = se_alg->se_dev;
1656 	ctx->key_id = 0;
1657 
1658 	ret = se_algname_to_algid(algname);
1659 	if (ret < 0) {
1660 		dev_err(ctx->se->dev, "invalid algorithm\n");
1661 		return ret;
1662 	}
1663 
1664 	ctx->alg = ret;
1665 
1666 	tegra_cmac_init_fallback(ahash_tfm, ctx, algname);
1667 
1668 	return 0;
1669 }
1670 
1671 static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
1672 {
1673 	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
1674 
1675 	if (ctx->fallback_tfm)
1676 		crypto_free_shash(ctx->fallback_tfm);
1677 
1678 	if (ctx->key_id)
		tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1679 }
1680 
1681 static int tegra_cmac_init(struct ahash_request *req)
1682 {
1683 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1684 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1685 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1686 	struct tegra_se *se = ctx->se;
1687 	int i;
1688 
1689 	rctx->total_len = 0;
1690 	rctx->datbuf.size = 0;
1691 	rctx->residue.size = 0;
1692 	rctx->task = SHA_FIRST;
1693 	rctx->blk_size = crypto_ahash_blocksize(tfm);
1694 
1695 	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
1696 					       &rctx->residue.addr, GFP_KERNEL);
1697 	if (!rctx->residue.buf)
1698 		return -ENOMEM;
1699 
1700 	rctx->residue.size = 0;
1701 
1702 	/* Clear any previous result */
1703 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1704 		writel(0, se->base + se->hw->regs->result + (i * 4));
1705 
1706 	return 0;
1707 }
1708 
1709 static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
1710 			     unsigned int keylen)
1711 {
1712 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1713 
1714 	if (aes_check_keylen(keylen)) {
1715 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
1716 		return -EINVAL;
1717 	}
1718 
1719 	if (ctx->fallback_tfm)
1720 		crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
1721 
1722 	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
1723 }
1724 
1725 static int tegra_cmac_update(struct ahash_request *req)
1726 {
1727 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1728 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1729 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1730 
1731 	rctx->task |= SHA_UPDATE;
1732 
1733 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1734 }
1735 
1736 static int tegra_cmac_final(struct ahash_request *req)
1737 {
1738 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1739 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1740 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1741 
1742 	rctx->task |= SHA_FINAL;
1743 
1744 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1745 }
1746 
1747 static int tegra_cmac_finup(struct ahash_request *req)
1748 {
1749 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1750 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1751 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1752 
1753 	rctx->task |= SHA_UPDATE | SHA_FINAL;
1754 
1755 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1756 }
1757 
1758 static int tegra_cmac_digest(struct ahash_request *req)
1759 {
1760 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1761 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1762 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1763 	int ret;
1764 
1765 	ret = tegra_cmac_init(req);
1766 	if (ret)
1767 		return ret;
1768 
1769 	rctx->task |= SHA_UPDATE | SHA_FINAL;
1770 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1771 }
1772 
1773 static int tegra_cmac_export(struct ahash_request *req, void *out)
1774 {
1775 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1776 
1777 	memcpy(out, rctx, sizeof(*rctx));
1778 
1779 	return 0;
1780 }
1781 
1782 static int tegra_cmac_import(struct ahash_request *req, const void *in)
1783 {
1784 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1785 
1786 	memcpy(rctx, in, sizeof(*rctx));
1787 
1788 	return 0;
1789 }
1790 
1791 static struct tegra_se_alg tegra_aead_algs[] = {
1792 	{
1793 		.alg.aead.op.do_one_request = tegra_gcm_do_one_req,
1794 		.alg.aead.base = {
1795 			.init = tegra_aead_cra_init,
1796 			.exit = tegra_aead_cra_exit,
1797 			.setkey = tegra_aead_setkey,
1798 			.setauthsize = tegra_gcm_setauthsize,
1799 			.encrypt = tegra_aead_encrypt,
1800 			.decrypt = tegra_aead_decrypt,
1801 			.maxauthsize = AES_BLOCK_SIZE,
1802 			.ivsize	= GCM_AES_IV_SIZE,
1803 			.base = {
1804 				.cra_name = "gcm(aes)",
1805 				.cra_driver_name = "gcm-aes-tegra",
1806 				.cra_priority = 500,
1807 				.cra_blocksize = 1,
1808 				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
1809 				.cra_alignmask = 0xf,
1810 				.cra_module = THIS_MODULE,
1811 			},
1812 		}
1813 	}, {
1814 		.alg.aead.op.do_one_request = tegra_ccm_do_one_req,
1815 		.alg.aead.base = {
1816 			.init = tegra_aead_cra_init,
1817 			.exit = tegra_aead_cra_exit,
1818 			.setkey	= tegra_aead_setkey,
1819 			.setauthsize = tegra_ccm_setauthsize,
1820 			.encrypt = tegra_aead_encrypt,
1821 			.decrypt = tegra_aead_decrypt,
1822 			.maxauthsize = AES_BLOCK_SIZE,
1823 			.ivsize	= AES_BLOCK_SIZE,
1824 			.chunksize = AES_BLOCK_SIZE,
1825 			.base = {
1826 				.cra_name = "ccm(aes)",
1827 				.cra_driver_name = "ccm-aes-tegra",
1828 				.cra_priority = 500,
1829 				.cra_blocksize = 1,
1830 				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
1831 				.cra_alignmask = 0xf,
1832 				.cra_module = THIS_MODULE,
1833 			},
1834 		}
1835 	}
1836 };
1837 
1838 static struct tegra_se_alg tegra_cmac_algs[] = {
1839 	{
1840 		.alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
1841 		.alg.ahash.base = {
1842 			.init = tegra_cmac_init,
1843 			.setkey	= tegra_cmac_setkey,
1844 			.update = tegra_cmac_update,
1845 			.final = tegra_cmac_final,
1846 			.finup = tegra_cmac_finup,
1847 			.digest = tegra_cmac_digest,
1848 			.export = tegra_cmac_export,
1849 			.import = tegra_cmac_import,
1850 			.halg.digestsize = AES_BLOCK_SIZE,
1851 			.halg.statesize = sizeof(struct tegra_cmac_reqctx),
1852 			.halg.base = {
1853 				.cra_name = "cmac(aes)",
1854 				.cra_driver_name = "tegra-se-cmac",
1855 				.cra_priority = 300,
1856 				.cra_flags = CRYPTO_ALG_TYPE_AHASH,
1857 				.cra_blocksize = AES_BLOCK_SIZE,
1858 				.cra_ctxsize = sizeof(struct tegra_cmac_ctx),
1859 				.cra_alignmask = 0,
1860 				.cra_module = THIS_MODULE,
1861 				.cra_init = tegra_cmac_cra_init,
1862 				.cra_exit = tegra_cmac_cra_exit,
1863 			}
1864 		}
1865 	}
1866 };
1867 
1868 int tegra_init_aes(struct tegra_se *se)
1869 {
1870 	struct aead_engine_alg *aead_alg;
1871 	struct ahash_engine_alg *ahash_alg;
1872 	struct skcipher_engine_alg *sk_alg;
1873 	int i, ret;
1874 
1875 	se->manifest = tegra_aes_kac_manifest;
1876 
1877 	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
1878 		sk_alg = &tegra_aes_algs[i].alg.skcipher;
1879 		tegra_aes_algs[i].se_dev = se;
1880 
1881 		ret = crypto_engine_register_skcipher(sk_alg);
1882 		if (ret) {
1883 			dev_err(se->dev, "failed to register %s\n",
1884 				sk_alg->base.base.cra_name);
1885 			goto err_aes;
1886 		}
1887 	}
1888 
1889 	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
1890 		aead_alg = &tegra_aead_algs[i].alg.aead;
1891 		tegra_aead_algs[i].se_dev = se;
1892 
1893 		ret = crypto_engine_register_aead(aead_alg);
1894 		if (ret) {
1895 			dev_err(se->dev, "failed to register %s\n",
1896 				aead_alg->base.base.cra_name);
1897 			goto err_aead;
1898 		}
1899 	}
1900 
1901 	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
1902 		ahash_alg = &tegra_cmac_algs[i].alg.ahash;
1903 		tegra_cmac_algs[i].se_dev = se;
1904 
1905 		ret = crypto_engine_register_ahash(ahash_alg);
1906 		if (ret) {
1907 			dev_err(se->dev, "failed to register %s\n",
1908 				ahash_alg->base.halg.base.cra_name);
1909 			goto err_cmac;
1910 		}
1911 	}
1912 
1913 	return 0;
1914 
1915 err_cmac:
1916 	while (i--)
1917 		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
1918 
1919 	i = ARRAY_SIZE(tegra_aead_algs);
1920 err_aead:
1921 	while (i--)
1922 		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
1923 
1924 	i = ARRAY_SIZE(tegra_aes_algs);
1925 err_aes:
1926 	while (i--)
1927 		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
1928 
1929 	return ret;
1930 }
1931 
1932 void tegra_deinit_aes(struct tegra_se *se)
1933 {
1934 	int i;
1935 
1936 	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
1937 		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
1938 
1939 	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
1940 		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
1941 
1942 	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
1943 		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
1944 }
1945