/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
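
/*
 * As a rough sketch only (not the exact construction this driver uses for
 * its requests), a job descriptor of the above shape could be assembled
 * with the desc_constr.h helpers along these lines, where sh_desc_dma,
 * dst_dma, src_dma and the two lengths stand in for per-request values:
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
 *	append_seq_in_ptr(desc, src_dma, in_len, 0);
 */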

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 10 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
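
/*
 * For a sense of the budget (a sketch, not normative): CAAM commands are
 * CAAM_CMD_SZ (4) bytes, so the 64-word descriptor buffer holds 256 bytes.
 * Assuming DESC_JOB_IO_LEN is CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3 (32 bytes
 * with 32-bit pointers), DESC_MAX_USED_BYTES works out to 224, i.e.
 * DESC_MAX_USED_LEN = 56 words available for shared-descriptor text.
 */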

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
			       enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	u32 *wait_cmd;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
				      FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	if (alg->caam.geniv)
		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
	else
		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	if (alg->caam.geniv) {
		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT |
				(ctx1_iv_off << LDST_OFFSET_SHIFT));
		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * Wait for IV transfer (ofifo -> class2) to finish before starting
	 * ciphertext transfer (ofifo -> external memory).
	 */
	wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
	set_jump_tgt_here(desc, wait_cmd);

	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
				    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
						 JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
						 JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
						 JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			       ctx->split_key_pad_len, key_in, authkeylen,
			       ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
			       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
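	/*
	 * Worked example (illustrative only): for hmac(sha256) the pad
	 * length above is 32, so the MDHA split key (the two precomputed
	 * ipad/opad hash states) is 2 * 32 = 64 bytes; 64 is already a
	 * multiple of 16, so split_key_pad_len equals split_key_len here.
	 */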

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
1372 
gcm_setkey(struct crypto_aead * aead,const u8 * key,unsigned int keylen)1373 static int gcm_setkey(struct crypto_aead *aead,
1374 		      const u8 *key, unsigned int keylen)
1375 {
1376 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1377 	struct device *jrdev = ctx->jrdev;
1378 	int ret = 0;
1379 
1380 #ifdef DEBUG
1381 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1382 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1383 #endif
1384 
1385 	memcpy(ctx->key, key, keylen);
1386 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1387 				      DMA_TO_DEVICE);
1388 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1389 		dev_err(jrdev, "unable to map key i/o memory\n");
1390 		return -ENOMEM;
1391 	}
1392 	ctx->enckeylen = keylen;
1393 
1394 	ret = gcm_set_sh_desc(aead);
1395 	if (ret) {
1396 		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1397 				 DMA_TO_DEVICE);
1398 	}
1399 
1400 	return ret;
1401 }
1402 
rfc4106_setkey(struct crypto_aead * aead,const u8 * key,unsigned int keylen)1403 static int rfc4106_setkey(struct crypto_aead *aead,
1404 			  const u8 *key, unsigned int keylen)
1405 {
1406 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1407 	struct device *jrdev = ctx->jrdev;
1408 	int ret = 0;
1409 
1410 	if (keylen < 4)
1411 		return -EINVAL;
1412 
1413 #ifdef DEBUG
1414 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1415 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1416 #endif
1417 
1418 	memcpy(ctx->key, key, keylen);
1419 
1420 	/*
1421 	 * The last four bytes of the key material are used as the salt value
1422 	 * in the nonce. Update the AES key length.
1423 	 */
1424 	ctx->enckeylen = keylen - 4;
1425 
1426 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1427 				      DMA_TO_DEVICE);
1428 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1429 		dev_err(jrdev, "unable to map key i/o memory\n");
1430 		return -ENOMEM;
1431 	}
1432 
1433 	ret = rfc4106_set_sh_desc(aead);
1434 	if (ret) {
1435 		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1436 				 DMA_TO_DEVICE);
1437 	}
1438 
1439 	return ret;
1440 }
1441 
rfc4543_setkey(struct crypto_aead * aead,const u8 * key,unsigned int keylen)1442 static int rfc4543_setkey(struct crypto_aead *aead,
1443 			  const u8 *key, unsigned int keylen)
1444 {
1445 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1446 	struct device *jrdev = ctx->jrdev;
1447 	int ret = 0;
1448 
1449 	if (keylen < 4)
1450 		return -EINVAL;
1451 
1452 #ifdef DEBUG
1453 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1454 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1455 #endif
1456 
1457 	memcpy(ctx->key, key, keylen);
1458 
1459 	/*
1460 	 * The last four bytes of the key material are used as the salt value
1461 	 * in the nonce. Update the AES key length.
1462 	 */
1463 	ctx->enckeylen = keylen - 4;
1464 
1465 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1466 				      DMA_TO_DEVICE);
1467 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1468 		dev_err(jrdev, "unable to map key i/o memory\n");
1469 		return -ENOMEM;
1470 	}
1471 
1472 	ret = rfc4543_set_sh_desc(aead);
1473 	if (ret) {
1474 		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1475 				 DMA_TO_DEVICE);
1476 	}
1477 
1478 	return ret;
1479 }
1480 
ablkcipher_setkey(struct crypto_ablkcipher * ablkcipher,const u8 * key,unsigned int keylen)1481 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1482 			     const u8 *key, unsigned int keylen)
1483 {
1484 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1485 	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1486 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1487 	const char *alg_name = crypto_tfm_alg_name(tfm);
1488 	struct device *jrdev = ctx->jrdev;
1489 	int ret = 0;
1490 	u32 *key_jump_cmd;
1491 	u32 *desc;
1492 	u32 *nonce;
1493 	u32 geniv;
1494 	u32 ctx1_iv_off = 0;
1495 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1496 			       OP_ALG_AAI_CTR_MOD128);
1497 	const bool is_rfc3686 = (ctr_mode &&
1498 				 (strstr(alg_name, "rfc3686") != NULL));
1499 
1500 #ifdef DEBUG
1501 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1502 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1503 #endif
1504 	/*
1505 	 * AES-CTR needs to load IV in CONTEXT1 reg
1506 	 * at an offset of 128bits (16bytes)
1507 	 * CONTEXT1[255:128] = IV
1508 	 */
1509 	if (ctr_mode)
1510 		ctx1_iv_off = 16;
1511 
1512 	/*
1513 	 * RFC3686 specific:
1514 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1515 	 *	| *key = {KEY, NONCE}
1516 	 */
1517 	if (is_rfc3686) {
1518 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1519 		keylen -= CTR_RFC3686_NONCE_SIZE;
1520 	}
1521 
1522 	memcpy(ctx->key, key, keylen);
1523 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1524 				      DMA_TO_DEVICE);
1525 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1526 		dev_err(jrdev, "unable to map key i/o memory\n");
1527 		return -ENOMEM;
1528 	}
1529 	ctx->enckeylen = keylen;
1530 
1531 	/* ablkcipher_encrypt shared descriptor */
1532 	desc = ctx->sh_desc_enc;
1533 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1534 	/* Skip if already shared */
1535 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1536 				   JUMP_COND_SHRD);
1537 
1538 	/* Load class1 key only */
1539 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1540 			  ctx->enckeylen, CLASS_1 |
1541 			  KEY_DEST_CLASS_REG);
1542 
1543 	/* Load nonce into CONTEXT1 reg */
1544 	if (is_rfc3686) {
1545 		nonce = (u32 *)(key + keylen);
1546 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1547 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1548 		append_move(desc, MOVE_WAITCOMP |
1549 			    MOVE_SRC_OUTFIFO |
1550 			    MOVE_DEST_CLASS1CTX |
1551 			    (16 << MOVE_OFFSET_SHIFT) |
1552 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1553 	}
1554 
1555 	set_jump_tgt_here(desc, key_jump_cmd);
1556 
1557 	/* Load iv */
1558 	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1559 			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1560 
1561 	/* Load counter into CONTEXT1 reg */
1562 	if (is_rfc3686)
1563 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1564 				    LDST_CLASS_1_CCB |
1565 				    LDST_SRCDST_BYTE_CONTEXT |
1566 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1567 				     LDST_OFFSET_SHIFT));
1568 
1569 	/* Load operation */
1570 	append_operation(desc, ctx->class1_alg_type |
1571 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1572 
1573 	/* Perform operation */
1574 	ablkcipher_append_src_dst(desc);
1575 
1576 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1577 					      desc_bytes(desc),
1578 					      DMA_TO_DEVICE);
1579 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1580 		dev_err(jrdev, "unable to map shared descriptor\n");
1581 		return -ENOMEM;
1582 	}
1583 #ifdef DEBUG
1584 	print_hex_dump(KERN_ERR,
1585 		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
1586 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1587 		       desc_bytes(desc), 1);
1588 #endif
1589 	/* ablkcipher_decrypt shared descriptor */
1590 	desc = ctx->sh_desc_dec;
1591 
1592 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1593 	/* Skip if already shared */
1594 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1595 				   JUMP_COND_SHRD);
1596 
1597 	/* Load class1 key only */
1598 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1599 			  ctx->enckeylen, CLASS_1 |
1600 			  KEY_DEST_CLASS_REG);
1601 
1602 	/* Load nonce into CONTEXT1 reg */
1603 	if (is_rfc3686) {
1604 		nonce = (u32 *)(key + keylen);
1605 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1606 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1607 		append_move(desc, MOVE_WAITCOMP |
1608 			    MOVE_SRC_OUTFIFO |
1609 			    MOVE_DEST_CLASS1CTX |
1610 			    (16 << MOVE_OFFSET_SHIFT) |
1611 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1612 	}
1613 
1614 	set_jump_tgt_here(desc, key_jump_cmd);
1615 
1616 	/* load IV */
1617 	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1618 			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1619 
1620 	/* Load counter into CONTEXT1 reg */
1621 	if (is_rfc3686)
1622 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1623 				    LDST_CLASS_1_CCB |
1624 				    LDST_SRCDST_BYTE_CONTEXT |
1625 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1626 				     LDST_OFFSET_SHIFT));
1627 
1628 	/* Choose operation */
1629 	if (ctr_mode)
1630 		append_operation(desc, ctx->class1_alg_type |
1631 				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1632 	else
1633 		append_dec_op1(desc, ctx->class1_alg_type);
1634 
1635 	/* Perform operation */
1636 	ablkcipher_append_src_dst(desc);
1637 
1638 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1639 					      desc_bytes(desc),
1640 					      DMA_TO_DEVICE);
1641 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1642 		dev_err(jrdev, "unable to map shared descriptor\n");
1643 		return -ENOMEM;
1644 	}
1645 
1646 #ifdef DEBUG
1647 	print_hex_dump(KERN_ERR,
1648 		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
1649 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1650 		       desc_bytes(desc), 1);
1651 #endif
1652 	/* ablkcipher_givencrypt shared descriptor */
1653 	desc = ctx->sh_desc_givenc;
1654 
1655 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1656 	/* Skip if already shared */
1657 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1658 				   JUMP_COND_SHRD);
1659 
1660 	/* Load class1 key only */
1661 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1662 			  ctx->enckeylen, CLASS_1 |
1663 			  KEY_DEST_CLASS_REG);
1664 
1665 	/* Load Nonce into CONTEXT1 reg */
1666 	if (is_rfc3686) {
1667 		nonce = (u32 *)(key + keylen);
1668 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1669 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1670 		append_move(desc, MOVE_WAITCOMP |
1671 			    MOVE_SRC_OUTFIFO |
1672 			    MOVE_DEST_CLASS1CTX |
1673 			    (16 << MOVE_OFFSET_SHIFT) |
1674 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1675 	}
1676 	set_jump_tgt_here(desc, key_jump_cmd);
1677 
1678 	/* Generate IV */
1679 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1680 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1681 		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1682 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1683 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1684 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1685 	append_move(desc, MOVE_WAITCOMP |
1686 		    MOVE_SRC_INFIFO |
1687 		    MOVE_DEST_CLASS1CTX |
1688 		    (crt->ivsize << MOVE_LEN_SHIFT) |
1689 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1690 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1691 
1692 	/* Copy generated IV to memory */
1693 	append_seq_store(desc, crt->ivsize,
1694 			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1695 			 (ctx1_iv_off << LDST_OFFSET_SHIFT));
1696 
1697 	/* Load Counter into CONTEXT1 reg */
1698 	if (is_rfc3686)
1699 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1700 				    LDST_CLASS_1_CCB |
1701 				    LDST_SRCDST_BYTE_CONTEXT |
1702 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1703 				     LDST_OFFSET_SHIFT));
1704 
1705 	if (ctx1_iv_off)
1706 		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1707 			    (1 << JUMP_OFFSET_SHIFT));
1708 
1709 	/* Load operation */
1710 	append_operation(desc, ctx->class1_alg_type |
1711 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1712 
1713 	/* Perform operation */
1714 	ablkcipher_append_src_dst(desc);
1715 
1716 	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1717 						 desc_bytes(desc),
1718 						 DMA_TO_DEVICE);
1719 	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1720 		dev_err(jrdev, "unable to map shared descriptor\n");
1721 		return -ENOMEM;
1722 	}
1723 #ifdef DEBUG
1724 	print_hex_dump(KERN_ERR,
1725 		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1726 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1727 		       desc_bytes(desc), 1);
1728 #endif
1729 
1730 	return ret;
1731 }
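/*
 * Caller-side illustration (hypothetical, not part of this driver): for
 * rfc3686(ctr(aes)) the key blob handed to .setkey is the AES key followed
 * immediately by the 4-byte nonce - hence the keylen adjustment and the
 * separate nonce load above. Buffer names below are placeholders:
 *
 *	u8 blob[AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE];
 *
 *	memcpy(blob, aes_key, aes_keylen);                          (KEY)
 *	memcpy(blob + aes_keylen, nonce, CTR_RFC3686_NONCE_SIZE);   (NONCE)
 *	crypto_ablkcipher_setkey(tfm, blob,
 *				 aes_keylen + CTR_RFC3686_NONCE_SIZE);
 */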
1732 
1733 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1734 				 const u8 *key, unsigned int keylen)
1735 {
1736 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1737 	struct device *jrdev = ctx->jrdev;
1738 	u32 *key_jump_cmd, *desc;
1739 	__be64 sector_size = cpu_to_be64(512);
1740 
1741 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
1742 		crypto_ablkcipher_set_flags(ablkcipher,
1743 					    CRYPTO_TFM_RES_BAD_KEY_LEN);
1744 		dev_err(jrdev, "key size mismatch\n");
1745 		return -EINVAL;
1746 	}
1747 
1748 	memcpy(ctx->key, key, keylen);
1749 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
1750 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1751 		dev_err(jrdev, "unable to map key i/o memory\n");
1752 		return -ENOMEM;
1753 	}
1754 	ctx->enckeylen = keylen;
1755 
1756 	/* xts_ablkcipher_encrypt shared descriptor */
1757 	desc = ctx->sh_desc_enc;
1758 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1759 	/* Skip if already shared */
1760 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1761 				   JUMP_COND_SHRD);
1762 
1763 	/* Load class1 keys only */
1764 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1765 			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1766 
1767 	/* Load sector size into CONTEXT1 at offset 40 bytes (0x28) */
1768 	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1769 		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1770 	append_data(desc, (void *)&sector_size, 8);
1771 
1772 	set_jump_tgt_here(desc, key_jump_cmd);
1773 
1774 	/*
1775 	 * create sequence for loading the sector index
1776 	 * Upper 8B of IV - will be used as sector index
1777 	 * Lower 8B of IV - will be discarded
1778 	 */
1779 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1780 		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1781 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1782 
1783 	/* Load operation */
1784 	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
1785 			 OP_ALG_ENCRYPT);
1786 
1787 	/* Perform operation */
1788 	ablkcipher_append_src_dst(desc);
1789 
1790 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1791 					      DMA_TO_DEVICE);
1792 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1793 		dev_err(jrdev, "unable to map shared descriptor\n");
1794 		return -ENOMEM;
1795 	}
1796 #ifdef DEBUG
1797 	print_hex_dump(KERN_ERR,
1798 		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1799 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1800 #endif
1801 
1802 	/* xts_ablkcipher_decrypt shared descriptor */
1803 	desc = ctx->sh_desc_dec;
1804 
1805 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1806 	/* Skip if already shared */
1807 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1808 				   JUMP_COND_SHRD);
1809 
1810 	/* Load class1 key only */
1811 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1812 			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1813 
1814 	/* Load sector size into CONTEXT1 at offset 40 bytes (0x28) */
1815 	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1816 		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1817 	append_data(desc, (void *)&sector_size, 8);
1818 
1819 	set_jump_tgt_here(desc, key_jump_cmd);
1820 
1821 	/*
1822 	 * create sequence for loading the sector index
1823 	 * Upper 8B of IV - will be used as sector index
1824 	 * Lower 8B of IV - will be discarded
1825 	 */
1826 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1827 		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1828 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1829 
1830 	/* Load operation */
1831 	append_dec_op1(desc, ctx->class1_alg_type);
1832 
1833 	/* Perform operation */
1834 	ablkcipher_append_src_dst(desc);
1835 
1836 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1837 					      DMA_TO_DEVICE);
1838 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1839 		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
1840 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
1841 		dev_err(jrdev, "unable to map shared descriptor\n");
1842 		return -ENOMEM;
1843 	}
1844 #ifdef DEBUG
1845 	print_hex_dump(KERN_ERR,
1846 		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1847 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1848 #endif
1849 
1850 	return 0;
1851 }
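/*
 * Illustration (hypothetical caller): XTS takes two equal-size AES keys
 * concatenated in one buffer, so the check above only accepts a keylen of
 * 32 bytes (2 x AES-128) or 64 bytes (2 x AES-256):
 *
 *	u8 xts_key[2 * AES_MIN_KEY_SIZE];
 *
 *	get_random_bytes(xts_key, sizeof(xts_key));
 *	err = crypto_ablkcipher_setkey(tfm, xts_key, sizeof(xts_key));
 */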
1852 
1853 /*
1854  * aead_edesc - s/w-extended aead descriptor
1855  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
1856  * @src_nents: number of segments in input scatterlist
1857  * @dst_nents: number of segments in output scatterlist
1858  * @iv_dma: dma address of iv for checking continuity and link table
1859  * @sec4_sg_bytes: length of dma mapped sec4_sg space
1860  * @sec4_sg_dma: bus physical mapped address of h/w link table
1861  * @sec4_sg: pointer to h/w link table
1862  * @hw_desc: h/w job descriptor (must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
1863  */
1864 struct aead_edesc {
1865 	int assoc_nents;
1866 	int src_nents;
1867 	int dst_nents;
1868 	dma_addr_t iv_dma;
1869 	int sec4_sg_bytes;
1870 	dma_addr_t sec4_sg_dma;
1871 	struct sec4_sg_entry *sec4_sg;
1872 	u32 hw_desc[];
1873 };
1874 
1875 /*
1876  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1877  * @src_nents: number of segments in input scatterlist
1878  * @dst_nents: number of segments in output scatterlist
1879  * @iv_dma: dma address of iv for checking continuity and link table
1880  * @sec4_sg_bytes: length of dma mapped sec4_sg space
1881  * @sec4_sg_dma: bus physical mapped address of h/w link table
1882  * @sec4_sg: pointer to h/w link table
1883  * @hw_desc: h/w job descriptor (must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
1884  */
1885 struct ablkcipher_edesc {
1886 	int src_nents;
1887 	int dst_nents;
1888 	dma_addr_t iv_dma;
1889 	int sec4_sg_bytes;
1890 	dma_addr_t sec4_sg_dma;
1891 	struct sec4_sg_entry *sec4_sg;
1892 	u32 hw_desc[];
1893 };
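/*
 * Both edesc structs are carved out of a single allocation by the
 * *_edesc_alloc() helpers below: the struct itself, then desc_bytes of h/w
 * job descriptor in hw_desc[], then the sec4 S/G link table. A sketch of
 * the layout math, mirroring the allocation code:
 *
 *	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
 *			GFP_DMA | flags);
 *	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desc_bytes;
 *
 * so hw_desc[] starts right after the struct and the link table follows
 * the job descriptor.
 */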
1894 
1895 static void caam_unmap(struct device *dev, struct scatterlist *src,
1896 		       struct scatterlist *dst, int src_nents,
1897 		       int dst_nents,
1898 		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1899 		       int sec4_sg_bytes)
1900 {
1901 	if (dst != src) {
1902 		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
1903 		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
1904 	} else {
1905 		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
1906 	}
1907 
1908 	if (iv_dma)
1909 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1910 	if (sec4_sg_bytes)
1911 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
1912 				 DMA_TO_DEVICE);
1913 }
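/*
 * The directions above mirror how the buffers were mapped at edesc
 * allocation time: an in-place request (dst == src) is mapped once as
 * DMA_BIDIRECTIONAL, distinct buffers as DMA_TO_DEVICE (src) and
 * DMA_FROM_DEVICE (dst). The DMA API requires map and unmap to agree:
 *
 *	n = dma_map_sg(dev, src, nents, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_sg(dev, src, nents, DMA_TO_DEVICE);   (same direction)
 */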
1914 
1915 static void aead_unmap(struct device *dev,
1916 		       struct aead_edesc *edesc,
1917 		       struct aead_request *req)
1918 {
1919 	caam_unmap(dev, req->src, req->dst,
1920 		   edesc->src_nents, edesc->dst_nents, 0, 0,
1921 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1922 }
1923 
1924 static void ablkcipher_unmap(struct device *dev,
1925 			     struct ablkcipher_edesc *edesc,
1926 			     struct ablkcipher_request *req)
1927 {
1928 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1929 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1930 
1931 	caam_unmap(dev, req->src, req->dst,
1932 		   edesc->src_nents, edesc->dst_nents,
1933 		   edesc->iv_dma, ivsize,
1934 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1935 }
1936 
1937 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1938 				   void *context)
1939 {
1940 	struct aead_request *req = context;
1941 	struct aead_edesc *edesc;
1942 
1943 #ifdef DEBUG
1944 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1945 #endif
1946 
1947 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1948 
1949 	if (err)
1950 		caam_jr_strstatus(jrdev, err);
1951 
1952 	aead_unmap(jrdev, edesc, req);
1953 
1954 	kfree(edesc);
1955 
1956 	aead_request_complete(req, err);
1957 }
1958 
1959 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1960 				   void *context)
1961 {
1962 	struct aead_request *req = context;
1963 	struct aead_edesc *edesc;
1964 
1965 #ifdef DEBUG
1966 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1967 #endif
1968 
1969 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1970 
1971 	if (err)
1972 		caam_jr_strstatus(jrdev, err);
1973 
1974 	aead_unmap(jrdev, edesc, req);
1975 
1976 	/*
1977 	 * If the h/w authentication (ICV) check failed, report -EBADMSG.
1978 	 */
1979 	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1980 		err = -EBADMSG;
1981 
1982 	kfree(edesc);
1983 
1984 	aead_request_complete(req, err);
1985 }
1986 
1987 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1988 				   void *context)
1989 {
1990 	struct ablkcipher_request *req = context;
1991 	struct ablkcipher_edesc *edesc;
1992 #ifdef DEBUG
1993 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1994 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1995 
1996 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1997 #endif
1998 
1999 	edesc = (struct ablkcipher_edesc *)((char *)desc -
2000 		 offsetof(struct ablkcipher_edesc, hw_desc));
2001 
2002 	if (err)
2003 		caam_jr_strstatus(jrdev, err);
2004 
2005 #ifdef DEBUG
2006 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2007 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2008 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
2009 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2010 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2011 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2012 #endif
2013 
2014 	ablkcipher_unmap(jrdev, edesc, req);
2015 	kfree(edesc);
2016 
2017 	ablkcipher_request_complete(req, err);
2018 }
2019 
2020 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2021 				    void *context)
2022 {
2023 	struct ablkcipher_request *req = context;
2024 	struct ablkcipher_edesc *edesc;
2025 #ifdef DEBUG
2026 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2027 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2028 
2029 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2030 #endif
2031 
2032 	edesc = (struct ablkcipher_edesc *)((char *)desc -
2033 		 offsetof(struct ablkcipher_edesc, hw_desc));
2034 	if (err)
2035 		caam_jr_strstatus(jrdev, err);
2036 
2037 #ifdef DEBUG
2038 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2039 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2040 		       ivsize, 1);
2041 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2042 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2043 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2044 #endif
2045 
2046 	ablkcipher_unmap(jrdev, edesc, req);
2047 	kfree(edesc);
2048 
2049 	ablkcipher_request_complete(req, err);
2050 }
2051 
2052 /*
2053  * Fill in aead job descriptor
2054  */
2055 static void init_aead_job(struct aead_request *req,
2056 			  struct aead_edesc *edesc,
2057 			  bool all_contig, bool encrypt)
2058 {
2059 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2060 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2061 	int authsize = ctx->authsize;
2062 	u32 *desc = edesc->hw_desc;
2063 	u32 out_options, in_options;
2064 	dma_addr_t dst_dma, src_dma;
2065 	int len, sec4_sg_index = 0;
2066 	dma_addr_t ptr;
2067 	u32 *sh_desc;
2068 
2069 	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2070 	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2071 
2072 	len = desc_len(sh_desc);
2073 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2074 
2075 	if (all_contig) {
2076 		src_dma = sg_dma_address(req->src);
2077 		in_options = 0;
2078 	} else {
2079 		src_dma = edesc->sec4_sg_dma;
2080 		sec4_sg_index += edesc->src_nents;
2081 		in_options = LDST_SGF;
2082 	}
2083 
2084 	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2085 			  in_options);
2086 
2087 	dst_dma = src_dma;
2088 	out_options = in_options;
2089 
2090 	if (unlikely(req->src != req->dst)) {
2091 		if (!edesc->dst_nents) {
2092 			dst_dma = sg_dma_address(req->dst);
2093 			out_options = 0;
2094 		} else {
2095 			dst_dma = edesc->sec4_sg_dma +
2096 				  sec4_sg_index *
2097 				  sizeof(struct sec4_sg_entry);
2098 			out_options = LDST_SGF;
2099 		}
2100 	}
2101 
2102 	if (encrypt)
2103 		append_seq_out_ptr(desc, dst_dma,
2104 				   req->assoclen + req->cryptlen + authsize,
2105 				   out_options);
2106 	else
2107 		append_seq_out_ptr(desc, dst_dma,
2108 				   req->assoclen + req->cryptlen - authsize,
2109 				   out_options);
2110 
2111 	/* REG3 = assoclen */
2112 	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2113 }
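/*
 * Worked example of the sequence lengths set up above, for an AEAD with
 * authsize = 16, assoclen = 20 and a 1000-byte payload:
 *
 *	encrypt: seq in  = 20 + 1000      = 1020 bytes
 *	         seq out = 20 + 1000 + 16 = 1036 bytes (ICV appended)
 *	decrypt: cryptlen includes the ICV, so for the same message
 *	         seq in  = 20 + 1016      = 1036 bytes
 *	         seq out = 20 + 1016 - 16 = 1020 bytes
 */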
2114 
2115 static void init_gcm_job(struct aead_request *req,
2116 			 struct aead_edesc *edesc,
2117 			 bool all_contig, bool encrypt)
2118 {
2119 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2120 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2121 	unsigned int ivsize = crypto_aead_ivsize(aead);
2122 	u32 *desc = edesc->hw_desc;
2123 	bool generic_gcm = (ivsize == 12);
2124 	unsigned int last;
2125 
2126 	init_aead_job(req, edesc, all_contig, encrypt);
2127 
2128 	/* BUG This should not be specific to generic GCM. */
2129 	last = 0;
2130 	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2131 		last = FIFOLD_TYPE_LAST1;
2132 
2133 	/* Read GCM IV */
2134 	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2135 			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2136 	/* Append Salt */
2137 	if (!generic_gcm)
2138 		append_data(desc, ctx->key + ctx->enckeylen, 4);
2139 	/* Append IV */
2140 	append_data(desc, req->iv, ivsize);
2141 	/* End of blank commands */
2142 }
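/*
 * IV handling sketch for the loads above: generic gcm(aes) gets the full
 * 12-byte IV from the caller in req->iv; rfc4106/rfc4543 (ivsize == 8)
 * prepend the 4-byte salt stored after the AES key at setkey time, so the
 * FIFO receives 12 bytes either way:
 *
 *	salt (4 bytes, ctx->key + ctx->enckeylen) || req->iv (8 bytes)
 */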
2143 
2144 static void init_authenc_job(struct aead_request *req,
2145 			     struct aead_edesc *edesc,
2146 			     bool all_contig, bool encrypt)
2147 {
2148 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2149 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
2150 						 struct caam_aead_alg, aead);
2151 	unsigned int ivsize = crypto_aead_ivsize(aead);
2152 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2153 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
2154 			       OP_ALG_AAI_CTR_MOD128);
2155 	const bool is_rfc3686 = alg->caam.rfc3686;
2156 	u32 *desc = edesc->hw_desc;
2157 	u32 ivoffset = 0;
2158 
2159 	/*
2160 	 * AES-CTR needs to load IV in CONTEXT1 reg
2161 	 * at an offset of 128 bits (16 bytes)
2162 	 * CONTEXT1[255:128] = IV
2163 	 */
2164 	if (ctr_mode)
2165 		ivoffset = 16;
2166 
2167 	/*
2168 	 * RFC3686 specific:
2169 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
2170 	 */
2171 	if (is_rfc3686)
2172 		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
2173 
2174 	init_aead_job(req, edesc, all_contig, encrypt);
2175 
2176 	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
2177 		append_load_as_imm(desc, req->iv, ivsize,
2178 				   LDST_CLASS_1_CCB |
2179 				   LDST_SRCDST_BYTE_CONTEXT |
2180 				   (ivoffset << LDST_OFFSET_SHIFT));
2181 }
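/*
 * Resulting IV offsets into CONTEXT1, a worked version of the ivoffset
 * logic above (CTR_RFC3686_NONCE_SIZE is 4):
 *
 *	CBC and other block modes:	ivoffset = 0
 *	AES-CTR (CTR_MOD128):		ivoffset = 16
 *	rfc3686(ctr(aes)):		ivoffset = 16 + 4 = 20
 */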
2182 
2183 /*
2184  * Fill in ablkcipher job descriptor
2185  */
2186 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2187 				struct ablkcipher_edesc *edesc,
2188 				struct ablkcipher_request *req,
2189 				bool iv_contig)
2190 {
2191 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2192 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2193 	u32 *desc = edesc->hw_desc;
2194 	u32 out_options = 0, in_options;
2195 	dma_addr_t dst_dma, src_dma;
2196 	int len, sec4_sg_index = 0;
2197 
2198 #ifdef DEBUG
2199 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2200 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2201 		       ivsize, 1);
2202 	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2203 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2204 		       edesc->src_nents ? 100 : req->nbytes, 1);
2205 #endif
2206 
2207 	len = desc_len(sh_desc);
2208 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2209 
2210 	if (iv_contig) {
2211 		src_dma = edesc->iv_dma;
2212 		in_options = 0;
2213 	} else {
2214 		src_dma = edesc->sec4_sg_dma;
2215 		sec4_sg_index += edesc->src_nents + 1;
2216 		in_options = LDST_SGF;
2217 	}
2218 	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2219 
2220 	if (likely(req->src == req->dst)) {
2221 		if (!edesc->src_nents && iv_contig) {
2222 			dst_dma = sg_dma_address(req->src);
2223 		} else {
2224 			dst_dma = edesc->sec4_sg_dma +
2225 				sizeof(struct sec4_sg_entry);
2226 			out_options = LDST_SGF;
2227 		}
2228 	} else {
2229 		if (!edesc->dst_nents) {
2230 			dst_dma = sg_dma_address(req->dst);
2231 		} else {
2232 			dst_dma = edesc->sec4_sg_dma +
2233 				sec4_sg_index * sizeof(struct sec4_sg_entry);
2234 			out_options = LDST_SGF;
2235 		}
2236 	}
2237 	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2238 }
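/*
 * Note the asymmetry in the sequence lengths above: the input side is
 * req->nbytes + ivsize because the IV travels ahead of the data (either
 * contiguously or as the first link-table entry), while the output side is
 * just req->nbytes. E.g. cbc(aes) with a 256-byte request:
 *
 *	seq in  = 256 + 16 = 272 bytes (IV + data)
 *	seq out = 256 bytes
 */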
2239 
2240 /*
2241  * Fill in ablkcipher givencrypt job descriptor
2242  */
2243 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2244 				    struct ablkcipher_edesc *edesc,
2245 				    struct ablkcipher_request *req,
2246 				    bool iv_contig)
2247 {
2248 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2249 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2250 	u32 *desc = edesc->hw_desc;
2251 	u32 out_options, in_options;
2252 	dma_addr_t dst_dma, src_dma;
2253 	int len, sec4_sg_index = 0;
2254 
2255 #ifdef DEBUG
2256 	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2257 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2258 		       ivsize, 1);
2259 	print_hex_dump(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
2260 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2261 		       edesc->src_nents ? 100 : req->nbytes, 1);
2262 #endif
2263 
2264 	len = desc_len(sh_desc);
2265 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2266 
2267 	if (!edesc->src_nents) {
2268 		src_dma = sg_dma_address(req->src);
2269 		in_options = 0;
2270 	} else {
2271 		src_dma = edesc->sec4_sg_dma;
2272 		sec4_sg_index += edesc->src_nents;
2273 		in_options = LDST_SGF;
2274 	}
2275 	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2276 
2277 	if (iv_contig) {
2278 		dst_dma = edesc->iv_dma;
2279 		out_options = 0;
2280 	} else {
2281 		dst_dma = edesc->sec4_sg_dma +
2282 			  sec4_sg_index * sizeof(struct sec4_sg_entry);
2283 		out_options = LDST_SGF;
2284 	}
2285 	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2286 }
2287 
2288 /*
2289  * allocate and map the aead extended descriptor
2290  */
2291 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2292 					   int desc_bytes, bool *all_contig_ptr,
2293 					   bool encrypt)
2294 {
2295 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2296 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2297 	struct device *jrdev = ctx->jrdev;
2298 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2299 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2300 	int src_nents, dst_nents = 0;
2301 	struct aead_edesc *edesc;
2302 	int sgc;
2303 	bool all_contig = true;
2304 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2305 	unsigned int authsize = ctx->authsize;
2306 
2307 	if (unlikely(req->dst != req->src)) {
2308 		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
2309 		dst_nents = sg_count(req->dst,
2310 				     req->assoclen + req->cryptlen +
2311 					(encrypt ? authsize : (-authsize)));
2312 	} else {
2313 		src_nents = sg_count(req->src,
2314 				     req->assoclen + req->cryptlen +
2315 					(encrypt ? authsize : 0));
2316 	}
2317 
2318 	/* Check if data are contiguous. */
2319 	all_contig = !src_nents;
2320 	if (!all_contig) {
2321 		src_nents = src_nents ? : 1;
2322 		sec4_sg_len = src_nents;
2323 	}
2324 
2325 	sec4_sg_len += dst_nents;
2326 
2327 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2328 
2329 	/* allocate space for base edesc and hw desc commands, link tables */
2330 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2331 			GFP_DMA | flags);
2332 	if (!edesc) {
2333 		dev_err(jrdev, "could not allocate extended descriptor\n");
2334 		return ERR_PTR(-ENOMEM);
2335 	}
2336 
2337 	if (likely(req->src == req->dst)) {
2338 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2339 				 DMA_BIDIRECTIONAL);
2340 		if (unlikely(!sgc)) {
2341 			dev_err(jrdev, "unable to map source\n");
2342 			kfree(edesc);
2343 			return ERR_PTR(-ENOMEM);
2344 		}
2345 	} else {
2346 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2347 				 DMA_TO_DEVICE);
2348 		if (unlikely(!sgc)) {
2349 			dev_err(jrdev, "unable to map source\n");
2350 			kfree(edesc);
2351 			return ERR_PTR(-ENOMEM);
2352 		}
2353 
2354 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2355 				 DMA_FROM_DEVICE);
2356 		if (unlikely(!sgc)) {
2357 			dev_err(jrdev, "unable to map destination\n");
2358 			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
2359 				     DMA_TO_DEVICE);
2360 			kfree(edesc);
2361 			return ERR_PTR(-ENOMEM);
2362 		}
2363 	}
2364 
2365 	edesc->src_nents = src_nents;
2366 	edesc->dst_nents = dst_nents;
2367 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2368 			 desc_bytes;
2369 	*all_contig_ptr = all_contig;
2370 
2371 	sec4_sg_index = 0;
2372 	if (!all_contig) {
2373 		sg_to_sec4_sg_last(req->src, src_nents,
2374 			      edesc->sec4_sg + sec4_sg_index, 0);
2375 		sec4_sg_index += src_nents;
2376 	}
2377 	if (dst_nents) {
2378 		sg_to_sec4_sg_last(req->dst, dst_nents,
2379 				   edesc->sec4_sg + sec4_sg_index, 0);
2380 	}
2381 
2382 	if (!sec4_sg_bytes)
2383 		return edesc;
2384 
2385 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2386 					    sec4_sg_bytes, DMA_TO_DEVICE);
2387 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2388 		dev_err(jrdev, "unable to map S/G table\n");
2389 		aead_unmap(jrdev, edesc, req);
2390 		kfree(edesc);
2391 		return ERR_PTR(-ENOMEM);
2392 	}
2393 
2394 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2395 
2396 	return edesc;
2397 }
2398 
2399 static int gcm_encrypt(struct aead_request *req)
2400 {
2401 	struct aead_edesc *edesc;
2402 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2403 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2404 	struct device *jrdev = ctx->jrdev;
2405 	bool all_contig;
2406 	u32 *desc;
2407 	int ret = 0;
2408 
2409 	/* allocate extended descriptor */
2410 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
2411 	if (IS_ERR(edesc))
2412 		return PTR_ERR(edesc);
2413 
2414 	/* Create and submit job descriptor */
2415 	init_gcm_job(req, edesc, all_contig, true);
2416 #ifdef DEBUG
2417 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2418 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2419 		       desc_bytes(edesc->hw_desc), 1);
2420 #endif
2421 
2422 	desc = edesc->hw_desc;
2423 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2424 	if (!ret) {
2425 		ret = -EINPROGRESS;
2426 	} else {
2427 		aead_unmap(jrdev, edesc, req);
2428 		kfree(edesc);
2429 	}
2430 
2431 	return ret;
2432 }
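/*
 * A minimal sketch of driving the gcm(aes) path above through the generic
 * AEAD API (hypothetical caller; error handling elided; buf, key, iv,
 * my_complete and my_ctx are placeholders; buffer laid out as assoc data
 * || plaintext || room for the 16-byte ICV):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	sg_init_one(&sg, buf, assoclen + ptlen + 16);
 *	aead_request_set_callback(req, 0, my_complete, my_ctx);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
 *	err = crypto_aead_encrypt(req);		(-EINPROGRESS once queued)
 */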
2433 
2434 static int ipsec_gcm_encrypt(struct aead_request *req)
2435 {
2436 	if (req->assoclen < 8)
2437 		return -EINVAL;
2438 
2439 	return gcm_encrypt(req);
2440 }
2441 
2442 static int aead_encrypt(struct aead_request *req)
2443 {
2444 	struct aead_edesc *edesc;
2445 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2446 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2447 	struct device *jrdev = ctx->jrdev;
2448 	bool all_contig;
2449 	u32 *desc;
2450 	int ret = 0;
2451 
2452 	/* allocate extended descriptor */
2453 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2454 				 &all_contig, true);
2455 	if (IS_ERR(edesc))
2456 		return PTR_ERR(edesc);
2457 
2458 	/* Create and submit job descriptor */
2459 	init_authenc_job(req, edesc, all_contig, true);
2460 #ifdef DEBUG
2461 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2462 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2463 		       desc_bytes(edesc->hw_desc), 1);
2464 #endif
2465 
2466 	desc = edesc->hw_desc;
2467 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2468 	if (!ret) {
2469 		ret = -EINPROGRESS;
2470 	} else {
2471 		aead_unmap(jrdev, edesc, req);
2472 		kfree(edesc);
2473 	}
2474 
2475 	return ret;
2476 }
2477 
2478 static int gcm_decrypt(struct aead_request *req)
2479 {
2480 	struct aead_edesc *edesc;
2481 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2482 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2483 	struct device *jrdev = ctx->jrdev;
2484 	bool all_contig;
2485 	u32 *desc;
2486 	int ret = 0;
2487 
2488 	/* allocate extended descriptor */
2489 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2490 	if (IS_ERR(edesc))
2491 		return PTR_ERR(edesc);
2492 
2493 	/* Create and submit job descriptor */
2494 	init_gcm_job(req, edesc, all_contig, false);
2495 #ifdef DEBUG
2496 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2497 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2498 		       desc_bytes(edesc->hw_desc), 1);
2499 #endif
2500 
2501 	desc = edesc->hw_desc;
2502 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2503 	if (!ret) {
2504 		ret = -EINPROGRESS;
2505 	} else {
2506 		aead_unmap(jrdev, edesc, req);
2507 		kfree(edesc);
2508 	}
2509 
2510 	return ret;
2511 }
2512 
2513 static int ipsec_gcm_decrypt(struct aead_request *req)
2514 {
2515 	if (req->assoclen < 8)
2516 		return -EINVAL;
2517 
2518 	return gcm_decrypt(req);
2519 }
2520 
2521 static int aead_decrypt(struct aead_request *req)
2522 {
2523 	struct aead_edesc *edesc;
2524 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2525 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2526 	struct device *jrdev = ctx->jrdev;
2527 	bool all_contig;
2528 	u32 *desc;
2529 	int ret = 0;
2530 
2531 	/* allocate extended descriptor */
2532 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2533 				 &all_contig, false);
2534 	if (IS_ERR(edesc))
2535 		return PTR_ERR(edesc);
2536 
2537 #ifdef DEBUG
2538 	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2539 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2540 		       req->assoclen + req->cryptlen, 1);
2541 #endif
2542 
2543 	/* Create and submit job descriptor */
2544 	init_authenc_job(req, edesc, all_contig, false);
2545 #ifdef DEBUG
2546 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2547 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2548 		       desc_bytes(edesc->hw_desc), 1);
2549 #endif
2550 
2551 	desc = edesc->hw_desc;
2552 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2553 	if (!ret) {
2554 		ret = -EINPROGRESS;
2555 	} else {
2556 		aead_unmap(jrdev, edesc, req);
2557 		kfree(edesc);
2558 	}
2559 
2560 	return ret;
2561 }
2562 
2563 /*
2564  * allocate and map the ablkcipher extended descriptor
2565  */
2566 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2567 						       *req, int desc_bytes,
2568 						       bool *iv_contig_out)
2569 {
2570 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2571 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2572 	struct device *jrdev = ctx->jrdev;
2573 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2574 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2575 		       GFP_KERNEL : GFP_ATOMIC;
2576 	int src_nents, dst_nents = 0, sec4_sg_bytes;
2577 	struct ablkcipher_edesc *edesc;
2578 	dma_addr_t iv_dma = 0;
2579 	bool iv_contig = false;
2580 	int sgc;
2581 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2582 	int sec4_sg_index;
2583 
2584 	src_nents = sg_count(req->src, req->nbytes);
2585 
2586 	if (req->dst != req->src)
2587 		dst_nents = sg_count(req->dst, req->nbytes);
2588 
2589 	if (likely(req->src == req->dst)) {
2590 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2591 				 DMA_BIDIRECTIONAL);
2592 	} else {
2593 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2594 				 DMA_TO_DEVICE);
2595 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2596 				 DMA_FROM_DEVICE);
2597 	}
2598 
2599 	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2600 	if (dma_mapping_error(jrdev, iv_dma)) {
2601 		dev_err(jrdev, "unable to map IV\n");
2602 		return ERR_PTR(-ENOMEM);
2603 	}
2604 
2605 	/*
2606 	 * Check if the IV can be prepended contiguously to the source.
2607 	 * If so, use the flat buffer; if not, build an S/G link table.
2608 	 */
2609 	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2610 		iv_contig = true;
2611 	else
2612 		src_nents = src_nents ? : 1;
2613 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2614 			sizeof(struct sec4_sg_entry);
2615 
2616 	/* allocate space for base edesc and hw desc commands, link tables */
2617 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2618 			GFP_DMA | flags);
2619 	if (!edesc) {
2620 		dev_err(jrdev, "could not allocate extended descriptor\n");
2621 		return ERR_PTR(-ENOMEM);
2622 	}
2623 
2624 	edesc->src_nents = src_nents;
2625 	edesc->dst_nents = dst_nents;
2626 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2627 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2628 			 desc_bytes;
2629 
2630 	sec4_sg_index = 0;
2631 	if (!iv_contig) {
2632 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2633 		sg_to_sec4_sg_last(req->src, src_nents,
2634 				   edesc->sec4_sg + 1, 0);
2635 		sec4_sg_index += 1 + src_nents;
2636 	}
2637 
2638 	if (dst_nents) {
2639 		sg_to_sec4_sg_last(req->dst, dst_nents,
2640 			edesc->sec4_sg + sec4_sg_index, 0);
2641 	}
2642 
2643 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2644 					    sec4_sg_bytes, DMA_TO_DEVICE);
2645 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2646 		dev_err(jrdev, "unable to map S/G table\n");
2647 		return ERR_PTR(-ENOMEM);
2648 	}
2649 
2650 	edesc->iv_dma = iv_dma;
2651 
2652 #ifdef DEBUG
2653 	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
2654 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2655 		       sec4_sg_bytes, 1);
2656 #endif
2657 
2658 	*iv_contig_out = iv_contig;
2659 	return edesc;
2660 }
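/*
 * The contiguity test above is a pure address check: if the mapped IV ends
 * exactly where the single-segment source begins, hardware can consume
 * IV + data as one flat buffer and no link-table entry is needed for the
 * IV. With ivsize = 16, for example:
 *
 *	iv_dma = 0x1000, sg_dma_address(req->src) = 0x1010  -> contiguous
 *	iv_dma = 0x1000, sg_dma_address(req->src) = 0x2000  -> S/G table
 */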
2661 
2662 static int ablkcipher_encrypt(struct ablkcipher_request *req)
2663 {
2664 	struct ablkcipher_edesc *edesc;
2665 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2666 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2667 	struct device *jrdev = ctx->jrdev;
2668 	bool iv_contig;
2669 	u32 *desc;
2670 	int ret = 0;
2671 
2672 	/* allocate extended descriptor */
2673 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2674 				       CAAM_CMD_SZ, &iv_contig);
2675 	if (IS_ERR(edesc))
2676 		return PTR_ERR(edesc);
2677 
2678 	/* Create and submit job descriptor */
2679 	init_ablkcipher_job(ctx->sh_desc_enc,
2680 		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2681 #ifdef DEBUG
2682 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2683 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2684 		       desc_bytes(edesc->hw_desc), 1);
2685 #endif
2686 	desc = edesc->hw_desc;
2687 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2688 
2689 	if (!ret) {
2690 		ret = -EINPROGRESS;
2691 	} else {
2692 		ablkcipher_unmap(jrdev, edesc, req);
2693 		kfree(edesc);
2694 	}
2695 
2696 	return ret;
2697 }
2698 
2699 static int ablkcipher_decrypt(struct ablkcipher_request *req)
2700 {
2701 	struct ablkcipher_edesc *edesc;
2702 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2703 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2704 	struct device *jrdev = ctx->jrdev;
2705 	bool iv_contig;
2706 	u32 *desc;
2707 	int ret = 0;
2708 
2709 	/* allocate extended descriptor */
2710 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2711 				       CAAM_CMD_SZ, &iv_contig);
2712 	if (IS_ERR(edesc))
2713 		return PTR_ERR(edesc);
2714 
2715 	/* Create and submit job descriptor */
2716 	init_ablkcipher_job(ctx->sh_desc_dec,
2717 		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2718 	desc = edesc->hw_desc;
2719 #ifdef DEBUG
2720 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2721 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2722 		       desc_bytes(edesc->hw_desc), 1);
2723 #endif
2724 
2725 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2726 	if (!ret) {
2727 		ret = -EINPROGRESS;
2728 	} else {
2729 		ablkcipher_unmap(jrdev, edesc, req);
2730 		kfree(edesc);
2731 	}
2732 
2733 	return ret;
2734 }
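/*
 * Caller-side sketch for the ablkcipher entry points above (hypothetical
 * caller, legacy ablkcipher API of this kernel generation; error handling
 * elided; buf, key, iv, my_complete and my_ctx are placeholders):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	crypto_ablkcipher_setkey(tfm, key, 16);
 *	sg_init_one(&sg, buf, 256);
 *	ablkcipher_request_set_callback(req, 0, my_complete, my_ctx);
 *	ablkcipher_request_set_crypt(req, &sg, &sg, 256, iv);
 *	err = crypto_ablkcipher_decrypt(req);	(-EINPROGRESS once queued)
 */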
2735 
2736 /*
2737  * allocate and map the ablkcipher extended descriptor
2738  * for ablkcipher givencrypt
2739  */
2740 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2741 				struct skcipher_givcrypt_request *greq,
2742 				int desc_bytes,
2743 				bool *iv_contig_out)
2744 {
2745 	struct ablkcipher_request *req = &greq->creq;
2746 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2747 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2748 	struct device *jrdev = ctx->jrdev;
2749 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2750 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2751 		       GFP_KERNEL : GFP_ATOMIC;
2752 	int src_nents, dst_nents = 0, sec4_sg_bytes;
2753 	struct ablkcipher_edesc *edesc;
2754 	dma_addr_t iv_dma = 0;
2755 	bool iv_contig = false;
2756 	int sgc;
2757 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2758 	int sec4_sg_index;
2759 
2760 	src_nents = sg_count(req->src, req->nbytes);
2761 
2762 	if (unlikely(req->dst != req->src))
2763 		dst_nents = sg_count(req->dst, req->nbytes);
2764 
2765 	if (likely(req->src == req->dst)) {
2766 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2767 				 DMA_BIDIRECTIONAL);
2768 	} else {
2769 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2770 				 DMA_TO_DEVICE);
2771 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2772 				 DMA_FROM_DEVICE);
2773 	}
2774 
2775 	/*
2776 	 * Check if the generated IV can be contiguous with the destination.
2777 	 * If so, use the flat buffer; if not, build an S/G link table.
2778 	 */
2779 	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
2780 	if (dma_mapping_error(jrdev, iv_dma)) {
2781 		dev_err(jrdev, "unable to map IV\n");
2782 		return ERR_PTR(-ENOMEM);
2783 	}
2784 
2785 	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2786 		iv_contig = true;
2787 	else
2788 		dst_nents = dst_nents ? : 1;
2789 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2790 			sizeof(struct sec4_sg_entry);
2791 
2792 	/* allocate space for base edesc and hw desc commands, link tables */
2793 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2794 			GFP_DMA | flags);
2795 	if (!edesc) {
2796 		dev_err(jrdev, "could not allocate extended descriptor\n");
2797 		return ERR_PTR(-ENOMEM);
2798 	}
2799 
2800 	edesc->src_nents = src_nents;
2801 	edesc->dst_nents = dst_nents;
2802 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2803 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2804 			 desc_bytes;
2805 
2806 	sec4_sg_index = 0;
2807 	if (src_nents) {
2808 		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2809 		sec4_sg_index += src_nents;
2810 	}
2811 
2812 	if (!iv_contig) {
2813 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2814 				   iv_dma, ivsize, 0);
2815 		sec4_sg_index += 1;
2816 		sg_to_sec4_sg_last(req->dst, dst_nents,
2817 				   edesc->sec4_sg + sec4_sg_index, 0);
2818 	}
2819 
2820 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2821 					    sec4_sg_bytes, DMA_TO_DEVICE);
2822 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2823 		dev_err(jrdev, "unable to map S/G table\n");
2824 		return ERR_PTR(-ENOMEM);
2825 	}
2826 	edesc->iv_dma = iv_dma;
2827 
2828 #ifdef DEBUG
2829 	print_hex_dump(KERN_ERR,
2830 		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2831 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2832 		       sec4_sg_bytes, 1);
2833 #endif
2834 
2835 	*iv_contig_out = iv_contig;
2836 	return edesc;
2837 }
2838 
2839 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2840 {
2841 	struct ablkcipher_request *req = &creq->creq;
2842 	struct ablkcipher_edesc *edesc;
2843 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2844 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2845 	struct device *jrdev = ctx->jrdev;
2846 	bool iv_contig;
2847 	u32 *desc;
2848 	int ret = 0;
2849 
2850 	/* allocate extended descriptor */
2851 	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2852 				       CAAM_CMD_SZ, &iv_contig);
2853 	if (IS_ERR(edesc))
2854 		return PTR_ERR(edesc);
2855 
2856 	/* Create and submit job descriptor */
2857 	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2858 				edesc, req, iv_contig);
2859 #ifdef DEBUG
2860 	print_hex_dump(KERN_ERR,
2861 		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2862 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2863 		       desc_bytes(edesc->hw_desc), 1);
2864 #endif
2865 	desc = edesc->hw_desc;
2866 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2867 
2868 	if (!ret) {
2869 		ret = -EINPROGRESS;
2870 	} else {
2871 		ablkcipher_unmap(jrdev, edesc, req);
2872 		kfree(edesc);
2873 	}
2874 
2875 	return ret;
2876 }
2877 
2878 #define template_aead		template_u.aead
2879 #define template_ablkcipher	template_u.ablkcipher
2880 struct caam_alg_template {
2881 	char name[CRYPTO_MAX_ALG_NAME];
2882 	char driver_name[CRYPTO_MAX_ALG_NAME];
2883 	unsigned int blocksize;
2884 	u32 type;
2885 	union {
2886 		struct ablkcipher_alg ablkcipher;
2887 	} template_u;
2888 	u32 class1_alg_type;
2889 	u32 class2_alg_type;
2890 	u32 alg_op;
2891 };
2892 
2893 static struct caam_alg_template driver_algs[] = {
2894 	/* ablkcipher descriptor */
2895 	{
2896 		.name = "cbc(aes)",
2897 		.driver_name = "cbc-aes-caam",
2898 		.blocksize = AES_BLOCK_SIZE,
2899 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2900 		.template_ablkcipher = {
2901 			.setkey = ablkcipher_setkey,
2902 			.encrypt = ablkcipher_encrypt,
2903 			.decrypt = ablkcipher_decrypt,
2904 			.givencrypt = ablkcipher_givencrypt,
2905 			.geniv = "<built-in>",
2906 			.min_keysize = AES_MIN_KEY_SIZE,
2907 			.max_keysize = AES_MAX_KEY_SIZE,
2908 			.ivsize = AES_BLOCK_SIZE,
2909 			},
2910 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2911 	},
2912 	{
2913 		.name = "cbc(des3_ede)",
2914 		.driver_name = "cbc-3des-caam",
2915 		.blocksize = DES3_EDE_BLOCK_SIZE,
2916 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2917 		.template_ablkcipher = {
2918 			.setkey = ablkcipher_setkey,
2919 			.encrypt = ablkcipher_encrypt,
2920 			.decrypt = ablkcipher_decrypt,
2921 			.givencrypt = ablkcipher_givencrypt,
2922 			.geniv = "<built-in>",
2923 			.min_keysize = DES3_EDE_KEY_SIZE,
2924 			.max_keysize = DES3_EDE_KEY_SIZE,
2925 			.ivsize = DES3_EDE_BLOCK_SIZE,
2926 			},
2927 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2928 	},
2929 	{
2930 		.name = "cbc(des)",
2931 		.driver_name = "cbc-des-caam",
2932 		.blocksize = DES_BLOCK_SIZE,
2933 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2934 		.template_ablkcipher = {
2935 			.setkey = ablkcipher_setkey,
2936 			.encrypt = ablkcipher_encrypt,
2937 			.decrypt = ablkcipher_decrypt,
2938 			.givencrypt = ablkcipher_givencrypt,
2939 			.geniv = "<built-in>",
2940 			.min_keysize = DES_KEY_SIZE,
2941 			.max_keysize = DES_KEY_SIZE,
2942 			.ivsize = DES_BLOCK_SIZE,
2943 			},
2944 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2945 	},
2946 	{
2947 		.name = "ctr(aes)",
2948 		.driver_name = "ctr-aes-caam",
2949 		.blocksize = 1,
2950 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2951 		.template_ablkcipher = {
2952 			.setkey = ablkcipher_setkey,
2953 			.encrypt = ablkcipher_encrypt,
2954 			.decrypt = ablkcipher_decrypt,
2955 			.geniv = "chainiv",
2956 			.min_keysize = AES_MIN_KEY_SIZE,
2957 			.max_keysize = AES_MAX_KEY_SIZE,
2958 			.ivsize = AES_BLOCK_SIZE,
2959 			},
2960 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2961 	},
2962 	{
2963 		.name = "rfc3686(ctr(aes))",
2964 		.driver_name = "rfc3686-ctr-aes-caam",
2965 		.blocksize = 1,
2966 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2967 		.template_ablkcipher = {
2968 			.setkey = ablkcipher_setkey,
2969 			.encrypt = ablkcipher_encrypt,
2970 			.decrypt = ablkcipher_decrypt,
2971 			.givencrypt = ablkcipher_givencrypt,
2972 			.geniv = "<built-in>",
2973 			.min_keysize = AES_MIN_KEY_SIZE +
2974 				       CTR_RFC3686_NONCE_SIZE,
2975 			.max_keysize = AES_MAX_KEY_SIZE +
2976 				       CTR_RFC3686_NONCE_SIZE,
2977 			.ivsize = CTR_RFC3686_IV_SIZE,
2978 			},
2979 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2980 	},
2981 	{
2982 		.name = "xts(aes)",
2983 		.driver_name = "xts-aes-caam",
2984 		.blocksize = AES_BLOCK_SIZE,
2985 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2986 		.template_ablkcipher = {
2987 			.setkey = xts_ablkcipher_setkey,
2988 			.encrypt = ablkcipher_encrypt,
2989 			.decrypt = ablkcipher_decrypt,
2990 			.geniv = "eseqiv",
2991 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
2992 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
2993 			.ivsize = AES_BLOCK_SIZE,
2994 			},
2995 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
2996 	},
2997 };
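/*
 * Hypothetical sketch (illustrative only, not the driver's actual
 * registration helper; template_to_alg is a placeholder name) of how
 * driver_algs[] entries are typically consumed at module init: each
 * template is converted to a struct crypto_alg and registered with the
 * crypto API:
 *
 *	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
 *		struct crypto_alg *alg = template_to_alg(&driver_algs[i]);
 *
 *		if (!IS_ERR(alg))
 *			err = crypto_register_alg(alg);
 *	}
 */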
2998 
2999 static struct caam_aead_alg driver_aeads[] = {
3000 	{
3001 		.aead = {
3002 			.base = {
3003 				.cra_name = "rfc4106(gcm(aes))",
3004 				.cra_driver_name = "rfc4106-gcm-aes-caam",
3005 				.cra_blocksize = 1,
3006 			},
3007 			.setkey = rfc4106_setkey,
3008 			.setauthsize = rfc4106_setauthsize,
3009 			.encrypt = ipsec_gcm_encrypt,
3010 			.decrypt = ipsec_gcm_decrypt,
3011 			.ivsize = 8,
3012 			.maxauthsize = AES_BLOCK_SIZE,
3013 		},
3014 		.caam = {
3015 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3016 		},
3017 	},
3018 	{
3019 		.aead = {
3020 			.base = {
3021 				.cra_name = "rfc4543(gcm(aes))",
3022 				.cra_driver_name = "rfc4543-gcm-aes-caam",
3023 				.cra_blocksize = 1,
3024 			},
3025 			.setkey = rfc4543_setkey,
3026 			.setauthsize = rfc4543_setauthsize,
3027 			.encrypt = ipsec_gcm_encrypt,
3028 			.decrypt = ipsec_gcm_decrypt,
3029 			.ivsize = 8,
3030 			.maxauthsize = AES_BLOCK_SIZE,
3031 		},
3032 		.caam = {
3033 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3034 		},
3035 	},
3036 	/* Galois Counter Mode */
3037 	{
3038 		.aead = {
3039 			.base = {
3040 				.cra_name = "gcm(aes)",
3041 				.cra_driver_name = "gcm-aes-caam",
3042 				.cra_blocksize = 1,
3043 			},
3044 			.setkey = gcm_setkey,
3045 			.setauthsize = gcm_setauthsize,
3046 			.encrypt = gcm_encrypt,
3047 			.decrypt = gcm_decrypt,
3048 			.ivsize = 12,
3049 			.maxauthsize = AES_BLOCK_SIZE,
3050 		},
3051 		.caam = {
3052 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3053 		},
3054 	},
3055 	/* single-pass ipsec_esp descriptor */
3056 	{
3057 		.aead = {
3058 			.base = {
3059 				.cra_name = "authenc(hmac(md5),"
3060 					    "ecb(cipher_null))",
3061 				.cra_driver_name = "authenc-hmac-md5-"
3062 						   "ecb-cipher_null-caam",
3063 				.cra_blocksize = NULL_BLOCK_SIZE,
3064 			},
3065 			.setkey = aead_setkey,
3066 			.setauthsize = aead_setauthsize,
3067 			.encrypt = aead_encrypt,
3068 			.decrypt = aead_decrypt,
3069 			.ivsize = NULL_IV_SIZE,
3070 			.maxauthsize = MD5_DIGEST_SIZE,
3071 		},
3072 		.caam = {
3073 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3074 					   OP_ALG_AAI_HMAC_PRECOMP,
3075 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3076 		},
3077 	},
3078 	{
3079 		.aead = {
3080 			.base = {
3081 				.cra_name = "authenc(hmac(sha1),"
3082 					    "ecb(cipher_null))",
3083 				.cra_driver_name = "authenc-hmac-sha1-"
3084 						   "ecb-cipher_null-caam",
3085 				.cra_blocksize = NULL_BLOCK_SIZE,
3086 			},
3087 			.setkey = aead_setkey,
3088 			.setauthsize = aead_setauthsize,
3089 			.encrypt = aead_encrypt,
3090 			.decrypt = aead_decrypt,
3091 			.ivsize = NULL_IV_SIZE,
3092 			.maxauthsize = SHA1_DIGEST_SIZE,
3093 		},
3094 		.caam = {
3095 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3096 					   OP_ALG_AAI_HMAC_PRECOMP,
3097 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3098 		},
3099 	},
3100 	{
3101 		.aead = {
3102 			.base = {
3103 				.cra_name = "authenc(hmac(sha224),"
3104 					    "ecb(cipher_null))",
3105 				.cra_driver_name = "authenc-hmac-sha224-"
3106 						   "ecb-cipher_null-caam",
3107 				.cra_blocksize = NULL_BLOCK_SIZE,
3108 			},
3109 			.setkey = aead_setkey,
3110 			.setauthsize = aead_setauthsize,
3111 			.encrypt = aead_encrypt,
3112 			.decrypt = aead_decrypt,
3113 			.ivsize = NULL_IV_SIZE,
3114 			.maxauthsize = SHA224_DIGEST_SIZE,
3115 		},
3116 		.caam = {
3117 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3118 					   OP_ALG_AAI_HMAC_PRECOMP,
3119 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3120 		},
3121 	},
3122 	{
3123 		.aead = {
3124 			.base = {
3125 				.cra_name = "authenc(hmac(sha256),"
3126 					    "ecb(cipher_null))",
3127 				.cra_driver_name = "authenc-hmac-sha256-"
3128 						   "ecb-cipher_null-caam",
3129 				.cra_blocksize = NULL_BLOCK_SIZE,
3130 			},
3131 			.setkey = aead_setkey,
3132 			.setauthsize = aead_setauthsize,
3133 			.encrypt = aead_encrypt,
3134 			.decrypt = aead_decrypt,
3135 			.ivsize = NULL_IV_SIZE,
3136 			.maxauthsize = SHA256_DIGEST_SIZE,
3137 		},
3138 		.caam = {
3139 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3140 					   OP_ALG_AAI_HMAC_PRECOMP,
3141 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3142 		},
3143 	},
3144 	{
3145 		.aead = {
3146 			.base = {
3147 				.cra_name = "authenc(hmac(sha384),"
3148 					    "ecb(cipher_null))",
3149 				.cra_driver_name = "authenc-hmac-sha384-"
3150 						   "ecb-cipher_null-caam",
3151 				.cra_blocksize = NULL_BLOCK_SIZE,
3152 			},
3153 			.setkey = aead_setkey,
3154 			.setauthsize = aead_setauthsize,
3155 			.encrypt = aead_encrypt,
3156 			.decrypt = aead_decrypt,
3157 			.ivsize = NULL_IV_SIZE,
3158 			.maxauthsize = SHA384_DIGEST_SIZE,
3159 		},
3160 		.caam = {
3161 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3162 					   OP_ALG_AAI_HMAC_PRECOMP,
3163 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3164 		},
3165 	},
3166 	{
3167 		.aead = {
3168 			.base = {
3169 				.cra_name = "authenc(hmac(sha512),"
3170 					    "ecb(cipher_null))",
3171 				.cra_driver_name = "authenc-hmac-sha512-"
3172 						   "ecb-cipher_null-caam",
3173 				.cra_blocksize = NULL_BLOCK_SIZE,
3174 			},
3175 			.setkey = aead_setkey,
3176 			.setauthsize = aead_setauthsize,
3177 			.encrypt = aead_encrypt,
3178 			.decrypt = aead_decrypt,
3179 			.ivsize = NULL_IV_SIZE,
3180 			.maxauthsize = SHA512_DIGEST_SIZE,
3181 		},
3182 		.caam = {
3183 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3184 					   OP_ALG_AAI_HMAC_PRECOMP,
3185 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3186 		},
3187 	},
3188 	{
3189 		.aead = {
3190 			.base = {
3191 				.cra_name = "authenc(hmac(md5),cbc(aes))",
3192 				.cra_driver_name = "authenc-hmac-md5-"
3193 						   "cbc-aes-caam",
3194 				.cra_blocksize = AES_BLOCK_SIZE,
3195 			},
3196 			.setkey = aead_setkey,
3197 			.setauthsize = aead_setauthsize,
3198 			.encrypt = aead_encrypt,
3199 			.decrypt = aead_decrypt,
3200 			.ivsize = AES_BLOCK_SIZE,
3201 			.maxauthsize = MD5_DIGEST_SIZE,
3202 		},
3203 		.caam = {
3204 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3205 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3206 					   OP_ALG_AAI_HMAC_PRECOMP,
3207 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3208 		},
3209 	},
3210 	{
3211 		.aead = {
3212 			.base = {
3213 				.cra_name = "echainiv(authenc(hmac(md5),"
3214 					    "cbc(aes)))",
3215 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3216 						   "cbc-aes-caam",
3217 				.cra_blocksize = AES_BLOCK_SIZE,
3218 			},
3219 			.setkey = aead_setkey,
3220 			.setauthsize = aead_setauthsize,
3221 			.encrypt = aead_encrypt,
3222 			.decrypt = aead_decrypt,
3223 			.ivsize = AES_BLOCK_SIZE,
3224 			.maxauthsize = MD5_DIGEST_SIZE,
3225 		},
3226 		.caam = {
3227 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3228 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3229 					   OP_ALG_AAI_HMAC_PRECOMP,
3230 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3231 			.geniv = true,
3232 		},
3233 	},
3234 	{
3235 		.aead = {
3236 			.base = {
3237 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
3238 				.cra_driver_name = "authenc-hmac-sha1-"
3239 						   "cbc-aes-caam",
3240 				.cra_blocksize = AES_BLOCK_SIZE,
3241 			},
3242 			.setkey = aead_setkey,
3243 			.setauthsize = aead_setauthsize,
3244 			.encrypt = aead_encrypt,
3245 			.decrypt = aead_decrypt,
3246 			.ivsize = AES_BLOCK_SIZE,
3247 			.maxauthsize = SHA1_DIGEST_SIZE,
3248 		},
3249 		.caam = {
3250 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3251 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3252 					   OP_ALG_AAI_HMAC_PRECOMP,
3253 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3254 		},
3255 	},
3256 	{
3257 		.aead = {
3258 			.base = {
3259 				.cra_name = "echainiv(authenc(hmac(sha1),"
3260 					    "cbc(aes)))",
3261 				.cra_driver_name = "echainiv-authenc-"
3262 						   "hmac-sha1-cbc-aes-caam",
3263 				.cra_blocksize = AES_BLOCK_SIZE,
3264 			},
3265 			.setkey = aead_setkey,
3266 			.setauthsize = aead_setauthsize,
3267 			.encrypt = aead_encrypt,
3268 			.decrypt = aead_decrypt,
3269 			.ivsize = AES_BLOCK_SIZE,
3270 			.maxauthsize = SHA1_DIGEST_SIZE,
3271 		},
3272 		.caam = {
3273 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3274 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3275 					   OP_ALG_AAI_HMAC_PRECOMP,
3276 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3277 			.geniv = true,
3278 		},
3279 	},
3280 	{
3281 		.aead = {
3282 			.base = {
3283 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
3284 				.cra_driver_name = "authenc-hmac-sha224-"
3285 						   "cbc-aes-caam",
3286 				.cra_blocksize = AES_BLOCK_SIZE,
3287 			},
3288 			.setkey = aead_setkey,
3289 			.setauthsize = aead_setauthsize,
3290 			.encrypt = aead_encrypt,
3291 			.decrypt = aead_decrypt,
3292 			.ivsize = AES_BLOCK_SIZE,
3293 			.maxauthsize = SHA224_DIGEST_SIZE,
3294 		},
3295 		.caam = {
3296 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3297 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3298 					   OP_ALG_AAI_HMAC_PRECOMP,
3299 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3300 		},
3301 	},
3302 	{
3303 		.aead = {
3304 			.base = {
3305 				.cra_name = "echainiv(authenc(hmac(sha224),"
3306 					    "cbc(aes)))",
3307 				.cra_driver_name = "echainiv-authenc-"
3308 						   "hmac-sha224-cbc-aes-caam",
3309 				.cra_blocksize = AES_BLOCK_SIZE,
3310 			},
3311 			.setkey = aead_setkey,
3312 			.setauthsize = aead_setauthsize,
3313 			.encrypt = aead_encrypt,
3314 			.decrypt = aead_decrypt,
3315 			.ivsize = AES_BLOCK_SIZE,
3316 			.maxauthsize = SHA224_DIGEST_SIZE,
3317 		},
3318 		.caam = {
3319 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3320 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3321 					   OP_ALG_AAI_HMAC_PRECOMP,
3322 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3323 			.geniv = true,
3324 		},
3325 	},
3326 	{
3327 		.aead = {
3328 			.base = {
3329 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
3330 				.cra_driver_name = "authenc-hmac-sha256-"
3331 						   "cbc-aes-caam",
3332 				.cra_blocksize = AES_BLOCK_SIZE,
3333 			},
3334 			.setkey = aead_setkey,
3335 			.setauthsize = aead_setauthsize,
3336 			.encrypt = aead_encrypt,
3337 			.decrypt = aead_decrypt,
3338 			.ivsize = AES_BLOCK_SIZE,
3339 			.maxauthsize = SHA256_DIGEST_SIZE,
3340 		},
3341 		.caam = {
3342 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3343 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3344 					   OP_ALG_AAI_HMAC_PRECOMP,
3345 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3346 		},
3347 	},
3348 	{
3349 		.aead = {
3350 			.base = {
3351 				.cra_name = "echainiv(authenc(hmac(sha256),"
3352 					    "cbc(aes)))",
3353 				.cra_driver_name = "echainiv-authenc-"
3354 						   "hmac-sha256-cbc-aes-caam",
3355 				.cra_blocksize = AES_BLOCK_SIZE,
3356 			},
3357 			.setkey = aead_setkey,
3358 			.setauthsize = aead_setauthsize,
3359 			.encrypt = aead_encrypt,
3360 			.decrypt = aead_decrypt,
3361 			.ivsize = AES_BLOCK_SIZE,
3362 			.maxauthsize = SHA256_DIGEST_SIZE,
3363 		},
3364 		.caam = {
3365 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3366 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3367 					   OP_ALG_AAI_HMAC_PRECOMP,
3368 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3369 			.geniv = true,
3370 		},
3371 	},
3372 	{
3373 		.aead = {
3374 			.base = {
3375 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
3376 				.cra_driver_name = "authenc-hmac-sha384-"
3377 						   "cbc-aes-caam",
3378 				.cra_blocksize = AES_BLOCK_SIZE,
3379 			},
3380 			.setkey = aead_setkey,
3381 			.setauthsize = aead_setauthsize,
3382 			.encrypt = aead_encrypt,
3383 			.decrypt = aead_decrypt,
3384 			.ivsize = AES_BLOCK_SIZE,
3385 			.maxauthsize = SHA384_DIGEST_SIZE,
3386 		},
3387 		.caam = {
3388 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3389 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3390 					   OP_ALG_AAI_HMAC_PRECOMP,
3391 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3392 		},
3393 	},
3394 	{
3395 		.aead = {
3396 			.base = {
3397 				.cra_name = "echainiv(authenc(hmac(sha384),"
3398 					    "cbc(aes)))",
3399 				.cra_driver_name = "echainiv-authenc-"
3400 						   "hmac-sha384-cbc-aes-caam",
3401 				.cra_blocksize = AES_BLOCK_SIZE,
3402 			},
3403 			.setkey = aead_setkey,
3404 			.setauthsize = aead_setauthsize,
3405 			.encrypt = aead_encrypt,
3406 			.decrypt = aead_decrypt,
3407 			.ivsize = AES_BLOCK_SIZE,
3408 			.maxauthsize = SHA384_DIGEST_SIZE,
3409 		},
3410 		.caam = {
3411 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3412 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3413 					   OP_ALG_AAI_HMAC_PRECOMP,
3414 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3415 			.geniv = true,
3416 		},
3417 	},
3418 	{
3419 		.aead = {
3420 			.base = {
3421 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
3422 				.cra_driver_name = "authenc-hmac-sha512-"
3423 						   "cbc-aes-caam",
3424 				.cra_blocksize = AES_BLOCK_SIZE,
3425 			},
3426 			.setkey = aead_setkey,
3427 			.setauthsize = aead_setauthsize,
3428 			.encrypt = aead_encrypt,
3429 			.decrypt = aead_decrypt,
3430 			.ivsize = AES_BLOCK_SIZE,
3431 			.maxauthsize = SHA512_DIGEST_SIZE,
3432 		},
3433 		.caam = {
3434 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3435 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3436 					   OP_ALG_AAI_HMAC_PRECOMP,
3437 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3438 		},
3439 	},
3440 	{
3441 		.aead = {
3442 			.base = {
3443 				.cra_name = "echainiv(authenc(hmac(sha512),"
3444 					    "cbc(aes)))",
3445 				.cra_driver_name = "echainiv-authenc-"
3446 						   "hmac-sha512-cbc-aes-caam",
3447 				.cra_blocksize = AES_BLOCK_SIZE,
3448 			},
3449 			.setkey = aead_setkey,
3450 			.setauthsize = aead_setauthsize,
3451 			.encrypt = aead_encrypt,
3452 			.decrypt = aead_decrypt,
3453 			.ivsize = AES_BLOCK_SIZE,
3454 			.maxauthsize = SHA512_DIGEST_SIZE,
3455 		},
3456 		.caam = {
3457 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3458 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3459 					   OP_ALG_AAI_HMAC_PRECOMP,
3460 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3461 			.geniv = true,
3462 		},
3463 	},
3464 	{
3465 		.aead = {
3466 			.base = {
3467 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3468 				.cra_driver_name = "authenc-hmac-md5-"
3469 						   "cbc-des3_ede-caam",
3470 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3471 			},
3472 			.setkey = aead_setkey,
3473 			.setauthsize = aead_setauthsize,
3474 			.encrypt = aead_encrypt,
3475 			.decrypt = aead_decrypt,
3476 			.ivsize = DES3_EDE_BLOCK_SIZE,
3477 			.maxauthsize = MD5_DIGEST_SIZE,
3478 		},
3479 		.caam = {
3480 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3481 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3482 					   OP_ALG_AAI_HMAC_PRECOMP,
3483 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3484 		},
3485 	},
3486 	{
3487 		.aead = {
3488 			.base = {
3489 				.cra_name = "echainiv(authenc(hmac(md5),"
3490 					    "cbc(des3_ede)))",
3491 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3492 						   "cbc-des3_ede-caam",
3493 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3494 			},
3495 			.setkey = aead_setkey,
3496 			.setauthsize = aead_setauthsize,
3497 			.encrypt = aead_encrypt,
3498 			.decrypt = aead_decrypt,
3499 			.ivsize = DES3_EDE_BLOCK_SIZE,
3500 			.maxauthsize = MD5_DIGEST_SIZE,
3501 		},
3502 		.caam = {
3503 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3504 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3505 					   OP_ALG_AAI_HMAC_PRECOMP,
3506 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3507 			.geniv = true,
3508 		},
3509 	},
3510 	{
3511 		.aead = {
3512 			.base = {
3513 				.cra_name = "authenc(hmac(sha1),"
3514 					    "cbc(des3_ede))",
3515 				.cra_driver_name = "authenc-hmac-sha1-"
3516 						   "cbc-des3_ede-caam",
3517 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3518 			},
3519 			.setkey = aead_setkey,
3520 			.setauthsize = aead_setauthsize,
3521 			.encrypt = aead_encrypt,
3522 			.decrypt = aead_decrypt,
3523 			.ivsize = DES3_EDE_BLOCK_SIZE,
3524 			.maxauthsize = SHA1_DIGEST_SIZE,
3525 		},
3526 		.caam = {
3527 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3528 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3529 					   OP_ALG_AAI_HMAC_PRECOMP,
3530 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3531 		},
3532 	},
3533 	{
3534 		.aead = {
3535 			.base = {
3536 				.cra_name = "echainiv(authenc(hmac(sha1),"
3537 					    "cbc(des3_ede)))",
3538 				.cra_driver_name = "echainiv-authenc-"
3539 						   "hmac-sha1-"
3540 						   "cbc-des3_ede-caam",
3541 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3542 			},
3543 			.setkey = aead_setkey,
3544 			.setauthsize = aead_setauthsize,
3545 			.encrypt = aead_encrypt,
3546 			.decrypt = aead_decrypt,
3547 			.ivsize = DES3_EDE_BLOCK_SIZE,
3548 			.maxauthsize = SHA1_DIGEST_SIZE,
3549 		},
3550 		.caam = {
3551 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3552 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3553 					   OP_ALG_AAI_HMAC_PRECOMP,
3554 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3555 			.geniv = true,
3556 		},
3557 	},
3558 	{
3559 		.aead = {
3560 			.base = {
3561 				.cra_name = "authenc(hmac(sha224),"
3562 					    "cbc(des3_ede))",
3563 				.cra_driver_name = "authenc-hmac-sha224-"
3564 						   "cbc-des3_ede-caam",
3565 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3566 			},
3567 			.setkey = aead_setkey,
3568 			.setauthsize = aead_setauthsize,
3569 			.encrypt = aead_encrypt,
3570 			.decrypt = aead_decrypt,
3571 			.ivsize = DES3_EDE_BLOCK_SIZE,
3572 			.maxauthsize = SHA224_DIGEST_SIZE,
3573 		},
3574 		.caam = {
3575 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3576 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3577 					   OP_ALG_AAI_HMAC_PRECOMP,
3578 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3579 		},
3580 	},
3581 	{
3582 		.aead = {
3583 			.base = {
3584 				.cra_name = "echainiv(authenc(hmac(sha224),"
3585 					    "cbc(des3_ede)))",
3586 				.cra_driver_name = "echainiv-authenc-"
3587 						   "hmac-sha224-"
3588 						   "cbc-des3_ede-caam",
3589 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3590 			},
3591 			.setkey = aead_setkey,
3592 			.setauthsize = aead_setauthsize,
3593 			.encrypt = aead_encrypt,
3594 			.decrypt = aead_decrypt,
3595 			.ivsize = DES3_EDE_BLOCK_SIZE,
3596 			.maxauthsize = SHA224_DIGEST_SIZE,
3597 		},
3598 		.caam = {
3599 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3600 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3601 					   OP_ALG_AAI_HMAC_PRECOMP,
3602 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3603 			.geniv = true,
3604 		},
3605 	},
3606 	{
3607 		.aead = {
3608 			.base = {
3609 				.cra_name = "authenc(hmac(sha256),"
3610 					    "cbc(des3_ede))",
3611 				.cra_driver_name = "authenc-hmac-sha256-"
3612 						   "cbc-des3_ede-caam",
3613 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3614 			},
3615 			.setkey = aead_setkey,
3616 			.setauthsize = aead_setauthsize,
3617 			.encrypt = aead_encrypt,
3618 			.decrypt = aead_decrypt,
3619 			.ivsize = DES3_EDE_BLOCK_SIZE,
3620 			.maxauthsize = SHA256_DIGEST_SIZE,
3621 		},
3622 		.caam = {
3623 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3624 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3625 					   OP_ALG_AAI_HMAC_PRECOMP,
3626 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3627 		},
3628 	},
3629 	{
3630 		.aead = {
3631 			.base = {
3632 				.cra_name = "echainiv(authenc(hmac(sha256),"
3633 					    "cbc(des3_ede)))",
3634 				.cra_driver_name = "echainiv-authenc-"
3635 						   "hmac-sha256-"
3636 						   "cbc-des3_ede-caam",
3637 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3638 			},
3639 			.setkey = aead_setkey,
3640 			.setauthsize = aead_setauthsize,
3641 			.encrypt = aead_encrypt,
3642 			.decrypt = aead_decrypt,
3643 			.ivsize = DES3_EDE_BLOCK_SIZE,
3644 			.maxauthsize = SHA256_DIGEST_SIZE,
3645 		},
3646 		.caam = {
3647 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3648 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3649 					   OP_ALG_AAI_HMAC_PRECOMP,
3650 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3651 			.geniv = true,
3652 		},
3653 	},
3654 	{
3655 		.aead = {
3656 			.base = {
3657 				.cra_name = "authenc(hmac(sha384),"
3658 					    "cbc(des3_ede))",
3659 				.cra_driver_name = "authenc-hmac-sha384-"
3660 						   "cbc-des3_ede-caam",
3661 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3662 			},
3663 			.setkey = aead_setkey,
3664 			.setauthsize = aead_setauthsize,
3665 			.encrypt = aead_encrypt,
3666 			.decrypt = aead_decrypt,
3667 			.ivsize = DES3_EDE_BLOCK_SIZE,
3668 			.maxauthsize = SHA384_DIGEST_SIZE,
3669 		},
3670 		.caam = {
3671 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3672 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3673 					   OP_ALG_AAI_HMAC_PRECOMP,
3674 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3675 		},
3676 	},
3677 	{
3678 		.aead = {
3679 			.base = {
3680 				.cra_name = "echainiv(authenc(hmac(sha384),"
3681 					    "cbc(des3_ede)))",
3682 				.cra_driver_name = "echainiv-authenc-"
3683 						   "hmac-sha384-"
3684 						   "cbc-des3_ede-caam",
3685 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3686 			},
3687 			.setkey = aead_setkey,
3688 			.setauthsize = aead_setauthsize,
3689 			.encrypt = aead_encrypt,
3690 			.decrypt = aead_decrypt,
3691 			.ivsize = DES3_EDE_BLOCK_SIZE,
3692 			.maxauthsize = SHA384_DIGEST_SIZE,
3693 		},
3694 		.caam = {
3695 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3696 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3697 					   OP_ALG_AAI_HMAC_PRECOMP,
3698 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3699 			.geniv = true,
3700 		},
3701 	},
3702 	{
3703 		.aead = {
3704 			.base = {
3705 				.cra_name = "authenc(hmac(sha512),"
3706 					    "cbc(des3_ede))",
3707 				.cra_driver_name = "authenc-hmac-sha512-"
3708 						   "cbc-des3_ede-caam",
3709 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3710 			},
3711 			.setkey = aead_setkey,
3712 			.setauthsize = aead_setauthsize,
3713 			.encrypt = aead_encrypt,
3714 			.decrypt = aead_decrypt,
3715 			.ivsize = DES3_EDE_BLOCK_SIZE,
3716 			.maxauthsize = SHA512_DIGEST_SIZE,
3717 		},
3718 		.caam = {
3719 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3720 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3721 					   OP_ALG_AAI_HMAC_PRECOMP,
3722 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3723 		},
3724 	},
3725 	{
3726 		.aead = {
3727 			.base = {
3728 				.cra_name = "echainiv(authenc(hmac(sha512),"
3729 					    "cbc(des3_ede)))",
3730 				.cra_driver_name = "echainiv-authenc-"
3731 						   "hmac-sha512-"
3732 						   "cbc-des3_ede-caam",
3733 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3734 			},
3735 			.setkey = aead_setkey,
3736 			.setauthsize = aead_setauthsize,
3737 			.encrypt = aead_encrypt,
3738 			.decrypt = aead_decrypt,
3739 			.ivsize = DES3_EDE_BLOCK_SIZE,
3740 			.maxauthsize = SHA512_DIGEST_SIZE,
3741 		},
3742 		.caam = {
3743 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3744 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3745 					   OP_ALG_AAI_HMAC_PRECOMP,
3746 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3747 			.geniv = true,
3748 		},
3749 	},
3750 	{
3751 		.aead = {
3752 			.base = {
3753 				.cra_name = "authenc(hmac(md5),cbc(des))",
3754 				.cra_driver_name = "authenc-hmac-md5-"
3755 						   "cbc-des-caam",
3756 				.cra_blocksize = DES_BLOCK_SIZE,
3757 			},
3758 			.setkey = aead_setkey,
3759 			.setauthsize = aead_setauthsize,
3760 			.encrypt = aead_encrypt,
3761 			.decrypt = aead_decrypt,
3762 			.ivsize = DES_BLOCK_SIZE,
3763 			.maxauthsize = MD5_DIGEST_SIZE,
3764 		},
3765 		.caam = {
3766 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3767 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3768 					   OP_ALG_AAI_HMAC_PRECOMP,
3769 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3770 		},
3771 	},
3772 	{
3773 		.aead = {
3774 			.base = {
3775 				.cra_name = "echainiv(authenc(hmac(md5),"
3776 					    "cbc(des)))",
3777 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3778 						   "cbc-des-caam",
3779 				.cra_blocksize = DES_BLOCK_SIZE,
3780 			},
3781 			.setkey = aead_setkey,
3782 			.setauthsize = aead_setauthsize,
3783 			.encrypt = aead_encrypt,
3784 			.decrypt = aead_decrypt,
3785 			.ivsize = DES_BLOCK_SIZE,
3786 			.maxauthsize = MD5_DIGEST_SIZE,
3787 		},
3788 		.caam = {
3789 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3790 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3791 					   OP_ALG_AAI_HMAC_PRECOMP,
3792 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3793 			.geniv = true,
3794 		},
3795 	},
3796 	{
3797 		.aead = {
3798 			.base = {
3799 				.cra_name = "authenc(hmac(sha1),cbc(des))",
3800 				.cra_driver_name = "authenc-hmac-sha1-"
3801 						   "cbc-des-caam",
3802 				.cra_blocksize = DES_BLOCK_SIZE,
3803 			},
3804 			.setkey = aead_setkey,
3805 			.setauthsize = aead_setauthsize,
3806 			.encrypt = aead_encrypt,
3807 			.decrypt = aead_decrypt,
3808 			.ivsize = DES_BLOCK_SIZE,
3809 			.maxauthsize = SHA1_DIGEST_SIZE,
3810 		},
3811 		.caam = {
3812 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3813 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3814 					   OP_ALG_AAI_HMAC_PRECOMP,
3815 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3816 		},
3817 	},
3818 	{
3819 		.aead = {
3820 			.base = {
3821 				.cra_name = "echainiv(authenc(hmac(sha1),"
3822 					    "cbc(des)))",
3823 				.cra_driver_name = "echainiv-authenc-"
3824 						   "hmac-sha1-cbc-des-caam",
3825 				.cra_blocksize = DES_BLOCK_SIZE,
3826 			},
3827 			.setkey = aead_setkey,
3828 			.setauthsize = aead_setauthsize,
3829 			.encrypt = aead_encrypt,
3830 			.decrypt = aead_decrypt,
3831 			.ivsize = DES_BLOCK_SIZE,
3832 			.maxauthsize = SHA1_DIGEST_SIZE,
3833 		},
3834 		.caam = {
3835 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3836 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3837 					   OP_ALG_AAI_HMAC_PRECOMP,
3838 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3839 			.geniv = true,
3840 		},
3841 	},
3842 	{
3843 		.aead = {
3844 			.base = {
3845 				.cra_name = "authenc(hmac(sha224),cbc(des))",
3846 				.cra_driver_name = "authenc-hmac-sha224-"
3847 						   "cbc-des-caam",
3848 				.cra_blocksize = DES_BLOCK_SIZE,
3849 			},
3850 			.setkey = aead_setkey,
3851 			.setauthsize = aead_setauthsize,
3852 			.encrypt = aead_encrypt,
3853 			.decrypt = aead_decrypt,
3854 			.ivsize = DES_BLOCK_SIZE,
3855 			.maxauthsize = SHA224_DIGEST_SIZE,
3856 		},
3857 		.caam = {
3858 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3859 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3860 					   OP_ALG_AAI_HMAC_PRECOMP,
3861 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3862 		},
3863 	},
3864 	{
3865 		.aead = {
3866 			.base = {
3867 				.cra_name = "echainiv(authenc(hmac(sha224),"
3868 					    "cbc(des)))",
3869 				.cra_driver_name = "echainiv-authenc-"
3870 						   "hmac-sha224-cbc-des-caam",
3871 				.cra_blocksize = DES_BLOCK_SIZE,
3872 			},
3873 			.setkey = aead_setkey,
3874 			.setauthsize = aead_setauthsize,
3875 			.encrypt = aead_encrypt,
3876 			.decrypt = aead_decrypt,
3877 			.ivsize = DES_BLOCK_SIZE,
3878 			.maxauthsize = SHA224_DIGEST_SIZE,
3879 		},
3880 		.caam = {
3881 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3882 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3883 					   OP_ALG_AAI_HMAC_PRECOMP,
3884 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3885 			.geniv = true,
3886 		},
3887 	},
3888 	{
3889 		.aead = {
3890 			.base = {
3891 				.cra_name = "authenc(hmac(sha256),cbc(des))",
3892 				.cra_driver_name = "authenc-hmac-sha256-"
3893 						   "cbc-des-caam",
3894 				.cra_blocksize = DES_BLOCK_SIZE,
3895 			},
3896 			.setkey = aead_setkey,
3897 			.setauthsize = aead_setauthsize,
3898 			.encrypt = aead_encrypt,
3899 			.decrypt = aead_decrypt,
3900 			.ivsize = DES_BLOCK_SIZE,
3901 			.maxauthsize = SHA256_DIGEST_SIZE,
3902 		},
3903 		.caam = {
3904 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3905 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3906 					   OP_ALG_AAI_HMAC_PRECOMP,
3907 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3908 		},
3909 	},
3910 	{
3911 		.aead = {
3912 			.base = {
3913 				.cra_name = "echainiv(authenc(hmac(sha256),"
3914 					    "cbc(des)))",
3915 				.cra_driver_name = "echainiv-authenc-"
3916 						   "hmac-sha256-cbc-des-caam",
3917 				.cra_blocksize = DES_BLOCK_SIZE,
3918 			},
3919 			.setkey = aead_setkey,
3920 			.setauthsize = aead_setauthsize,
3921 			.encrypt = aead_encrypt,
3922 			.decrypt = aead_decrypt,
3923 			.ivsize = DES_BLOCK_SIZE,
3924 			.maxauthsize = SHA256_DIGEST_SIZE,
3925 		},
3926 		.caam = {
3927 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3928 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3929 					   OP_ALG_AAI_HMAC_PRECOMP,
3930 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3931 			.geniv = true,
3932 		},
3933 	},
3934 	{
3935 		.aead = {
3936 			.base = {
3937 				.cra_name = "authenc(hmac(sha384),cbc(des))",
3938 				.cra_driver_name = "authenc-hmac-sha384-"
3939 						   "cbc-des-caam",
3940 				.cra_blocksize = DES_BLOCK_SIZE,
3941 			},
3942 			.setkey = aead_setkey,
3943 			.setauthsize = aead_setauthsize,
3944 			.encrypt = aead_encrypt,
3945 			.decrypt = aead_decrypt,
3946 			.ivsize = DES_BLOCK_SIZE,
3947 			.maxauthsize = SHA384_DIGEST_SIZE,
3948 		},
3949 		.caam = {
3950 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3951 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3952 					   OP_ALG_AAI_HMAC_PRECOMP,
3953 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3954 		},
3955 	},
3956 	{
3957 		.aead = {
3958 			.base = {
3959 				.cra_name = "echainiv(authenc(hmac(sha384),"
3960 					    "cbc(des)))",
3961 				.cra_driver_name = "echainiv-authenc-"
3962 						   "hmac-sha384-cbc-des-caam",
3963 				.cra_blocksize = DES_BLOCK_SIZE,
3964 			},
3965 			.setkey = aead_setkey,
3966 			.setauthsize = aead_setauthsize,
3967 			.encrypt = aead_encrypt,
3968 			.decrypt = aead_decrypt,
3969 			.ivsize = DES_BLOCK_SIZE,
3970 			.maxauthsize = SHA384_DIGEST_SIZE,
3971 		},
3972 		.caam = {
3973 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3974 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3975 					   OP_ALG_AAI_HMAC_PRECOMP,
3976 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3977 			.geniv = true,
3978 		},
3979 	},
3980 	{
3981 		.aead = {
3982 			.base = {
3983 				.cra_name = "authenc(hmac(sha512),cbc(des))",
3984 				.cra_driver_name = "authenc-hmac-sha512-"
3985 						   "cbc-des-caam",
3986 				.cra_blocksize = DES_BLOCK_SIZE,
3987 			},
3988 			.setkey = aead_setkey,
3989 			.setauthsize = aead_setauthsize,
3990 			.encrypt = aead_encrypt,
3991 			.decrypt = aead_decrypt,
3992 			.ivsize = DES_BLOCK_SIZE,
3993 			.maxauthsize = SHA512_DIGEST_SIZE,
3994 		},
3995 		.caam = {
3996 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3997 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3998 					   OP_ALG_AAI_HMAC_PRECOMP,
3999 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4000 		},
4001 	},
4002 	{
4003 		.aead = {
4004 			.base = {
4005 				.cra_name = "echainiv(authenc(hmac(sha512),"
4006 					    "cbc(des)))",
4007 				.cra_driver_name = "echainiv-authenc-"
4008 						   "hmac-sha512-cbc-des-caam",
4009 				.cra_blocksize = DES_BLOCK_SIZE,
4010 			},
4011 			.setkey = aead_setkey,
4012 			.setauthsize = aead_setauthsize,
4013 			.encrypt = aead_encrypt,
4014 			.decrypt = aead_decrypt,
4015 			.ivsize = DES_BLOCK_SIZE,
4016 			.maxauthsize = SHA512_DIGEST_SIZE,
4017 		},
4018 		.caam = {
4019 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4020 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4021 					   OP_ALG_AAI_HMAC_PRECOMP,
4022 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4023 			.geniv = true,
4024 		},
4025 	},
4026 	{
4027 		.aead = {
4028 			.base = {
4029 				.cra_name = "authenc(hmac(md5),"
4030 					    "rfc3686(ctr(aes)))",
4031 				.cra_driver_name = "authenc-hmac-md5-"
4032 						   "rfc3686-ctr-aes-caam",
4033 				.cra_blocksize = 1,
4034 			},
4035 			.setkey = aead_setkey,
4036 			.setauthsize = aead_setauthsize,
4037 			.encrypt = aead_encrypt,
4038 			.decrypt = aead_decrypt,
4039 			.ivsize = CTR_RFC3686_IV_SIZE,
4040 			.maxauthsize = MD5_DIGEST_SIZE,
4041 		},
4042 		.caam = {
4043 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4044 					   OP_ALG_AAI_CTR_MOD128,
4045 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
4046 					   OP_ALG_AAI_HMAC_PRECOMP,
4047 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4048 			.rfc3686 = true,
4049 		},
4050 	},
4051 	{
4052 		.aead = {
4053 			.base = {
4054 				.cra_name = "seqiv(authenc("
4055 					    "hmac(md5),rfc3686(ctr(aes))))",
4056 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
4057 						   "rfc3686-ctr-aes-caam",
4058 				.cra_blocksize = 1,
4059 			},
4060 			.setkey = aead_setkey,
4061 			.setauthsize = aead_setauthsize,
4062 			.encrypt = aead_encrypt,
4063 			.decrypt = aead_decrypt,
4064 			.ivsize = CTR_RFC3686_IV_SIZE,
4065 			.maxauthsize = MD5_DIGEST_SIZE,
4066 		},
4067 		.caam = {
4068 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4069 					   OP_ALG_AAI_CTR_MOD128,
4070 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
4071 					   OP_ALG_AAI_HMAC_PRECOMP,
4072 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4073 			.rfc3686 = true,
4074 			.geniv = true,
4075 		},
4076 	},
4077 	{
4078 		.aead = {
4079 			.base = {
4080 				.cra_name = "authenc(hmac(sha1),"
4081 					    "rfc3686(ctr(aes)))",
4082 				.cra_driver_name = "authenc-hmac-sha1-"
4083 						   "rfc3686-ctr-aes-caam",
4084 				.cra_blocksize = 1,
4085 			},
4086 			.setkey = aead_setkey,
4087 			.setauthsize = aead_setauthsize,
4088 			.encrypt = aead_encrypt,
4089 			.decrypt = aead_decrypt,
4090 			.ivsize = CTR_RFC3686_IV_SIZE,
4091 			.maxauthsize = SHA1_DIGEST_SIZE,
4092 		},
4093 		.caam = {
4094 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4095 					   OP_ALG_AAI_CTR_MOD128,
4096 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4097 					   OP_ALG_AAI_HMAC_PRECOMP,
4098 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4099 			.rfc3686 = true,
4100 		},
4101 	},
4102 	{
4103 		.aead = {
4104 			.base = {
4105 				.cra_name = "seqiv(authenc("
4106 					    "hmac(sha1),rfc3686(ctr(aes))))",
4107 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
4108 						   "rfc3686-ctr-aes-caam",
4109 				.cra_blocksize = 1,
4110 			},
4111 			.setkey = aead_setkey,
4112 			.setauthsize = aead_setauthsize,
4113 			.encrypt = aead_encrypt,
4114 			.decrypt = aead_decrypt,
4115 			.ivsize = CTR_RFC3686_IV_SIZE,
4116 			.maxauthsize = SHA1_DIGEST_SIZE,
4117 		},
4118 		.caam = {
4119 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4120 					   OP_ALG_AAI_CTR_MOD128,
4121 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4122 					   OP_ALG_AAI_HMAC_PRECOMP,
4123 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4124 			.rfc3686 = true,
4125 			.geniv = true,
4126 		},
4127 	},
4128 	{
4129 		.aead = {
4130 			.base = {
4131 				.cra_name = "authenc(hmac(sha224),"
4132 					    "rfc3686(ctr(aes)))",
4133 				.cra_driver_name = "authenc-hmac-sha224-"
4134 						   "rfc3686-ctr-aes-caam",
4135 				.cra_blocksize = 1,
4136 			},
4137 			.setkey = aead_setkey,
4138 			.setauthsize = aead_setauthsize,
4139 			.encrypt = aead_encrypt,
4140 			.decrypt = aead_decrypt,
4141 			.ivsize = CTR_RFC3686_IV_SIZE,
4142 			.maxauthsize = SHA224_DIGEST_SIZE,
4143 		},
4144 		.caam = {
4145 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4146 					   OP_ALG_AAI_CTR_MOD128,
4147 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4148 					   OP_ALG_AAI_HMAC_PRECOMP,
4149 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4150 			.rfc3686 = true,
4151 		},
4152 	},
4153 	{
4154 		.aead = {
4155 			.base = {
4156 				.cra_name = "seqiv(authenc("
4157 					    "hmac(sha224),rfc3686(ctr(aes))))",
4158 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
4159 						   "rfc3686-ctr-aes-caam",
4160 				.cra_blocksize = 1,
4161 			},
4162 			.setkey = aead_setkey,
4163 			.setauthsize = aead_setauthsize,
4164 			.encrypt = aead_encrypt,
4165 			.decrypt = aead_decrypt,
4166 			.ivsize = CTR_RFC3686_IV_SIZE,
4167 			.maxauthsize = SHA224_DIGEST_SIZE,
4168 		},
4169 		.caam = {
4170 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4171 					   OP_ALG_AAI_CTR_MOD128,
4172 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4173 					   OP_ALG_AAI_HMAC_PRECOMP,
4174 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4175 			.rfc3686 = true,
4176 			.geniv = true,
4177 		},
4178 	},
4179 	{
4180 		.aead = {
4181 			.base = {
4182 				.cra_name = "authenc(hmac(sha256),"
4183 					    "rfc3686(ctr(aes)))",
4184 				.cra_driver_name = "authenc-hmac-sha256-"
4185 						   "rfc3686-ctr-aes-caam",
4186 				.cra_blocksize = 1,
4187 			},
4188 			.setkey = aead_setkey,
4189 			.setauthsize = aead_setauthsize,
4190 			.encrypt = aead_encrypt,
4191 			.decrypt = aead_decrypt,
4192 			.ivsize = CTR_RFC3686_IV_SIZE,
4193 			.maxauthsize = SHA256_DIGEST_SIZE,
4194 		},
4195 		.caam = {
4196 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4197 					   OP_ALG_AAI_CTR_MOD128,
4198 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4199 					   OP_ALG_AAI_HMAC_PRECOMP,
4200 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4201 			.rfc3686 = true,
4202 		},
4203 	},
4204 	{
4205 		.aead = {
4206 			.base = {
4207 				.cra_name = "seqiv(authenc(hmac(sha256),"
4208 					    "rfc3686(ctr(aes))))",
4209 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
4210 						   "rfc3686-ctr-aes-caam",
4211 				.cra_blocksize = 1,
4212 			},
4213 			.setkey = aead_setkey,
4214 			.setauthsize = aead_setauthsize,
4215 			.encrypt = aead_encrypt,
4216 			.decrypt = aead_decrypt,
4217 			.ivsize = CTR_RFC3686_IV_SIZE,
4218 			.maxauthsize = SHA256_DIGEST_SIZE,
4219 		},
4220 		.caam = {
4221 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4222 					   OP_ALG_AAI_CTR_MOD128,
4223 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4224 					   OP_ALG_AAI_HMAC_PRECOMP,
4225 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4226 			.rfc3686 = true,
4227 			.geniv = true,
4228 		},
4229 	},
4230 	{
4231 		.aead = {
4232 			.base = {
4233 				.cra_name = "authenc(hmac(sha384),"
4234 					    "rfc3686(ctr(aes)))",
4235 				.cra_driver_name = "authenc-hmac-sha384-"
4236 						   "rfc3686-ctr-aes-caam",
4237 				.cra_blocksize = 1,
4238 			},
4239 			.setkey = aead_setkey,
4240 			.setauthsize = aead_setauthsize,
4241 			.encrypt = aead_encrypt,
4242 			.decrypt = aead_decrypt,
4243 			.ivsize = CTR_RFC3686_IV_SIZE,
4244 			.maxauthsize = SHA384_DIGEST_SIZE,
4245 		},
4246 		.caam = {
4247 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4248 					   OP_ALG_AAI_CTR_MOD128,
4249 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4250 					   OP_ALG_AAI_HMAC_PRECOMP,
4251 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4252 			.rfc3686 = true,
4253 		},
4254 	},
4255 	{
4256 		.aead = {
4257 			.base = {
4258 				.cra_name = "seqiv(authenc(hmac(sha384),"
4259 					    "rfc3686(ctr(aes))))",
4260 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
4261 						   "rfc3686-ctr-aes-caam",
4262 				.cra_blocksize = 1,
4263 			},
4264 			.setkey = aead_setkey,
4265 			.setauthsize = aead_setauthsize,
4266 			.encrypt = aead_encrypt,
4267 			.decrypt = aead_decrypt,
4268 			.ivsize = CTR_RFC3686_IV_SIZE,
4269 			.maxauthsize = SHA384_DIGEST_SIZE,
4270 		},
4271 		.caam = {
4272 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4273 					   OP_ALG_AAI_CTR_MOD128,
4274 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4275 					   OP_ALG_AAI_HMAC_PRECOMP,
4276 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4277 			.rfc3686 = true,
4278 			.geniv = true,
4279 		},
4280 	},
4281 	{
4282 		.aead = {
4283 			.base = {
4284 				.cra_name = "authenc(hmac(sha512),"
4285 					    "rfc3686(ctr(aes)))",
4286 				.cra_driver_name = "authenc-hmac-sha512-"
4287 						   "rfc3686-ctr-aes-caam",
4288 				.cra_blocksize = 1,
4289 			},
4290 			.setkey = aead_setkey,
4291 			.setauthsize = aead_setauthsize,
4292 			.encrypt = aead_encrypt,
4293 			.decrypt = aead_decrypt,
4294 			.ivsize = CTR_RFC3686_IV_SIZE,
4295 			.maxauthsize = SHA512_DIGEST_SIZE,
4296 		},
4297 		.caam = {
4298 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4299 					   OP_ALG_AAI_CTR_MOD128,
4300 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4301 					   OP_ALG_AAI_HMAC_PRECOMP,
4302 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4303 			.rfc3686 = true,
4304 		},
4305 	},
4306 	{
4307 		.aead = {
4308 			.base = {
4309 				.cra_name = "seqiv(authenc(hmac(sha512),"
4310 					    "rfc3686(ctr(aes))))",
4311 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
4312 						   "rfc3686-ctr-aes-caam",
4313 				.cra_blocksize = 1,
4314 			},
4315 			.setkey = aead_setkey,
4316 			.setauthsize = aead_setauthsize,
4317 			.encrypt = aead_encrypt,
4318 			.decrypt = aead_decrypt,
4319 			.ivsize = CTR_RFC3686_IV_SIZE,
4320 			.maxauthsize = SHA512_DIGEST_SIZE,
4321 		},
4322 		.caam = {
4323 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4324 					   OP_ALG_AAI_CTR_MOD128,
4325 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4326 					   OP_ALG_AAI_HMAC_PRECOMP,
4327 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4328 			.rfc3686 = true,
4329 			.geniv = true,
4330 		},
4331 	},
4332 };
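
/*
 * Usage sketch (illustrative only, kept under #if 0 so it is never built;
 * assumes the standard <crypto/aead.h> API and a hypothetical caller name):
 * a consumer requests an AEAD by its generic cra_name, and with this driver
 * loaded the higher-priority "*-caam" implementation from the table above
 * is bound to the tfm.
 */
#if 0
static int caam_aead_usage_sketch(void)
{
	struct crypto_aead *tfm;
	int err;

	/* resolves to "authenc-hmac-sha1-cbc-aes-caam" at CAAM_CRA_PRIORITY */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* the ICV length may be at most .maxauthsize (SHA1_DIGEST_SIZE here) */
	err = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);

	crypto_free_aead(tfm);
	return err;
}
#endif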
4333 
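/*
 * Registration wrapper for the legacy (ablkcipher/givcipher) templates:
 * the generic crypto_alg plus the CAAM descriptor-header bits that
 * caam_cra_init() copies into the per-tfm context.
 */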
4334 struct caam_crypto_alg {
4335 	struct crypto_alg crypto_alg;
4336 	struct list_head entry;
4337 	struct caam_alg_entry caam;
4338 };
4339 
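/*
 * Common per-transform setup: attach the tfm to a job ring and cache the
 * OPERATION-command header template values that the setkey paths later
 * splice into shared descriptors.
 */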
4340 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4341 {
4342 	ctx->jrdev = caam_jr_alloc();
4343 	if (IS_ERR(ctx->jrdev)) {
4344 		pr_err("Job Ring Device allocation for transform failed\n");
4345 		return PTR_ERR(ctx->jrdev);
4346 	}
4347 
4348 	/* copy descriptor header template value */
4349 	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4350 	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4351 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
4352 
4353 	return 0;
4354 }
4355 
4356 static int caam_cra_init(struct crypto_tfm *tfm)
4357 {
4358 	struct crypto_alg *alg = tfm->__crt_alg;
4359 	struct caam_crypto_alg *caam_alg =
4360 		 container_of(alg, struct caam_crypto_alg, crypto_alg);
4361 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4362 
4363 	return caam_init_common(ctx, &caam_alg->caam);
4364 }
4365 
4366 static int caam_aead_init(struct crypto_aead *tfm)
4367 {
4368 	struct aead_alg *alg = crypto_aead_alg(tfm);
4369 	struct caam_aead_alg *caam_alg =
4370 		 container_of(alg, struct caam_aead_alg, aead);
4371 	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4372 
4373 	return caam_init_common(ctx, &caam_alg->caam);
4374 }
4375 
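/*
 * Inverse of caam_init_common() plus any setkey-time DMA state: unmap each
 * shared descriptor and the key only if they were successfully mapped, then
 * release the job ring.
 */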
4376 static void caam_exit_common(struct caam_ctx *ctx)
4377 {
4378 	if (ctx->sh_desc_enc_dma &&
4379 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
4380 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
4381 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
4382 	if (ctx->sh_desc_dec_dma &&
4383 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
4384 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
4385 				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
4386 	if (ctx->sh_desc_givenc_dma &&
4387 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
4388 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
4389 				 desc_bytes(ctx->sh_desc_givenc),
4390 				 DMA_TO_DEVICE);
4391 	if (ctx->key_dma &&
4392 	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
4393 		dma_unmap_single(ctx->jrdev, ctx->key_dma,
4394 				 ctx->enckeylen + ctx->split_key_pad_len,
4395 				 DMA_TO_DEVICE);
4396 
4397 	caam_jr_free(ctx->jrdev);
4398 }
4399 
4400 static void caam_cra_exit(struct crypto_tfm *tfm)
4401 {
4402 	caam_exit_common(crypto_tfm_ctx(tfm));
4403 }
4404 
4405 static void caam_aead_exit(struct crypto_aead *tfm)
4406 {
4407 	caam_exit_common(crypto_aead_ctx(tfm));
4408 }
4409 
4410 static void __exit caam_algapi_exit(void)
4411 {
4413 	struct caam_crypto_alg *t_alg, *n;
4414 	int i;
4415 
4416 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4417 		struct caam_aead_alg *t_alg = driver_aeads + i;
4418 
4419 		if (t_alg->registered)
4420 			crypto_unregister_aead(&t_alg->aead);
4421 	}
4422 
4423 	if (!alg_list.next)
4424 		return;
4425 
4426 	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
4427 		crypto_unregister_alg(&t_alg->crypto_alg);
4428 		list_del(&t_alg->entry);
4429 		kfree(t_alg);
4430 	}
4431 }
4432 
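/*
 * Turn a legacy driver_algs template into a registrable crypto_alg; the
 * caller frees the returned object if registration fails.
 */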
4433 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
4434 					      *template)
4435 {
4436 	struct caam_crypto_alg *t_alg;
4437 	struct crypto_alg *alg;
4438 
4439 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4440 	if (!t_alg) {
4441 		pr_err("failed to allocate t_alg\n");
4442 		return ERR_PTR(-ENOMEM);
4443 	}
4444 
4445 	alg = &t_alg->crypto_alg;
4446 
4447 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
4448 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4449 		 template->driver_name);
4450 	alg->cra_module = THIS_MODULE;
4451 	alg->cra_init = caam_cra_init;
4452 	alg->cra_exit = caam_cra_exit;
4453 	alg->cra_priority = CAAM_CRA_PRIORITY;
4454 	alg->cra_blocksize = template->blocksize;
4455 	alg->cra_alignmask = 0;
4456 	alg->cra_ctxsize = sizeof(struct caam_ctx);
4457 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
4458 			 template->type;
4459 	switch (template->type) {
4460 	case CRYPTO_ALG_TYPE_GIVCIPHER:
4461 		alg->cra_type = &crypto_givcipher_type;
4462 		alg->cra_ablkcipher = template->template_ablkcipher;
4463 		break;
4464 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
4465 		alg->cra_type = &crypto_ablkcipher_type;
4466 		alg->cra_ablkcipher = template->template_ablkcipher;
4467 		break;
4468 	}
4469 
4470 	t_alg->caam.class1_alg_type = template->class1_alg_type;
4471 	t_alg->caam.class2_alg_type = template->class2_alg_type;
4472 	t_alg->caam.alg_op = template->alg_op;
4473 
4474 	return t_alg;
4475 }
4476 
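/* Fill in the fields shared by every driver_aeads entry before registering */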
4477 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
4478 {
4479 	struct aead_alg *alg = &t_alg->aead;
4480 
4481 	alg->base.cra_module = THIS_MODULE;
4482 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
4483 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
4484 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
4485 
4486 	alg->init = caam_aead_init;
4487 	alg->exit = caam_aead_exit;
4488 }
4489 
4490 static int __init caam_algapi_init(void)
4491 {
4492 	struct device_node *dev_node;
4493 	struct platform_device *pdev;
4494 	struct device *ctrldev;
4495 	struct caam_drv_private *priv;
4496 	int i = 0, err = 0;
4497 	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4498 	unsigned int md_limit = SHA512_DIGEST_SIZE;
4499 	bool registered = false;
4500 
4501 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4502 	if (!dev_node) {
4503 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4504 		if (!dev_node)
4505 			return -ENODEV;
4506 	}
4507 
4508 	pdev = of_find_device_by_node(dev_node);
4509 	if (!pdev) {
4510 		of_node_put(dev_node);
4511 		return -ENODEV;
4512 	}
4513 
4514 	ctrldev = &pdev->dev;
4515 	priv = dev_get_drvdata(ctrldev);
4516 	of_node_put(dev_node);
4517 
4518 	/*
4519 	 * If priv is NULL, it's probably because the caam driver wasn't
4520 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4521 	 */
4522 	if (!priv)
4523 		return -ENODEV;
4524 
4526 	INIT_LIST_HEAD(&alg_list);
4527 
4528 	/*
4529 	 * Register crypto algorithms the device supports.
4530 	 * First, detect presence and attributes of DES, AES, and MD blocks.
4531 	 */
4532 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4533 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4534 	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4535 	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4536 	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4537 
4538 	/* If MD is present, limit digest size based on LP256 */
4539 	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4540 		md_limit = SHA256_DIGEST_SIZE;
4541 
4542 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4543 		struct caam_crypto_alg *t_alg;
4544 		struct caam_alg_template *alg = driver_algs + i;
4545 		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
4546 
4547 		/* Skip DES algorithms if not supported by device */
4548 		if (!des_inst &&
4549 		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
4550 		     (alg_sel == OP_ALG_ALGSEL_DES)))
4551 			continue;
4552 
4553 		/* Skip AES algorithms if not supported by device */
4554 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
4555 			continue;
4556 
4557 		/*
4558 		 * Check support for AES modes not available
4559 		 * on LP devices.
4560 		 */
4561 		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
4562 			if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
4563 			     OP_ALG_AAI_XTS)
4564 				continue;
4565 
4566 		t_alg = caam_alg_alloc(alg);
4567 		if (IS_ERR(t_alg)) {
4568 			err = PTR_ERR(t_alg);
4569 			pr_warn("%s alg allocation failed\n", alg->driver_name);
4570 			continue;
4571 		}
4572 
4573 		err = crypto_register_alg(&t_alg->crypto_alg);
4574 		if (err) {
4575 			pr_warn("%s alg registration failed\n",
4576 				t_alg->crypto_alg.cra_driver_name);
4577 			kfree(t_alg);
4578 			continue;
4579 		}
4580 
4581 		list_add_tail(&t_alg->entry, &alg_list);
4582 		registered = true;
4583 	}
4584 
4585 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4586 		struct caam_aead_alg *t_alg = driver_aeads + i;
4587 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
4588 				 OP_ALG_ALGSEL_MASK;
4589 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
4590 				 OP_ALG_ALGSEL_MASK;
4591 		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
4592 
4593 		/* Skip DES algorithms if not supported by device */
4594 		if (!des_inst &&
4595 		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
4596 		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
4597 			continue;
4598 
4599 		/* Skip AES algorithms if not supported by device */
4600 		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
4601 			continue;
4602 
4603 		/*
4604 		 * Check support for AES algorithms not available
4605 		 * on LP devices.
4606 		 */
4607 		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
4608 			if (alg_aai == OP_ALG_AAI_GCM)
4609 				continue;
4610 
4611 		/*
4612 		 * Skip algorithms requiring message digests
4613 		 * if MD or MD size is not supported by device.
4614 		 */
4615 		if (c2_alg_sel &&
4616 		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
4617 			continue;
4618 
4619 		caam_aead_alg_init(t_alg);
4620 
4621 		err = crypto_register_aead(&t_alg->aead);
4622 		if (err) {
4623 			pr_warn("%s alg registration failed\n",
4624 				t_alg->aead.base.cra_driver_name);
4625 			continue;
4626 		}
4627 
4628 		t_alg->registered = true;
4629 		registered = true;
4630 	}
4631 
4632 	if (registered)
4633 		pr_info("caam algorithms registered in /proc/crypto\n");
4634 
4635 	return err;
4636 }
4637 
4638 module_init(caam_algapi_init);
4639 module_exit(caam_algapi_exit);
4640 
4641 MODULE_LICENSE("GPL");
4642 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
4643 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
4644