// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define MAX_IVLEN   16
#define NPE_ID      2  /* NPE C */
#define NPE_QLEN    16
/* Extra descriptors beyond the first NPE_QLEN, reserved for setkey-time
 * requests (see get_crypt_desc_emerg()) so they can still be queued when
 * all regular crypt_ctl entries are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID    29
#define RECV_QID    30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

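/*
 * Request flow: the host fills in a 64-byte crypt_ctl descriptor in a
 * coherent DMA ring, pushes its physical address onto the SEND_QID
 * hardware queue, and NPE-C hands the address back on RECV_QID once the
 * operation is done.  Completions are drained by crypto_done_action(),
 * a tasklet scheduled from the queue manager interrupt; the low bits of
 * the returned address carry the status (see one_packet()).
 */
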
struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	dma_addr_t phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_*  operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_*  operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	dma_addr_t icv_rev_aes;	/* icv or rev aes */
	dma_addr_t src_buf;
	dma_addr_t dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4 * 4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};
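
/*
 * The part of crypt_ctl up to and including crypto_ctx is parsed by the
 * big-endian NPE, which is why the u8/u16 members above are declared in
 * opposite order on little-endian hosts: that way each field lands at
 * the byte offset the NPE expects within its 32-bit words.  The members
 * after crypto_ctx are host-only bookkeeping and pad the structure to
 * the 64 bytes asserted by the BUILD_BUG_ON() in setup_crypt_desc().
 */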

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};
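
/*
 * The .icv strings above are the standard initial chaining values of
 * MD5 (0x67452301, ... stored little-endian) and SHA-1 (0x67452301, ...
 * stored big-endian).  They seed the hash state from which
 * register_chain_var() precomputes the HMAC inner and outer pad
 * digests.
 */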

static struct npe *npe_c;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* Size the ring for all NPE_QLEN_TOTAL descriptors, including the
	 * emergency ones handed out by get_crypt_desc_emerg(); allocating
	 * only NPE_QLEN entries would let the emergency path index past
	 * the end of the ring, and release_ixp_crypto() already frees
	 * NPE_QLEN_TOTAL entries. */
	crypt_virt = dma_alloc_coherent(dev,
					NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
					&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	return 0;
}

static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}
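
/*
 * The descriptor ring is split in two: get_crypt_desc() serves normal
 * requests from entries [0, NPE_QLEN), while get_crypt_desc_emerg()
 * falls back to entries [NPE_QLEN, NPE_QLEN_TOTAL) so that setkey-time
 * work (HMAC pad hashing, reverse AES key generation) can still make
 * progress when the fast-path descriptors are exhausted.
 */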

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   dma_addr_t phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		/* phys_addr is the dma_map_single() mapping of the data
		 * buffer; phys_next is the pool address of the next
		 * descriptor and must not be passed to dma_unmap_single() */
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->dst, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt)
			finish_scattered_hmac(crypt);

		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst)
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);

		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}
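
/*
 * The tasklet drains at most four completions per run and then
 * reschedules itself, so a long burst of finished requests cannot
 * monopolize softirq context.
 */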

static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
				npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
			npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool)
		goto err;

	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool)
		goto err;

	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx)
		return -ENOMEM;

	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret)
		free_sa_dir(&ctx->encrypt);

	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}

static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	dma_addr_t pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
		pad[i] ^= xpad;

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}
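
/*
 * register_chain_var() is the usual HMAC precomputation: since
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), the hash states of
 * the xor-padded key blocks depend only on the key.  They are hashed
 * once by the NPE (NPE_OP_HASH_GEN_ICV) and deposited at 'target'
 * inside the NPE context, so per-request hashing can resume from the
 * saved chaining variables instead of re-processing the key pads.
 */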

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}
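
/*
 * Hash part of the NPE context, starting at npe_ctx + npe_ctx_idx:
 * the cfgword, then the standard ICV.  itarget and otarget are the
 * physical addresses where the two register_chain_var() calls have the
 * NPE deposit the HMAC inner and outer chaining variables, the outer
 * one directly behind the inner; NPE_CTX_LEN leaves room for
 * "cfgword + 2 * digestlen" as noted at the top of the file.
 */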

static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt)
		return -EAGAIN;

	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}
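
/*
 * AES decryption needs the inverse key schedule.  Instead of computing
 * it on the host, the decrypt context is temporarily flagged CIPH_ENCR
 * and the NPE is asked to "encrypt" one block with NPE_OP_ENC_GEN_KEY,
 * which makes it write the reverse key right after the cfg word
 * (icv_rev_aes).  The CTL_FLAG_GEN_REVAES completion in one_packet()
 * clears CIPH_ENCR again.
 */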

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else {
		int err = crypto_des_verify_key(tfm, key);

		/* reject invalid DES keys instead of silently ignoring
		 * the verification result */
		if (err)
			return err;
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt)
		return gen_rev_aes_key(tfm);

	return 0;
}
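
/*
 * Cipher part of the NPE context, as written by setup_cipher(): one
 * big-endian cfg word followed by the raw key.  The key field is
 * always DES3_EDE_KEY_SIZE bytes for the DES variants (shorter keys
 * are zero-padded), while for AES the key length is encoded in the cfg
 * word itself via MOD_AES128/192/256.
 */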

static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		dma_addr_t next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = sg_virt(sg);
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			/* Terminate the partial chain so the caller can
			 * free it through the hook, then report failure;
			 * falling out of the loop with buf == NULL would
			 * dereference a NULL pointer below. */
			buf->next = NULL;
			buf->phys_next = 0;
			return NULL;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}
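
/*
 * chainup_buffers() is driven through an on-stack "hook" descriptor:
 * the caller passes &src_hook/&dst_hook, freshly allocated pool
 * descriptors are linked behind it (one mapped scatterlist segment
 * each), and the real chain is picked up afterwards from
 * hook.next/hook.phys_next.  On success it returns the last descriptor
 * of the chain; on allocation failure it returns NULL.
 */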

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int key_len)
{
	return verify_ablkcipher_des3_key(tfm, key) ?:
	       ablk_setkey(tfm, key, key_len);
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc *buf, src_hook;
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		buf = chainup_buffers(dev, req->dst, nbytes, &dst_hook,
				      flags, DMA_FROM_DEVICE);
		/* pick up the chain from the hook even on failure, so the
		 * error path below can free a partially built chain */
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
		if (!buf)
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
	} else {
		req_ctx->dst = NULL;
	}
	buf = chainup_buffers(dev, req->src, nbytes, &src_hook,
			      flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst)
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}
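
/*
 * RFC 3686 counter block as assembled above:
 *
 *	bytes  0-3	nonce (taken from the last four key bytes in
 *			ablk_rfc3686_setkey())
 *	bytes  4-11	per-request IV
 *	bytes 12-15	block counter, starting at 1
 *
 * CTR mode is the same keystream operation in both directions, which
 * is why encrypt and decrypt both call ablk_perform(req, 1) and the
 * algorithm entries use CIPH_ENCR for cfg_dec as well.
 */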

static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
		/* The ICV does not fit into the last buffer of the chain,
		 * so copy it through a contiguous bounce buffer instead */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
	if (err)
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.setkey		= ablk_des3_setkey,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.setkey		= ablk_des3_setkey,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };
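
/*
 * Entries above leave .setkey/.encrypt/.decrypt unset where the common
 * ablk_setkey()/ablk_encrypt()/ablk_decrypt() defaults apply;
 * ixp_module_init() fills those in before registration.
 */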

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name		= DRIVER_NAME,
	.id		= 0,
	.dma_mask	= DMA_BIT_MASK(32),
};

static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;

		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
			continue;

		/* block ciphers */
		cra->cra_type = &crypto_ablkcipher_type;
		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				 CRYPTO_ALG_KERN_DRIVER_ONLY |
				 CRYPTO_ALG_ASYNC;
		if (!cra->cra_ablkcipher.setkey)
			cra->cra_ablkcipher.setkey = ablk_setkey;
		if (!cra->cra_ablkcipher.encrypt)
			cra->cra_ablkcipher.encrypt = ablk_encrypt;
		if (!cra->cra_ablkcipher.decrypt)
			cra->cra_ablkcipher.decrypt = ablk_decrypt;
		cra->cra_init = init_tfm_ablk;

		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		/* check this entry's own config word, not the one of the
		 * ablkcipher table at the same index */
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC;
		cra->setkey = cra->setkey ?: aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");