1 /*
2  * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
17  */
18 #include <crypto/internal/aead.h>
19 #include <crypto/aes.h>
20 #include <crypto/algapi.h>
21 #include <crypto/authenc.h>
22 #include <crypto/des.h>
23 #include <crypto/md5.h>
24 #include <crypto/sha.h>
25 #include <crypto/internal/skcipher.h>
26 #include <linux/clk.h>
27 #include <linux/crypto.h>
28 #include <linux/delay.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/dmapool.h>
31 #include <linux/err.h>
32 #include <linux/init.h>
33 #include <linux/interrupt.h>
34 #include <linux/io.h>
35 #include <linux/list.h>
36 #include <linux/module.h>
37 #include <linux/of.h>
38 #include <linux/platform_device.h>
39 #include <linux/pm.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/scatterlist.h>
42 #include <linux/sched.h>
43 #include <linux/sizes.h>
44 #include <linux/slab.h>
45 #include <linux/timer.h>
46 
47 #include "picoxcell_crypto_regs.h"
48 
49 /*
50  * The threshold for the number of entries in the CMD FIFO available before
51  * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
52  * number of interrupts raised to the CPU.
53  */
54 #define CMD0_IRQ_THRESHOLD   1
55 
56 /*
57  * The timeout period (in jiffies) for a PDU. When the number of PDUs in
58  * flight is greater than the STAT_IRQ_THRESHOLD, or is 0, the timer is disabled.
59  * When there are packets in flight but lower than the threshold, we enable
60  * the timer and at expiry, attempt to remove any processed packets from the
61  * queue and if there are still packets left, schedule the timer again.
62  */
63 #define PACKET_TIMEOUT	    1
64 
65 /* The priority to register each algorithm with. */
66 #define SPACC_CRYPTO_ALG_PRIORITY	10000
67 
68 #define SPACC_CRYPTO_KASUMI_F8_KEY_LEN	16
69 #define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
70 #define SPACC_CRYPTO_IPSEC_HASH_PG_SZ	64
71 #define SPACC_CRYPTO_IPSEC_MAX_CTXS	32
72 #define SPACC_CRYPTO_IPSEC_FIFO_SZ	32
73 #define SPACC_CRYPTO_L2_CIPHER_PG_SZ	64
74 #define SPACC_CRYPTO_L2_HASH_PG_SZ	64
75 #define SPACC_CRYPTO_L2_MAX_CTXS	128
76 #define SPACC_CRYPTO_L2_FIFO_SZ		128
77 
78 #define MAX_DDT_LEN			16
79 
80 /* DDT format. This must match the hardware DDT format exactly. */
81 struct spacc_ddt {
82 	dma_addr_t	p;
83 	u32		len;
84 };
85 
86 /*
87  * Asynchronous crypto request structure.
88  *
89  * This structure defines a request that is either queued for processing or
90  * being processed.
91  */
92 struct spacc_req {
93 	struct list_head		list;
94 	struct spacc_engine		*engine;
95 	struct crypto_async_request	*req;
96 	int				result;
97 	bool				is_encrypt;
98 	unsigned			ctx_id;
99 	dma_addr_t			src_addr, dst_addr;
100 	struct spacc_ddt		*src_ddt, *dst_ddt;
101 	void				(*complete)(struct spacc_req *req);
102 };
103 
104 struct spacc_aead {
105 	unsigned long			ctrl_default;
106 	unsigned long			type;
107 	struct aead_alg			alg;
108 	struct spacc_engine		*engine;
109 	struct list_head		entry;
110 	int				key_offs;
111 	int				iv_offs;
112 };
113 
114 struct spacc_engine {
115 	void __iomem			*regs;
116 	struct list_head		pending;
117 	int				next_ctx;
118 	spinlock_t			hw_lock;
119 	int				in_flight;
120 	struct list_head		completed;
121 	struct list_head		in_progress;
122 	struct tasklet_struct		complete;
123 	unsigned long			fifo_sz;
124 	void __iomem			*cipher_ctx_base;
125 	void __iomem			*hash_key_base;
126 	struct spacc_alg		*algs;
127 	unsigned			num_algs;
128 	struct list_head		registered_algs;
129 	struct spacc_aead		*aeads;
130 	unsigned			num_aeads;
131 	struct list_head		registered_aeads;
132 	size_t				cipher_pg_sz;
133 	size_t				hash_pg_sz;
134 	const char			*name;
135 	struct clk			*clk;
136 	struct device			*dev;
137 	unsigned			max_ctxs;
138 	struct timer_list		packet_timeout;
139 	unsigned			stat_irq_thresh;
140 	struct dma_pool			*req_pool;
141 };
142 
143 /* Algorithm type mask. */
144 #define SPACC_CRYPTO_ALG_MASK		0x7
145 
146 /* SPACC definition of a crypto algorithm. */
147 struct spacc_alg {
148 	unsigned long			ctrl_default;
149 	unsigned long			type;
150 	struct crypto_alg		alg;
151 	struct spacc_engine		*engine;
152 	struct list_head		entry;
153 	int				key_offs;
154 	int				iv_offs;
155 };
156 
157 /* Generic context structure for any algorithm type. */
158 struct spacc_generic_ctx {
159 	struct spacc_engine		*engine;
160 	int				flags;
161 	int				key_offs;
162 	int				iv_offs;
163 };
164 
165 /* Block cipher context. */
166 struct spacc_ablk_ctx {
167 	struct spacc_generic_ctx	generic;
168 	u8				key[AES_MAX_KEY_SIZE];
169 	u8				key_len;
170 	/*
171 	 * The fallback cipher. If the operation can't be done in hardware,
172 	 * fallback to a software version.
173 	 */
174 	struct crypto_ablkcipher	*sw_cipher;
175 };
176 
177 /* AEAD cipher context. */
178 struct spacc_aead_ctx {
179 	struct spacc_generic_ctx	generic;
180 	u8				cipher_key[AES_MAX_KEY_SIZE];
181 	u8				hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
182 	u8				cipher_key_len;
183 	u8				hash_key_len;
184 	struct crypto_aead		*sw_cipher;
185 };
186 
187 static int spacc_ablk_submit(struct spacc_req *req);
188 
189 static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
190 {
191 	return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
192 }
193 
194 static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
195 {
196 	return container_of(alg, struct spacc_aead, alg);
197 }
198 
199 static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
200 {
201 	u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
202 
203 	return fifo_stat & SPA_FIFO_CMD_FULL;
204 }
205 
206 /*
207  * Given a cipher context, and a context number, get the base address of the
208  * context page.
209  *
210  * Returns the address of the context page where the key/context may
211  * be written.
212  */
213 static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
214 						unsigned indx,
215 						bool is_cipher_ctx)
216 {
217 	return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
218 			(indx * ctx->engine->cipher_pg_sz) :
219 		ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
220 }
221 
222 /* The context pages can only be written with 32-bit accesses. */
223 static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
224 				 unsigned count)
225 {
226 	const u32 *src32 = (const u32 *) src;
227 
228 	while (count--)
229 		writel(*src32++, dst++);
230 }
231 
232 static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
233 				   void __iomem *page_addr, const u8 *key,
234 				   size_t key_len, const u8 *iv, size_t iv_len)
235 {
236 	void __iomem *key_ptr = page_addr + ctx->key_offs;
237 	void __iomem *iv_ptr = page_addr + ctx->iv_offs;
238 
239 	memcpy_toio32(key_ptr, key, key_len / 4);
240 	memcpy_toio32(iv_ptr, iv, iv_len / 4);
241 }
242 
243 /*
244  * Load a context into the engines context memory.
245  *
246  * Returns the index of the context page where the context was loaded.
247  */
248 static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
249 			       const u8 *ciph_key, size_t ciph_len,
250 			       const u8 *iv, size_t ivlen, const u8 *hash_key,
251 			       size_t hash_len)
252 {
253 	unsigned indx = ctx->engine->next_ctx++;
254 	void __iomem *ciph_page_addr, *hash_page_addr;
255 
256 	ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
257 	hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);
258 
259 	ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
260 	spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
261 			       ivlen);
262 	writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
263 	       (1 << SPA_KEY_SZ_CIPHER_OFFSET),
264 	       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
265 
266 	if (hash_key) {
267 		memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
268 		writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
269 		       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
270 	}
271 
272 	return indx;
273 }
274 
275 /* Count the number of scatterlist entries in a scatterlist. */
276 static inline int sg_count(struct scatterlist *sg_list, int nbytes)
277 {
278 	return sg_nents_for_len(sg_list, nbytes);
279 }
280 
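/* Fill in a single DDT entry with the DMA address and length of a buffer. */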
281 static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
282 {
283 	ddt->p = phys;
284 	ddt->len = len;
285 }
286 
287 /*
288  * Take a crypto request and scatterlists for the data and turn them into DDTs
289  * for passing to the crypto engines. This also DMA maps the data so that the
290  * crypto engines can DMA to/from them.
291  */
292 static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
293 					 struct scatterlist *payload,
294 					 unsigned nbytes,
295 					 enum dma_data_direction dir,
296 					 dma_addr_t *ddt_phys)
297 {
298 	unsigned nents, mapped_ents;
299 	struct scatterlist *cur;
300 	struct spacc_ddt *ddt;
301 	int i;
302 
303 	nents = sg_count(payload, nbytes);
304 	mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
305 
306 	if (mapped_ents + 1 > MAX_DDT_LEN)
307 		goto out;
308 
309 	ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
310 	if (!ddt)
311 		goto out;
312 
313 	for_each_sg(payload, cur, mapped_ents, i)
314 		ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
315 	ddt_set(&ddt[mapped_ents], 0, 0);
316 
317 	return ddt;
318 
319 out:
320 	dma_unmap_sg(engine->dev, payload, nents, dir);
321 	return NULL;
322 }
323 
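/*
 * Build the source and destination DDT lists for an AEAD request and DMA map
 * the scatterlists. Returns -E2BIG if either list would need more than
 * MAX_DDT_LEN entries, in which case the caller falls back to software.
 */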
324 static int spacc_aead_make_ddts(struct aead_request *areq)
325 {
326 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
327 	struct spacc_req *req = aead_request_ctx(areq);
328 	struct spacc_engine *engine = req->engine;
329 	struct spacc_ddt *src_ddt, *dst_ddt;
330 	unsigned total;
331 	unsigned int src_nents, dst_nents;
332 	struct scatterlist *cur;
333 	int i, dst_ents, src_ents;
334 
335 	total = areq->assoclen + areq->cryptlen;
336 	if (req->is_encrypt)
337 		total += crypto_aead_authsize(aead);
338 
339 	src_nents = sg_count(areq->src, total);
340 	if (src_nents + 1 > MAX_DDT_LEN)
341 		return -E2BIG;
342 
343 	dst_nents = 0;
344 	if (areq->src != areq->dst) {
345 		dst_nents = sg_count(areq->dst, total);
346 		if (dst_nents + 1 > MAX_DDT_LEN)
347 			return -E2BIG;
348 	}
349 
350 	src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
351 	if (!src_ddt)
352 		goto err;
353 
354 	dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
355 	if (!dst_ddt)
356 		goto err_free_src;
357 
358 	req->src_ddt = src_ddt;
359 	req->dst_ddt = dst_ddt;
360 
361 	if (dst_nents) {
362 		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
363 				      DMA_TO_DEVICE);
364 		if (!src_ents)
365 			goto err_free_dst;
366 
367 		dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
368 				      DMA_FROM_DEVICE);
369 
370 		if (!dst_ents) {
371 			dma_unmap_sg(engine->dev, areq->src, src_nents,
372 				     DMA_TO_DEVICE);
373 			goto err_free_dst;
374 		}
375 	} else {
376 		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
377 				      DMA_BIDIRECTIONAL);
378 		if (!src_ents)
379 			goto err_free_dst;
380 		dst_ents = src_ents;
381 	}
382 
383 	/*
384 	 * Now map in the payload for the source and destination and terminate
385 	 * with the NULL pointers.
386 	 */
387 	for_each_sg(areq->src, cur, src_ents, i)
388 		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
389 
390 	/* For decryption we need to skip the associated data. */
391 	total = req->is_encrypt ? 0 : areq->assoclen;
392 	for_each_sg(areq->dst, cur, dst_ents, i) {
393 		unsigned len = sg_dma_len(cur);
394 
395 		if (len <= total) {
396 			total -= len;
397 			continue;
398 		}
399 
400 		ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
401 	}
402 
403 	ddt_set(src_ddt, 0, 0);
404 	ddt_set(dst_ddt, 0, 0);
405 
406 	return 0;
407 
408 err_free_dst:
409 	dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
410 err_free_src:
411 	dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
412 err:
413 	return -ENOMEM;
414 }
415 
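/* Unmap the AEAD source/destination data and return the DDTs to the pool. */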
416 static void spacc_aead_free_ddts(struct spacc_req *req)
417 {
418 	struct aead_request *areq = container_of(req->req, struct aead_request,
419 						 base);
420 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
421 	unsigned total = areq->assoclen + areq->cryptlen +
422 			 (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
423 	struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
424 	struct spacc_engine *engine = aead_ctx->generic.engine;
425 	unsigned nents = sg_count(areq->src, total);
426 
427 	if (areq->src != areq->dst) {
428 		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
429 		dma_unmap_sg(engine->dev, areq->dst,
430 			     sg_count(areq->dst, total),
431 			     DMA_FROM_DEVICE);
432 	} else
433 		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
434 
435 	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
436 	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
437 }
438 
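/* Unmap a payload scatterlist and free its DDT back to the DMA pool. */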
439 static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
440 			   dma_addr_t ddt_addr, struct scatterlist *payload,
441 			   unsigned nbytes, enum dma_data_direction dir)
442 {
443 	unsigned nents = sg_count(payload, nbytes);
444 
445 	dma_unmap_sg(req->engine->dev, payload, nents, dir);
446 	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
447 }
448 
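/*
 * Set the key for an AEAD transform. The authenc key blob is split into the
 * cipher and hash keys, and the software fallback is keyed with the same
 * material so that it can take over when the hardware cannot.
 */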
449 static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
450 			     unsigned int keylen)
451 {
452 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
453 	struct crypto_authenc_keys keys;
454 	int err;
455 
456 	crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
457 	crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
458 					      CRYPTO_TFM_REQ_MASK);
459 	err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
460 	crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
461 	crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
462 				   CRYPTO_TFM_RES_MASK);
463 	if (err)
464 		return err;
465 
466 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
467 		goto badkey;
468 
469 	if (keys.enckeylen > AES_MAX_KEY_SIZE)
470 		goto badkey;
471 
472 	if (keys.authkeylen > sizeof(ctx->hash_ctx))
473 		goto badkey;
474 
475 	memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
476 	ctx->cipher_key_len = keys.enckeylen;
477 
478 	memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
479 	ctx->hash_key_len = keys.authkeylen;
480 
481 	return 0;
482 
483 badkey:
484 	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
485 	return -EINVAL;
486 }
487 
488 static int spacc_aead_setauthsize(struct crypto_aead *tfm,
489 				  unsigned int authsize)
490 {
491 	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
492 
493 	return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
494 }
495 
496 /*
497  * Check if an AEAD request requires a fallback operation. Some requests can't
498  * be completed in hardware because the hardware may not support certain key
499  * sizes. In these cases we need to complete the request in software.
500  */
501 static int spacc_aead_need_fallback(struct aead_request *aead_req)
502 {
503 	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
504 	struct aead_alg *alg = crypto_aead_alg(aead);
505 	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
506 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
507 
508 	/*
509 	 * If we have a non-supported key-length, then we need to do a
510 	 * software fallback.
511 	 */
512 	if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
513 	    SPA_CTRL_CIPH_ALG_AES &&
514 	    ctx->cipher_key_len != AES_KEYSIZE_128 &&
515 	    ctx->cipher_key_len != AES_KEYSIZE_256)
516 		return 1;
517 
518 	return 0;
519 }
520 
521 static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
522 				  bool is_encrypt)
523 {
524 	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
525 	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
526 	struct aead_request *subreq = aead_request_ctx(req);
527 
528 	aead_request_set_tfm(subreq, ctx->sw_cipher);
529 	aead_request_set_callback(subreq, req->base.flags,
530 				  req->base.complete, req->base.data);
531 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
532 			       req->iv);
533 	aead_request_set_ad(subreq, req->assoclen);
534 
535 	return is_encrypt ? crypto_aead_encrypt(subreq) :
536 			    crypto_aead_decrypt(subreq);
537 }
538 
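/* Completion handler for AEAD requests: free the DDTs and notify the API. */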
539 static void spacc_aead_complete(struct spacc_req *req)
540 {
541 	spacc_aead_free_ddts(req);
542 	req->req->complete(req->req, req->result);
543 }
544 
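/*
 * Write an AEAD request to the engine: load the key/IV context, program the
 * DDT pointers and lengths and then kick off processing. Called from
 * spacc_push() with the engine hw_lock held.
 */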
545 static int spacc_aead_submit(struct spacc_req *req)
546 {
547 	struct aead_request *aead_req =
548 		container_of(req->req, struct aead_request, base);
549 	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
550 	unsigned int authsize = crypto_aead_authsize(aead);
551 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
552 	struct aead_alg *alg = crypto_aead_alg(aead);
553 	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
554 	struct spacc_engine *engine = ctx->generic.engine;
555 	u32 ctrl, proc_len, assoc_len;
556 
557 	req->result = -EINPROGRESS;
558 	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
559 		ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
560 		ctx->hash_ctx, ctx->hash_key_len);
561 
562 	/* Set the source and destination DDT pointers. */
563 	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
564 	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
565 	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
566 
567 	assoc_len = aead_req->assoclen;
568 	proc_len = aead_req->cryptlen + assoc_len;
569 
570 	/*
571 	 * If we are decrypting, we need to take the length of the ICV out of
572 	 * the processing length.
573 	 */
574 	if (!req->is_encrypt)
575 		proc_len -= authsize;
576 
577 	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
578 	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
579 	writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
580 	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
581 	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
582 
583 	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
584 		(1 << SPA_CTRL_ICV_APPEND);
585 	if (req->is_encrypt)
586 		ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
587 	else
588 		ctrl |= (1 << SPA_CTRL_KEY_EXP);
589 
590 	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
591 
592 	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
593 
594 	return -EINPROGRESS;
595 }
596 
597 static int spacc_req_submit(struct spacc_req *req);
598 
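/*
 * Feed pending requests into the engine while there is space in the command
 * FIFO. The caller must hold the engine hw_lock.
 */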
599 static void spacc_push(struct spacc_engine *engine)
600 {
601 	struct spacc_req *req;
602 
603 	while (!list_empty(&engine->pending) &&
604 	       engine->in_flight + 1 <= engine->fifo_sz) {
605 
606 		++engine->in_flight;
607 		req = list_first_entry(&engine->pending, struct spacc_req,
608 				       list);
609 		list_move_tail(&req->list, &engine->in_progress);
610 
611 		req->result = spacc_req_submit(req);
612 	}
613 }
614 
615 /*
616  * Setup an AEAD request for processing. This will configure the engine, load
617  * the context and then start the packet processing.
618  */
619 static int spacc_aead_setup(struct aead_request *req,
620 			    unsigned alg_type, bool is_encrypt)
621 {
622 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
623 	struct aead_alg *alg = crypto_aead_alg(aead);
624 	struct spacc_engine *engine = to_spacc_aead(alg)->engine;
625 	struct spacc_req *dev_req = aead_request_ctx(req);
626 	int err;
627 	unsigned long flags;
628 
629 	dev_req->req		= &req->base;
630 	dev_req->is_encrypt	= is_encrypt;
631 	dev_req->result		= -EBUSY;
632 	dev_req->engine		= engine;
633 	dev_req->complete	= spacc_aead_complete;
634 
635 	if (unlikely(spacc_aead_need_fallback(req) ||
636 		     ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
637 		return spacc_aead_do_fallback(req, alg_type, is_encrypt);
638 
639 	if (err)
640 		goto out;
641 
642 	err = -EINPROGRESS;
643 	spin_lock_irqsave(&engine->hw_lock, flags);
644 	if (unlikely(spacc_fifo_cmd_full(engine)) ||
645 	    engine->in_flight + 1 > engine->fifo_sz) {
646 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
647 			err = -EBUSY;
648 			spin_unlock_irqrestore(&engine->hw_lock, flags);
649 			goto out_free_ddts;
650 		}
651 		list_add_tail(&dev_req->list, &engine->pending);
652 	} else {
653 		list_add_tail(&dev_req->list, &engine->pending);
654 		spacc_push(engine);
655 	}
656 	spin_unlock_irqrestore(&engine->hw_lock, flags);
657 
658 	goto out;
659 
660 out_free_ddts:
661 	spacc_aead_free_ddts(dev_req);
662 out:
663 	return err;
664 }
665 
666 static int spacc_aead_encrypt(struct aead_request *req)
667 {
668 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
669 	struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
670 
671 	return spacc_aead_setup(req, alg->type, 1);
672 }
673 
674 static int spacc_aead_decrypt(struct aead_request *req)
675 {
676 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
677 	struct spacc_aead  *alg = to_spacc_aead(crypto_aead_alg(aead));
678 
679 	return spacc_aead_setup(req, alg->type, 0);
680 }
681 
682 /*
683  * Initialise a new AEAD context. This is responsible for allocating the
684  * fallback cipher and initialising the context.
685  */
686 static int spacc_aead_cra_init(struct crypto_aead *tfm)
687 {
688 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
689 	struct aead_alg *alg = crypto_aead_alg(tfm);
690 	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
691 	struct spacc_engine *engine = spacc_alg->engine;
692 
693 	ctx->generic.flags = spacc_alg->type;
694 	ctx->generic.engine = engine;
695 	ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
696 					   CRYPTO_ALG_NEED_FALLBACK);
697 	if (IS_ERR(ctx->sw_cipher))
698 		return PTR_ERR(ctx->sw_cipher);
699 	ctx->generic.key_offs = spacc_alg->key_offs;
700 	ctx->generic.iv_offs = spacc_alg->iv_offs;
701 
702 	crypto_aead_set_reqsize(
703 		tfm,
704 		max(sizeof(struct spacc_req),
705 		    sizeof(struct aead_request) +
706 		    crypto_aead_reqsize(ctx->sw_cipher)));
707 
708 	return 0;
709 }
710 
711 /*
712  * Destructor for an AEAD context. This is called when the transform is freed
713  * and must free the fallback cipher.
714  */
715 static void spacc_aead_cra_exit(struct crypto_aead *tfm)
716 {
717 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
718 
719 	crypto_free_aead(ctx->sw_cipher);
720 }
721 
722 /*
723  * Set the DES key for a block cipher transform. This also performs weak key
724  * checking if the transform has requested it.
725  */
726 static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
727 			    unsigned int len)
728 {
729 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
730 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
731 	u32 tmp[DES_EXPKEY_WORDS];
732 
733 	if (len > DES3_EDE_KEY_SIZE) {
734 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
735 		return -EINVAL;
736 	}
737 
738 	if (unlikely(!des_ekey(tmp, key)) &&
739 	    (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
740 		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
741 		return -EINVAL;
742 	}
743 
744 	memcpy(ctx->key, key, len);
745 	ctx->key_len = len;
746 
747 	return 0;
748 }
749 
750 /*
751  * Set the key for an AES block cipher. Some key lengths are not supported in
752  * hardware so this must also check whether a fallback is needed.
753  */
754 static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
755 			    unsigned int len)
756 {
757 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
758 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
759 	int err = 0;
760 
761 	if (len > AES_MAX_KEY_SIZE) {
762 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
763 		return -EINVAL;
764 	}
765 
766 	/*
767 	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
768 	 * request for any other size (192 bits) then we need to do a software
769 	 * fallback.
770 	 */
771 	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
772 	    ctx->sw_cipher) {
773 		/*
774 		 * Set the fallback transform to use the same request flags as
775 		 * the hardware transform.
776 		 */
777 		ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
778 		ctx->sw_cipher->base.crt_flags |=
779 			cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;
780 
781 		err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
782 		if (err)
783 			goto sw_setkey_failed;
784 	} else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
785 		   !ctx->sw_cipher)
786 		err = -EINVAL;
787 
788 	memcpy(ctx->key, key, len);
789 	ctx->key_len = len;
790 
791 sw_setkey_failed:
792 	if (err && ctx->sw_cipher) {
793 		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
794 		tfm->crt_flags |=
795 			ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
796 	}
797 
798 	return err;
799 }
800 
801 static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
802 				  const u8 *key, unsigned int len)
803 {
804 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
805 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
806 	int err = 0;
807 
808 	if (len > AES_MAX_KEY_SIZE) {
809 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
810 		err = -EINVAL;
811 		goto out;
812 	}
813 
814 	memcpy(ctx->key, key, len);
815 	ctx->key_len = len;
816 
817 out:
818 	return err;
819 }
820 
821 static int spacc_ablk_need_fallback(struct spacc_req *req)
822 {
823 	struct spacc_ablk_ctx *ctx;
824 	struct crypto_tfm *tfm = req->req->tfm;
825 	struct crypto_alg *alg = req->req->tfm->__crt_alg;
826 	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
827 
828 	ctx = crypto_tfm_ctx(tfm);
829 
830 	return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
831 			SPA_CTRL_CIPH_ALG_AES &&
832 			ctx->key_len != AES_KEYSIZE_128 &&
833 			ctx->key_len != AES_KEYSIZE_256;
834 }
835 
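/*
 * Completion handler for block cipher requests: unmap the data, free the DDTs
 * and complete the request back to the crypto API.
 */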
836 static void spacc_ablk_complete(struct spacc_req *req)
837 {
838 	struct ablkcipher_request *ablk_req =
839 		container_of(req->req, struct ablkcipher_request, base);
840 
841 	if (ablk_req->src != ablk_req->dst) {
842 		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
843 			       ablk_req->nbytes, DMA_TO_DEVICE);
844 		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
845 			       ablk_req->nbytes, DMA_FROM_DEVICE);
846 	} else
847 		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
848 			       ablk_req->nbytes, DMA_BIDIRECTIONAL);
849 
850 	req->req->complete(req->req, req->result);
851 }
852 
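/*
 * Program the engine for a block cipher request: load the key/IV context,
 * set the DDT pointers and processing length and start the operation.
 * Called from spacc_push() with the engine hw_lock held.
 */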
853 static int spacc_ablk_submit(struct spacc_req *req)
854 {
855 	struct crypto_tfm *tfm = req->req->tfm;
856 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
857 	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
858 	struct crypto_alg *alg = req->req->tfm->__crt_alg;
859 	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
860 	struct spacc_engine *engine = ctx->generic.engine;
861 	u32 ctrl;
862 
863 	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
864 		ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
865 		NULL, 0);
866 
867 	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
868 	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
869 	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
870 
871 	writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
872 	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
873 	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
874 	writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
875 
876 	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
877 		(req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
878 		 (1 << SPA_CTRL_KEY_EXP));
879 
880 	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
881 
882 	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
883 
884 	return -EINPROGRESS;
885 }
886 
887 static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
888 				  unsigned alg_type, bool is_encrypt)
889 {
890 	struct crypto_tfm *old_tfm =
891 	    crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
892 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
893 	int err;
894 
895 	if (!ctx->sw_cipher)
896 		return -EINVAL;
897 
898 	/*
899 	 * Change the request to use the software fallback transform, and once
900 	 * the ciphering has completed, put the old transform back into the
901 	 * request.
902 	 */
903 	ablkcipher_request_set_tfm(req, ctx->sw_cipher);
904 	err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
905 		crypto_ablkcipher_decrypt(req);
906 	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));
907 
908 	return err;
909 }
910 
911 static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
912 			    bool is_encrypt)
913 {
914 	struct crypto_alg *alg = req->base.tfm->__crt_alg;
915 	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
916 	struct spacc_req *dev_req = ablkcipher_request_ctx(req);
917 	unsigned long flags;
918 	int err = -ENOMEM;
919 
920 	dev_req->req		= &req->base;
921 	dev_req->is_encrypt	= is_encrypt;
922 	dev_req->engine		= engine;
923 	dev_req->complete	= spacc_ablk_complete;
924 	dev_req->result		= -EINPROGRESS;
925 
926 	if (unlikely(spacc_ablk_need_fallback(dev_req)))
927 		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
928 
929 	/*
930 	 * Create the DDT's for the engine. If we share the same source and
931 	 * destination then we can optimize by reusing the DDT's.
932 	 */
933 	if (req->src != req->dst) {
934 		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
935 			req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
936 		if (!dev_req->src_ddt)
937 			goto out;
938 
939 		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
940 			req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
941 		if (!dev_req->dst_ddt)
942 			goto out_free_src;
943 	} else {
944 		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
945 			req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
946 		if (!dev_req->dst_ddt)
947 			goto out;
948 
949 		dev_req->src_ddt = NULL;
950 		dev_req->src_addr = dev_req->dst_addr;
951 	}
952 
953 	err = -EINPROGRESS;
954 	spin_lock_irqsave(&engine->hw_lock, flags);
955 	/*
956 	 * Check if the engine will accept the operation now. If it won't then
957 	 * we either stick it on the end of a pending list if we can backlog,
958 	 * or bailout with an error if not.
959 	 */
960 	if (unlikely(spacc_fifo_cmd_full(engine)) ||
961 	    engine->in_flight + 1 > engine->fifo_sz) {
962 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
963 			err = -EBUSY;
964 			spin_unlock_irqrestore(&engine->hw_lock, flags);
965 			goto out_free_ddts;
966 		}
967 		list_add_tail(&dev_req->list, &engine->pending);
968 	} else {
969 		list_add_tail(&dev_req->list, &engine->pending);
970 		spacc_push(engine);
971 	}
972 	spin_unlock_irqrestore(&engine->hw_lock, flags);
973 
974 	goto out;
975 
976 out_free_ddts:
977 	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
978 		       req->nbytes, req->src == req->dst ?
979 		       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
980 out_free_src:
981 	if (req->src != req->dst)
982 		spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
983 			       req->src, req->nbytes, DMA_TO_DEVICE);
984 out:
985 	return err;
986 }
987 
988 static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
989 {
990 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
991 	struct crypto_alg *alg = tfm->__crt_alg;
992 	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
993 	struct spacc_engine *engine = spacc_alg->engine;
994 
995 	ctx->generic.flags = spacc_alg->type;
996 	ctx->generic.engine = engine;
997 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
998 		ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
999 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1000 		if (IS_ERR(ctx->sw_cipher)) {
1001 			dev_warn(engine->dev, "failed to allocate fallback for %s\n",
1002 				 alg->cra_name);
1003 			ctx->sw_cipher = NULL;
1004 		}
1005 	}
1006 	ctx->generic.key_offs = spacc_alg->key_offs;
1007 	ctx->generic.iv_offs = spacc_alg->iv_offs;
1008 
1009 	tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);
1010 
1011 	return 0;
1012 }
1013 
1014 static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
1015 {
1016 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
1017 
1018 	if (ctx->sw_cipher)
1019 		crypto_free_ablkcipher(ctx->sw_cipher);
1020 	ctx->sw_cipher = NULL;
1021 }
1022 
1023 static int spacc_ablk_encrypt(struct ablkcipher_request *req)
1024 {
1025 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
1026 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
1027 	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
1028 
1029 	return spacc_ablk_setup(req, alg->type, 1);
1030 }
1031 
1032 static int spacc_ablk_decrypt(struct ablkcipher_request *req)
1033 {
1034 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
1035 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
1036 	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
1037 
1038 	return spacc_ablk_setup(req, alg->type, 0);
1039 }
1040 
1041 static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
1042 {
1043 	return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
1044 		SPA_FIFO_STAT_EMPTY;
1045 }
1046 
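/*
 * Pop completed requests off the status FIFO, translating the SPAcc status
 * codes into errnos, and then schedule the completion tasklet to run the
 * callbacks.
 */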
1047 static void spacc_process_done(struct spacc_engine *engine)
1048 {
1049 	struct spacc_req *req;
1050 	unsigned long flags;
1051 
1052 	spin_lock_irqsave(&engine->hw_lock, flags);
1053 
1054 	while (!spacc_fifo_stat_empty(engine)) {
1055 		req = list_first_entry(&engine->in_progress, struct spacc_req,
1056 				       list);
1057 		list_move_tail(&req->list, &engine->completed);
1058 		--engine->in_flight;
1059 
1060 		/* POP the status register. */
1061 		writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
1062 		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
1063 		     SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;
1064 
1065 		/*
1066 		 * Convert the SPAcc error status into the standard POSIX error
1067 		 * codes.
1068 		 */
1069 		if (unlikely(req->result)) {
1070 			switch (req->result) {
1071 			case SPA_STATUS_ICV_FAIL:
1072 				req->result = -EBADMSG;
1073 				break;
1074 
1075 			case SPA_STATUS_MEMORY_ERROR:
1076 				dev_warn(engine->dev,
1077 					 "memory error triggered\n");
1078 				req->result = -EFAULT;
1079 				break;
1080 
1081 			case SPA_STATUS_BLOCK_ERROR:
1082 				dev_warn(engine->dev,
1083 					 "block error triggered\n");
1084 				req->result = -EIO;
1085 				break;
1086 			}
1087 		}
1088 	}
1089 
1090 	tasklet_schedule(&engine->complete);
1091 
1092 	spin_unlock_irqrestore(&engine->hw_lock, flags);
1093 }
1094 
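/* Interrupt handler: acknowledge the IRQ and reap any completed requests. */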
1095 static irqreturn_t spacc_spacc_irq(int irq, void *dev)
1096 {
1097 	struct spacc_engine *engine = (struct spacc_engine *)dev;
1098 	u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
1099 
1100 	writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
1101 	spacc_process_done(engine);
1102 
1103 	return IRQ_HANDLED;
1104 }
1105 
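/*
 * Timer callback used to reap completed packets when too few are in flight
 * to reach the STAT_CNT interrupt threshold.
 */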
1106 static void spacc_packet_timeout(unsigned long data)
1107 {
1108 	struct spacc_engine *engine = (struct spacc_engine *)data;
1109 
1110 	spacc_process_done(engine);
1111 }
1112 
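/* Dispatch a request to the AEAD or block cipher submit routine. */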
1113 static int spacc_req_submit(struct spacc_req *req)
1114 {
1115 	struct crypto_alg *alg = req->req->tfm->__crt_alg;
1116 
1117 	if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
1118 		return spacc_aead_submit(req);
1119 	else
1120 		return spacc_ablk_submit(req);
1121 }
1122 
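/*
 * Completion tasklet: take the completed requests off the engine under the
 * hw_lock, push any pending work into the engine and then run the completion
 * callbacks with the lock released.
 */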
1123 static void spacc_spacc_complete(unsigned long data)
1124 {
1125 	struct spacc_engine *engine = (struct spacc_engine *)data;
1126 	struct spacc_req *req, *tmp;
1127 	unsigned long flags;
1128 	LIST_HEAD(completed);
1129 
1130 	spin_lock_irqsave(&engine->hw_lock, flags);
1131 
1132 	list_splice_init(&engine->completed, &completed);
1133 	spacc_push(engine);
1134 	if (engine->in_flight)
1135 		mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
1136 
1137 	spin_unlock_irqrestore(&engine->hw_lock, flags);
1138 
1139 	list_for_each_entry_safe(req, tmp, &completed, list) {
1140 		list_del(&req->list);
1141 		req->complete(req);
1142 	}
1143 }
1144 
1145 #ifdef CONFIG_PM
1146 static int spacc_suspend(struct device *dev)
1147 {
1148 	struct platform_device *pdev = to_platform_device(dev);
1149 	struct spacc_engine *engine = platform_get_drvdata(pdev);
1150 
1151 	/*
1152 	 * We only support standby mode. All we have to do is gate the clock to
1153 	 * the spacc. The hardware will preserve state until we turn it back
1154 	 * on again.
1155 	 */
1156 	clk_disable(engine->clk);
1157 
1158 	return 0;
1159 }
1160 
1161 static int spacc_resume(struct device *dev)
1162 {
1163 	struct platform_device *pdev = to_platform_device(dev);
1164 	struct spacc_engine *engine = platform_get_drvdata(pdev);
1165 
1166 	return clk_enable(engine->clk);
1167 }
1168 
1169 static const struct dev_pm_ops spacc_pm_ops = {
1170 	.suspend	= spacc_suspend,
1171 	.resume		= spacc_resume,
1172 };
1173 #endif /* CONFIG_PM */
1174 
1175 static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
1176 {
1177 	return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
1178 }
1179 
1180 static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
1181 					  struct device_attribute *attr,
1182 					  char *buf)
1183 {
1184 	struct spacc_engine *engine = spacc_dev_to_engine(dev);
1185 
1186 	return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
1187 }
1188 
1189 static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
1190 					   struct device_attribute *attr,
1191 					   const char *buf, size_t len)
1192 {
1193 	struct spacc_engine *engine = spacc_dev_to_engine(dev);
1194 	unsigned long thresh;
1195 
1196 	if (kstrtoul(buf, 0, &thresh))
1197 		return -EINVAL;
1198 
1199 	thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
1200 
1201 	engine->stat_irq_thresh = thresh;
1202 	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
1203 	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
1204 
1205 	return len;
1206 }
1207 static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
1208 		   spacc_stat_irq_thresh_store);
1209 
1210 static struct spacc_alg ipsec_engine_algs[] = {
1211 	{
1212 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
1213 		.key_offs = 0,
1214 		.iv_offs = AES_MAX_KEY_SIZE,
1215 		.alg = {
1216 			.cra_name = "cbc(aes)",
1217 			.cra_driver_name = "cbc-aes-picoxcell",
1218 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1219 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1220 				     CRYPTO_ALG_KERN_DRIVER_ONLY |
1221 				     CRYPTO_ALG_ASYNC |
1222 				     CRYPTO_ALG_NEED_FALLBACK,
1223 			.cra_blocksize = AES_BLOCK_SIZE,
1224 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1225 			.cra_type = &crypto_ablkcipher_type,
1226 			.cra_module = THIS_MODULE,
1227 			.cra_ablkcipher = {
1228 				.setkey = spacc_aes_setkey,
1229 				.encrypt = spacc_ablk_encrypt,
1230 				.decrypt = spacc_ablk_decrypt,
1231 				.min_keysize = AES_MIN_KEY_SIZE,
1232 				.max_keysize = AES_MAX_KEY_SIZE,
1233 				.ivsize = AES_BLOCK_SIZE,
1234 			},
1235 			.cra_init = spacc_ablk_cra_init,
1236 			.cra_exit = spacc_ablk_cra_exit,
1237 		},
1238 	},
1239 	{
1240 		.key_offs = 0,
1241 		.iv_offs = AES_MAX_KEY_SIZE,
1242 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
1243 		.alg = {
1244 			.cra_name = "ecb(aes)",
1245 			.cra_driver_name = "ecb-aes-picoxcell",
1246 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1247 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1248 				CRYPTO_ALG_KERN_DRIVER_ONLY |
1249 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1250 			.cra_blocksize = AES_BLOCK_SIZE,
1251 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1252 			.cra_type = &crypto_ablkcipher_type,
1253 			.cra_module = THIS_MODULE,
1254 			.cra_ablkcipher = {
1255 				.setkey = spacc_aes_setkey,
1256 				.encrypt = spacc_ablk_encrypt,
1257 				.decrypt = spacc_ablk_decrypt,
1258 				.min_keysize = AES_MIN_KEY_SIZE,
1259 				.max_keysize = AES_MAX_KEY_SIZE,
1260 			},
1261 			.cra_init = spacc_ablk_cra_init,
1262 			.cra_exit = spacc_ablk_cra_exit,
1263 		},
1264 	},
1265 	{
1266 		.key_offs = DES_BLOCK_SIZE,
1267 		.iv_offs = 0,
1268 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
1269 		.alg = {
1270 			.cra_name = "cbc(des)",
1271 			.cra_driver_name = "cbc-des-picoxcell",
1272 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1273 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1274 					CRYPTO_ALG_ASYNC |
1275 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1276 			.cra_blocksize = DES_BLOCK_SIZE,
1277 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1278 			.cra_type = &crypto_ablkcipher_type,
1279 			.cra_module = THIS_MODULE,
1280 			.cra_ablkcipher = {
1281 				.setkey = spacc_des_setkey,
1282 				.encrypt = spacc_ablk_encrypt,
1283 				.decrypt = spacc_ablk_decrypt,
1284 				.min_keysize = DES_KEY_SIZE,
1285 				.max_keysize = DES_KEY_SIZE,
1286 				.ivsize = DES_BLOCK_SIZE,
1287 			},
1288 			.cra_init = spacc_ablk_cra_init,
1289 			.cra_exit = spacc_ablk_cra_exit,
1290 		},
1291 	},
1292 	{
1293 		.key_offs = DES_BLOCK_SIZE,
1294 		.iv_offs = 0,
1295 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
1296 		.alg = {
1297 			.cra_name = "ecb(des)",
1298 			.cra_driver_name = "ecb-des-picoxcell",
1299 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1300 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1301 					CRYPTO_ALG_ASYNC |
1302 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1303 			.cra_blocksize = DES_BLOCK_SIZE,
1304 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1305 			.cra_type = &crypto_ablkcipher_type,
1306 			.cra_module = THIS_MODULE,
1307 			.cra_ablkcipher = {
1308 				.setkey = spacc_des_setkey,
1309 				.encrypt = spacc_ablk_encrypt,
1310 				.decrypt = spacc_ablk_decrypt,
1311 				.min_keysize = DES_KEY_SIZE,
1312 				.max_keysize = DES_KEY_SIZE,
1313 			},
1314 			.cra_init = spacc_ablk_cra_init,
1315 			.cra_exit = spacc_ablk_cra_exit,
1316 		},
1317 	},
1318 	{
1319 		.key_offs = DES_BLOCK_SIZE,
1320 		.iv_offs = 0,
1321 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
1322 		.alg = {
1323 			.cra_name = "cbc(des3_ede)",
1324 			.cra_driver_name = "cbc-des3-ede-picoxcell",
1325 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1326 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1327 					CRYPTO_ALG_ASYNC |
1328 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1329 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1330 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1331 			.cra_type = &crypto_ablkcipher_type,
1332 			.cra_module = THIS_MODULE,
1333 			.cra_ablkcipher = {
1334 				.setkey = spacc_des_setkey,
1335 				.encrypt = spacc_ablk_encrypt,
1336 				.decrypt = spacc_ablk_decrypt,
1337 				.min_keysize = DES3_EDE_KEY_SIZE,
1338 				.max_keysize = DES3_EDE_KEY_SIZE,
1339 				.ivsize = DES3_EDE_BLOCK_SIZE,
1340 			},
1341 			.cra_init = spacc_ablk_cra_init,
1342 			.cra_exit = spacc_ablk_cra_exit,
1343 		},
1344 	},
1345 	{
1346 		.key_offs = DES_BLOCK_SIZE,
1347 		.iv_offs = 0,
1348 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
1349 		.alg = {
1350 			.cra_name = "ecb(des3_ede)",
1351 			.cra_driver_name = "ecb-des3-ede-picoxcell",
1352 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1353 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1354 					CRYPTO_ALG_ASYNC |
1355 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1356 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1357 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1358 			.cra_type = &crypto_ablkcipher_type,
1359 			.cra_module = THIS_MODULE,
1360 			.cra_ablkcipher = {
1361 				.setkey = spacc_des_setkey,
1362 				.encrypt = spacc_ablk_encrypt,
1363 				.decrypt = spacc_ablk_decrypt,
1364 				.min_keysize = DES3_EDE_KEY_SIZE,
1365 				.max_keysize = DES3_EDE_KEY_SIZE,
1366 			},
1367 			.cra_init = spacc_ablk_cra_init,
1368 			.cra_exit = spacc_ablk_cra_exit,
1369 		},
1370 	},
1371 };
1372 
1373 static struct spacc_aead ipsec_engine_aeads[] = {
1374 	{
1375 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1376 				SPA_CTRL_CIPH_MODE_CBC |
1377 				SPA_CTRL_HASH_ALG_SHA |
1378 				SPA_CTRL_HASH_MODE_HMAC,
1379 		.key_offs = 0,
1380 		.iv_offs = AES_MAX_KEY_SIZE,
1381 		.alg = {
1382 			.base = {
1383 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1384 				.cra_driver_name = "authenc-hmac-sha1-"
1385 						   "cbc-aes-picoxcell",
1386 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1387 				.cra_flags = CRYPTO_ALG_ASYNC |
1388 					     CRYPTO_ALG_NEED_FALLBACK |
1389 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1390 				.cra_blocksize = AES_BLOCK_SIZE,
1391 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1392 				.cra_module = THIS_MODULE,
1393 			},
1394 			.setkey = spacc_aead_setkey,
1395 			.setauthsize = spacc_aead_setauthsize,
1396 			.encrypt = spacc_aead_encrypt,
1397 			.decrypt = spacc_aead_decrypt,
1398 			.ivsize = AES_BLOCK_SIZE,
1399 			.maxauthsize = SHA1_DIGEST_SIZE,
1400 			.init = spacc_aead_cra_init,
1401 			.exit = spacc_aead_cra_exit,
1402 		},
1403 	},
1404 	{
1405 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1406 				SPA_CTRL_CIPH_MODE_CBC |
1407 				SPA_CTRL_HASH_ALG_SHA256 |
1408 				SPA_CTRL_HASH_MODE_HMAC,
1409 		.key_offs = 0,
1410 		.iv_offs = AES_MAX_KEY_SIZE,
1411 		.alg = {
1412 			.base = {
1413 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1414 				.cra_driver_name = "authenc-hmac-sha256-"
1415 						   "cbc-aes-picoxcell",
1416 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1417 				.cra_flags = CRYPTO_ALG_ASYNC |
1418 					     CRYPTO_ALG_NEED_FALLBACK |
1419 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1420 				.cra_blocksize = AES_BLOCK_SIZE,
1421 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1422 				.cra_module = THIS_MODULE,
1423 			},
1424 			.setkey = spacc_aead_setkey,
1425 			.setauthsize = spacc_aead_setauthsize,
1426 			.encrypt = spacc_aead_encrypt,
1427 			.decrypt = spacc_aead_decrypt,
1428 			.ivsize = AES_BLOCK_SIZE,
1429 			.maxauthsize = SHA256_DIGEST_SIZE,
1430 			.init = spacc_aead_cra_init,
1431 			.exit = spacc_aead_cra_exit,
1432 		},
1433 	},
1434 	{
1435 		.key_offs = 0,
1436 		.iv_offs = AES_MAX_KEY_SIZE,
1437 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1438 				SPA_CTRL_CIPH_MODE_CBC |
1439 				SPA_CTRL_HASH_ALG_MD5 |
1440 				SPA_CTRL_HASH_MODE_HMAC,
1441 		.alg = {
1442 			.base = {
1443 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1444 				.cra_driver_name = "authenc-hmac-md5-"
1445 						   "cbc-aes-picoxcell",
1446 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1447 				.cra_flags = CRYPTO_ALG_ASYNC |
1448 					     CRYPTO_ALG_NEED_FALLBACK |
1449 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1450 				.cra_blocksize = AES_BLOCK_SIZE,
1451 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1452 				.cra_module = THIS_MODULE,
1453 			},
1454 			.setkey = spacc_aead_setkey,
1455 			.setauthsize = spacc_aead_setauthsize,
1456 			.encrypt = spacc_aead_encrypt,
1457 			.decrypt = spacc_aead_decrypt,
1458 			.ivsize = AES_BLOCK_SIZE,
1459 			.maxauthsize = MD5_DIGEST_SIZE,
1460 			.init = spacc_aead_cra_init,
1461 			.exit = spacc_aead_cra_exit,
1462 		},
1463 	},
1464 	{
1465 		.key_offs = DES_BLOCK_SIZE,
1466 		.iv_offs = 0,
1467 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
1468 				SPA_CTRL_CIPH_MODE_CBC |
1469 				SPA_CTRL_HASH_ALG_SHA |
1470 				SPA_CTRL_HASH_MODE_HMAC,
1471 		.alg = {
1472 			.base = {
1473 				.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1474 				.cra_driver_name = "authenc-hmac-sha1-"
1475 						   "cbc-3des-picoxcell",
1476 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1477 				.cra_flags = CRYPTO_ALG_ASYNC |
1478 					     CRYPTO_ALG_NEED_FALLBACK |
1479 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1480 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1481 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1482 				.cra_module = THIS_MODULE,
1483 			},
1484 			.setkey = spacc_aead_setkey,
1485 			.setauthsize = spacc_aead_setauthsize,
1486 			.encrypt = spacc_aead_encrypt,
1487 			.decrypt = spacc_aead_decrypt,
1488 			.ivsize = DES3_EDE_BLOCK_SIZE,
1489 			.maxauthsize = SHA1_DIGEST_SIZE,
1490 			.init = spacc_aead_cra_init,
1491 			.exit = spacc_aead_cra_exit,
1492 		},
1493 	},
1494 	{
1495 		.key_offs = DES_BLOCK_SIZE,
1496 		.iv_offs = 0,
1497 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1498 				SPA_CTRL_CIPH_MODE_CBC |
1499 				SPA_CTRL_HASH_ALG_SHA256 |
1500 				SPA_CTRL_HASH_MODE_HMAC,
1501 		.alg = {
1502 			.base = {
1503 				.cra_name = "authenc(hmac(sha256),"
1504 					    "cbc(des3_ede))",
1505 				.cra_driver_name = "authenc-hmac-sha256-"
1506 						   "cbc-3des-picoxcell",
1507 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1508 				.cra_flags = CRYPTO_ALG_ASYNC |
1509 					     CRYPTO_ALG_NEED_FALLBACK |
1510 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1511 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1512 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1513 				.cra_module = THIS_MODULE,
1514 			},
1515 			.setkey = spacc_aead_setkey,
1516 			.setauthsize = spacc_aead_setauthsize,
1517 			.encrypt = spacc_aead_encrypt,
1518 			.decrypt = spacc_aead_decrypt,
1519 			.ivsize = DES3_EDE_BLOCK_SIZE,
1520 			.maxauthsize = SHA256_DIGEST_SIZE,
1521 			.init = spacc_aead_cra_init,
1522 			.exit = spacc_aead_cra_exit,
1523 		},
1524 	},
1525 	{
1526 		.key_offs = DES_BLOCK_SIZE,
1527 		.iv_offs = 0,
1528 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
1529 				SPA_CTRL_CIPH_MODE_CBC |
1530 				SPA_CTRL_HASH_ALG_MD5 |
1531 				SPA_CTRL_HASH_MODE_HMAC,
1532 		.alg = {
1533 			.base = {
1534 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1535 				.cra_driver_name = "authenc-hmac-md5-"
1536 						   "cbc-3des-picoxcell",
1537 				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1538 				.cra_flags = CRYPTO_ALG_ASYNC |
1539 					     CRYPTO_ALG_NEED_FALLBACK |
1540 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1541 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1542 				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
1543 				.cra_module = THIS_MODULE,
1544 			},
1545 			.setkey = spacc_aead_setkey,
1546 			.setauthsize = spacc_aead_setauthsize,
1547 			.encrypt = spacc_aead_encrypt,
1548 			.decrypt = spacc_aead_decrypt,
1549 			.ivsize = DES3_EDE_BLOCK_SIZE,
1550 			.maxauthsize = MD5_DIGEST_SIZE,
1551 			.init = spacc_aead_cra_init,
1552 			.exit = spacc_aead_cra_exit,
1553 		},
1554 	},
1555 };
1556 
1557 static struct spacc_alg l2_engine_algs[] = {
1558 	{
1559 		.key_offs = 0,
1560 		.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
1561 		.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
1562 				SPA_CTRL_CIPH_MODE_F8,
1563 		.alg = {
1564 			.cra_name = "f8(kasumi)",
1565 			.cra_driver_name = "f8-kasumi-picoxcell",
1566 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1567 			.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER |
1568 					CRYPTO_ALG_ASYNC |
1569 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1570 			.cra_blocksize = 8,
1571 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1572 			.cra_type = &crypto_ablkcipher_type,
1573 			.cra_module = THIS_MODULE,
1574 			.cra_ablkcipher = {
1575 				.setkey = spacc_kasumi_f8_setkey,
1576 				.encrypt = spacc_ablk_encrypt,
1577 				.decrypt = spacc_ablk_decrypt,
1578 				.min_keysize = 16,
1579 				.max_keysize = 16,
1580 				.ivsize = 8,
1581 			},
1582 			.cra_init = spacc_ablk_cra_init,
1583 			.cra_exit = spacc_ablk_cra_exit,
1584 		},
1585 	},
1586 };
1587 
1588 #ifdef CONFIG_OF
1589 static const struct of_device_id spacc_of_id_table[] = {
1590 	{ .compatible = "picochip,spacc-ipsec" },
1591 	{ .compatible = "picochip,spacc-l2" },
1592 	{}
1593 };
1594 MODULE_DEVICE_TABLE(of, spacc_of_id_table);
1595 #endif /* CONFIG_OF */
1596 
1597 static bool spacc_is_compatible(struct platform_device *pdev,
1598 				const char *spacc_type)
1599 {
1600 	const struct platform_device_id *platid = platform_get_device_id(pdev);
1601 
1602 	if (platid && !strcmp(platid->name, spacc_type))
1603 		return true;
1604 
1605 #ifdef CONFIG_OF
1606 	if (of_device_is_compatible(pdev->dev.of_node, spacc_type))
1607 		return true;
1608 #endif /* CONFIG_OF */
1609 
1610 	return false;
1611 }
1612 
1613 static void spacc_tasklet_kill(void *data)
1614 {
1615 	tasklet_kill(data);
1616 }
1617 
1618 static int spacc_probe(struct platform_device *pdev)
1619 {
1620 	int i, err, ret = -EINVAL;
1621 	struct resource *mem, *irq;
1622 	struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
1623 						   GFP_KERNEL);
1624 	if (!engine)
1625 		return -ENOMEM;
1626 
1627 	if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) {
1628 		engine->max_ctxs	= SPACC_CRYPTO_IPSEC_MAX_CTXS;
1629 		engine->cipher_pg_sz	= SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
1630 		engine->hash_pg_sz	= SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
1631 		engine->fifo_sz		= SPACC_CRYPTO_IPSEC_FIFO_SZ;
1632 		engine->algs		= ipsec_engine_algs;
1633 		engine->num_algs	= ARRAY_SIZE(ipsec_engine_algs);
1634 		engine->aeads		= ipsec_engine_aeads;
1635 		engine->num_aeads	= ARRAY_SIZE(ipsec_engine_aeads);
1636 	} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
1637 		engine->max_ctxs	= SPACC_CRYPTO_L2_MAX_CTXS;
1638 		engine->cipher_pg_sz	= SPACC_CRYPTO_L2_CIPHER_PG_SZ;
1639 		engine->hash_pg_sz	= SPACC_CRYPTO_L2_HASH_PG_SZ;
1640 		engine->fifo_sz		= SPACC_CRYPTO_L2_FIFO_SZ;
1641 		engine->algs		= l2_engine_algs;
1642 		engine->num_algs	= ARRAY_SIZE(l2_engine_algs);
1643 	} else {
1644 		return -EINVAL;
1645 	}
1646 
1647 	engine->name = dev_name(&pdev->dev);
1648 
1649 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1650 	engine->regs = devm_ioremap_resource(&pdev->dev, mem);
1651 	if (IS_ERR(engine->regs))
1652 		return PTR_ERR(engine->regs);
1653 
1654 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1655 	if (!irq) {
1656 		dev_err(&pdev->dev, "no memory/irq resource for engine\n");
1657 		return -ENXIO;
1658 	}
1659 
1660 	tasklet_init(&engine->complete, spacc_spacc_complete,
1661 		     (unsigned long)engine);
1662 
1663 	ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
1664 			      &engine->complete);
1665 	if (ret)
1666 		return ret;
1667 
1668 	if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
1669 			     engine->name, engine)) {
1670 		dev_err(engine->dev, "failed to request IRQ\n");
1671 		return -EBUSY;
1672 	}
1673 
1674 	engine->dev		= &pdev->dev;
1675 	engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
1676 	engine->hash_key_base	= engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
1677 
1678 	engine->req_pool = dmam_pool_create(engine->name, engine->dev,
1679 		MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
1680 	if (!engine->req_pool)
1681 		return -ENOMEM;
1682 
1683 	spin_lock_init(&engine->hw_lock);
1684 
1685 	engine->clk = clk_get(&pdev->dev, "ref");
1686 	if (IS_ERR(engine->clk)) {
1687 		dev_info(&pdev->dev, "clk unavailable\n");
1688 		device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1689 		return PTR_ERR(engine->clk);
1690 	}
1691 
1692 	if (clk_prepare_enable(engine->clk)) {
1693 		dev_info(&pdev->dev, "unable to prepare/enable clk\n");
1694 		clk_put(engine->clk);
1695 		return -EIO;
1696 	}
1697 
1698 	err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1699 	if (err) {
1700 		clk_disable_unprepare(engine->clk);
1701 		clk_put(engine->clk);
1702 		return err;
1703 	}
1704 
1705 
1706 	/*
1707 	 * Use an IRQ threshold of 50% as a default. This seems to be a
1708 	 * reasonable trade off of latency against throughput but can be
1709 	 * changed at runtime.
1710 	 */
1711 	engine->stat_irq_thresh = (engine->fifo_sz / 2);
1712 
1713 	/*
1714 	 * Configure the interrupts. We only use the STAT_CNT interrupt as we
1715 	 * only submit a new packet for processing when we complete another in
1716 	 * the queue. This minimizes time spent in the interrupt handler.
1717 	 */
1718 	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
1719 	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
1720 	writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
1721 	       engine->regs + SPA_IRQ_EN_REG_OFFSET);
1722 
1723 	setup_timer(&engine->packet_timeout, spacc_packet_timeout,
1724 		    (unsigned long)engine);
1725 
1726 	INIT_LIST_HEAD(&engine->pending);
1727 	INIT_LIST_HEAD(&engine->completed);
1728 	INIT_LIST_HEAD(&engine->in_progress);
1729 	engine->in_flight = 0;
1730 
1731 	platform_set_drvdata(pdev, engine);
1732 
1733 	INIT_LIST_HEAD(&engine->registered_algs);
1734 	for (i = 0; i < engine->num_algs; ++i) {
1735 		engine->algs[i].engine = engine;
1736 		err = crypto_register_alg(&engine->algs[i].alg);
1737 		if (!err) {
1738 			list_add_tail(&engine->algs[i].entry,
1739 				      &engine->registered_algs);
1740 			ret = 0;
1741 		}
1742 		if (err)
1743 			dev_err(engine->dev, "failed to register alg \"%s\"\n",
1744 				engine->algs[i].alg.cra_name);
1745 		else
1746 			dev_dbg(engine->dev, "registered alg \"%s\"\n",
1747 				engine->algs[i].alg.cra_name);
1748 	}
1749 
1750 	INIT_LIST_HEAD(&engine->registered_aeads);
1751 	for (i = 0; i < engine->num_aeads; ++i) {
1752 		engine->aeads[i].engine = engine;
1753 		err = crypto_register_aead(&engine->aeads[i].alg);
1754 		if (!err) {
1755 			list_add_tail(&engine->aeads[i].entry,
1756 				      &engine->registered_aeads);
1757 			ret = 0;
1758 		}
1759 		if (err)
1760 			dev_err(engine->dev, "failed to register alg \"%s\"\n",
1761 				engine->aeads[i].alg.base.cra_name);
1762 		else
1763 			dev_dbg(engine->dev, "registered alg \"%s\"\n",
1764 				engine->aeads[i].alg.base.cra_name);
1765 	}
1766 
1767 	return ret;
1768 }
1769 
1770 static int spacc_remove(struct platform_device *pdev)
1771 {
1772 	struct spacc_aead *aead, *an;
1773 	struct spacc_alg *alg, *next;
1774 	struct spacc_engine *engine = platform_get_drvdata(pdev);
1775 
1776 	del_timer_sync(&engine->packet_timeout);
1777 	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1778 
1779 	list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
1780 		list_del(&aead->entry);
1781 		crypto_unregister_aead(&aead->alg);
1782 	}
1783 
1784 	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
1785 		list_del(&alg->entry);
1786 		crypto_unregister_alg(&alg->alg);
1787 	}
1788 
1789 	clk_disable_unprepare(engine->clk);
1790 	clk_put(engine->clk);
1791 
1792 	return 0;
1793 }
1794 
1795 static const struct platform_device_id spacc_id_table[] = {
1796 	{ "picochip,spacc-ipsec", },
1797 	{ "picochip,spacc-l2", },
1798 	{ }
1799 };
1800 
1801 static struct platform_driver spacc_driver = {
1802 	.probe		= spacc_probe,
1803 	.remove		= spacc_remove,
1804 	.driver		= {
1805 		.name	= "picochip,spacc",
1806 #ifdef CONFIG_PM
1807 		.pm	= &spacc_pm_ops,
1808 #endif /* CONFIG_PM */
1809 		.of_match_table	= of_match_ptr(spacc_of_id_table),
1810 	},
1811 	.id_table	= spacc_id_table,
1812 };
1813 
1814 module_platform_driver(spacc_driver);
1815 
1816 MODULE_LICENSE("GPL");
1817 MODULE_AUTHOR("Jamie Iles");
1818