1 /*
2  * Cryptographic API.
3  *
4  * Support for SAHARA cryptographic accelerator.
5  *
6  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
7  * Copyright (c) 2013 Vista Silicon S.L.
8  * Author: Javier Martin <javier.martin@vista-silicon.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as published
12  * by the Free Software Foundation.
13  *
14  * Based on omap-aes.c and tegra-aes.c
15  */
16 
17 #include <crypto/algapi.h>
18 #include <crypto/aes.h>
19 #include <crypto/hash.h>
20 #include <crypto/internal/hash.h>
21 #include <crypto/scatterwalk.h>
22 #include <crypto/sha.h>
23 
24 #include <linux/clk.h>
25 #include <linux/crypto.h>
26 #include <linux/interrupt.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/kernel.h>
30 #include <linux/kthread.h>
31 #include <linux/module.h>
32 #include <linux/mutex.h>
33 #include <linux/of.h>
34 #include <linux/of_device.h>
35 #include <linux/platform_device.h>
36 
37 #define SHA_BUFFER_LEN		PAGE_SIZE
38 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
39 
40 #define SAHARA_NAME "sahara"
41 #define SAHARA_VERSION_3	3
42 #define SAHARA_VERSION_4	4
43 #define SAHARA_TIMEOUT_MS	1000
44 #define SAHARA_MAX_HW_DESC	2
45 #define SAHARA_MAX_HW_LINK	20
46 
47 #define FLAGS_MODE_MASK		0x000f
48 #define FLAGS_ENCRYPT		BIT(0)
49 #define FLAGS_CBC		BIT(1)
50 #define FLAGS_NEW_KEY		BIT(3)
51 
52 #define SAHARA_HDR_BASE			0x00800000
53 #define SAHARA_HDR_SKHA_ALG_AES	0
54 #define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
55 #define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
56 #define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
57 #define SAHARA_HDR_FORM_DATA		(5 << 16)
58 #define SAHARA_HDR_FORM_KEY		(8 << 16)
59 #define SAHARA_HDR_LLO			(1 << 24)
60 #define SAHARA_HDR_CHA_SKHA		(1 << 28)
61 #define SAHARA_HDR_CHA_MDHA		(2 << 28)
62 #define SAHARA_HDR_PARITY_BIT		(1 << 31)
63 
64 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
65 #define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
66 #define SAHARA_HDR_MDHA_HASH		0xA0850000
67 #define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
68 #define SAHARA_HDR_MDHA_ALG_SHA1	0
69 #define SAHARA_HDR_MDHA_ALG_MD5		1
70 #define SAHARA_HDR_MDHA_ALG_SHA256	2
71 #define SAHARA_HDR_MDHA_ALG_SHA224	3
72 #define SAHARA_HDR_MDHA_PDATA		(1 << 2)
73 #define SAHARA_HDR_MDHA_HMAC		(1 << 3)
74 #define SAHARA_HDR_MDHA_INIT		(1 << 5)
75 #define SAHARA_HDR_MDHA_IPAD		(1 << 6)
76 #define SAHARA_HDR_MDHA_OPAD		(1 << 7)
77 #define SAHARA_HDR_MDHA_SWAP		(1 << 8)
78 #define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
79 #define SAHARA_HDR_MDHA_SSL		(1 << 10)
80 
81 /* SAHARA can only process one request at a time */
82 #define SAHARA_QUEUE_LENGTH	1
83 
84 #define SAHARA_REG_VERSION	0x00
85 #define SAHARA_REG_DAR		0x04
86 #define SAHARA_REG_CONTROL	0x08
87 #define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
88 #define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
89 #define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
90 #define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
91 #define SAHARA_REG_CMD		0x0C
92 #define		SAHARA_CMD_RESET		(1 << 0)
93 #define		SAHARA_CMD_CLEAR_INT		(1 << 8)
94 #define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
95 #define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
96 #define		SAHARA_CMD_MODE_BATCH		(1 << 16)
97 #define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
98 #define	SAHARA_REG_STATUS	0x10
99 #define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
100 #define			SAHARA_STATE_IDLE	0
101 #define			SAHARA_STATE_BUSY	1
102 #define			SAHARA_STATE_ERR	2
103 #define			SAHARA_STATE_FAULT	3
104 #define			SAHARA_STATE_COMPLETE	4
105 #define			SAHARA_STATE_COMP_FLAG	(1 << 2)
106 #define		SAHARA_STATUS_DAR_FULL		(1 << 3)
107 #define		SAHARA_STATUS_ERROR		(1 << 4)
108 #define		SAHARA_STATUS_SECURE		(1 << 5)
109 #define		SAHARA_STATUS_FAIL		(1 << 6)
110 #define		SAHARA_STATUS_INIT		(1 << 7)
111 #define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
112 #define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
113 #define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
114 #define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
115 #define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
116 #define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
117 #define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
118 #define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
119 #define SAHARA_REG_ERRSTATUS	0x14
120 #define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
121 #define			SAHARA_ERRSOURCE_CHA	14
122 #define			SAHARA_ERRSOURCE_DMA	15
123 #define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
124 #define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
125 #define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
126 #define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
127 #define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
128 #define SAHARA_REG_FADDR	0x18
129 #define SAHARA_REG_CDAR		0x1C
130 #define SAHARA_REG_IDAR		0x20
131 
132 struct sahara_hw_desc {
133 	u32		hdr;
134 	u32		len1;
135 	dma_addr_t	p1;
136 	u32		len2;
137 	dma_addr_t	p2;
138 	dma_addr_t	next;
139 };
140 
141 struct sahara_hw_link {
142 	u32		len;
143 	dma_addr_t	p;
144 	dma_addr_t	next;
145 };
146 
147 struct sahara_ctx {
148 	unsigned long flags;
149 
150 	/* AES-specific context */
151 	int keylen;
152 	u8 key[AES_KEYSIZE_128];
153 	struct crypto_ablkcipher *fallback;
154 
155 	/* SHA-specific context */
156 	struct crypto_shash *shash_fallback;
157 };
158 
159 struct sahara_aes_reqctx {
160 	unsigned long mode;
161 };
162 
163 /*
164  * struct sahara_sha_reqctx - private data per request
165  * @buf: holds data for requests smaller than block_size
166  * @rembuf: used to prepare one block_size-aligned request
167  * @context: hw-specific context for request. Digest is extracted from this
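 * @mutex: serializes submission of this request to the queue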
168  * @mode: specifies what type of hw-descriptor needs to be built
169  * @digest_size: length of digest for this request
170  * @context_size: length of hw-context for this request.
171  *                Always digest_size + 4
172  * @buf_cnt: number of bytes saved in buf
173  * @sg_in_idx: number of hw links
174  * @in_sg: scatterlist for input data
175  * @in_sg_chain: scatterlists for chained input data
176  * @total: total number of bytes for transfer
177  * @last: is this the last block
178  * @first: is this the first block
179  * @active: inside a transfer
180  */
181 struct sahara_sha_reqctx {
182 	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
183 	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
184 	u8			context[SHA256_DIGEST_SIZE + 4];
185 	struct mutex		mutex;
186 	unsigned int		mode;
187 	unsigned int		digest_size;
188 	unsigned int		context_size;
189 	unsigned int		buf_cnt;
190 	unsigned int		sg_in_idx;
191 	struct scatterlist	*in_sg;
192 	struct scatterlist	in_sg_chain[2];
193 	size_t			total;
194 	unsigned int		last;
195 	unsigned int		first;
196 	unsigned int		active;
197 };
198 
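/* Driver state; only a single SAHARA instance is supported (see dev_ptr). */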
199 struct sahara_dev {
200 	struct device		*device;
201 	unsigned int		version;
202 	void __iomem		*regs_base;
203 	struct clk		*clk_ipg;
204 	struct clk		*clk_ahb;
205 	struct mutex		queue_mutex;
206 	struct task_struct	*kthread;
207 	struct completion	dma_completion;
208 
209 	struct sahara_ctx	*ctx;
210 	spinlock_t		lock;
211 	struct crypto_queue	queue;
212 	unsigned long		flags;
213 
214 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
215 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
216 
217 	u8			*key_base;
218 	dma_addr_t		key_phys_base;
219 
220 	u8			*iv_base;
221 	dma_addr_t		iv_phys_base;
222 
223 	u8			*context_base;
224 	dma_addr_t		context_phys_base;
225 
226 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
227 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
228 
229 	size_t			total;
230 	struct scatterlist	*in_sg;
231 	unsigned int		nb_in_sg;
232 	struct scatterlist	*out_sg;
233 	unsigned int		nb_out_sg;
234 
235 	u32			error;
236 };
237 
238 static struct sahara_dev *dev_ptr;
239 
240 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
241 {
242 	writel(data, dev->regs_base + reg);
243 }
244 
245 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
246 {
247 	return readl(dev->regs_base + reg);
248 }
249 
250 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
251 {
252 	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
253 			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
254 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
255 
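	/*
	 * Each mode bit set below flips the header parity, so toggle the
	 * parity bit as well to keep the header at odd parity.
	 */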
256 	if (dev->flags & FLAGS_CBC) {
257 		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
258 		hdr ^= SAHARA_HDR_PARITY_BIT;
259 	}
260 
261 	if (dev->flags & FLAGS_ENCRYPT) {
262 		hdr |= SAHARA_HDR_SKHA_OP_ENC;
263 		hdr ^= SAHARA_HDR_PARITY_BIT;
264 	}
265 
266 	return hdr;
267 }
268 
269 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
270 {
271 	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
272 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
273 }
274 
275 static const char *sahara_err_src[16] = {
276 	"No error",
277 	"Header error",
278 	"Descriptor length error",
279 	"Descriptor length or pointer error",
280 	"Link length error",
281 	"Link pointer error",
282 	"Input buffer error",
283 	"Output buffer error",
284 	"Output buffer starvation",
285 	"Internal state fault",
286 	"General descriptor problem",
287 	"Reserved",
288 	"Descriptor address error",
289 	"Link address error",
290 	"CHA error",
291 	"DMA error"
292 };
293 
294 static const char *sahara_err_dmasize[4] = {
295 	"Byte transfer",
296 	"Half-word transfer",
297 	"Word transfer",
298 	"Reserved"
299 };
300 
301 static const char *sahara_err_dmasrc[8] = {
302 	"No error",
303 	"AHB bus error",
304 	"Internal IP bus error",
305 	"Parity error",
306 	"DMA crosses 256 byte boundary",
307 	"DMA is busy",
308 	"Reserved",
309 	"DMA HW error"
310 };
311 
312 static const char *sahara_cha_errsrc[12] = {
313 	"Input buffer non-empty",
314 	"Illegal address",
315 	"Illegal mode",
316 	"Illegal data size",
317 	"Illegal key size",
318 	"Write during processing",
319 	"CTX read during processing",
320 	"HW error",
321 	"Input buffer disabled/underflow",
322 	"Output buffer disabled/overflow",
323 	"DES key parity error",
324 	"Reserved"
325 };
326 
327 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
328 
329 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
330 {
331 	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
332 	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
333 
334 	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
335 
336 	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
337 
338 	if (source == SAHARA_ERRSOURCE_DMA) {
339 		if (error & SAHARA_ERRSTATUS_DMA_DIR)
340 			dev_err(dev->device, "		* DMA read.\n");
341 		else
342 			dev_err(dev->device, "		* DMA write.\n");
343 
344 		dev_err(dev->device, "		* %s.\n",
345 		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
346 		dev_err(dev->device, "		* %s.\n",
347 		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
348 	} else if (source == SAHARA_ERRSOURCE_CHA) {
349 		dev_err(dev->device, "		* %s.\n",
350 			sahara_cha_errsrc[chasrc]);
351 		dev_err(dev->device, "		* %s.\n",
352 		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
353 	}
354 	dev_err(dev->device, "\n");
355 }
356 
357 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
358 
359 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
360 {
361 	u8 state;
362 
363 	if (!IS_ENABLED(DEBUG))
364 		return;
365 
366 	state = SAHARA_STATUS_GET_STATE(status);
367 
368 	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
369 		__func__, status);
370 
371 	dev_dbg(dev->device, "	- State = %d:\n", state);
372 	if (state & SAHARA_STATE_COMP_FLAG)
373 		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
374 
375 	dev_dbg(dev->device, "		* %s.\n",
376 	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
377 
378 	if (status & SAHARA_STATUS_DAR_FULL)
379 		dev_dbg(dev->device, "	- DAR Full.\n");
380 	if (status & SAHARA_STATUS_ERROR)
381 		dev_dbg(dev->device, "	- Error.\n");
382 	if (status & SAHARA_STATUS_SECURE)
383 		dev_dbg(dev->device, "	- Secure.\n");
384 	if (status & SAHARA_STATUS_FAIL)
385 		dev_dbg(dev->device, "	- Fail.\n");
386 	if (status & SAHARA_STATUS_RNG_RESEED)
387 		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
388 	if (status & SAHARA_STATUS_ACTIVE_RNG)
389 		dev_dbg(dev->device, "	- RNG Active.\n");
390 	if (status & SAHARA_STATUS_ACTIVE_MDHA)
391 		dev_dbg(dev->device, "	- MDHA Active.\n");
392 	if (status & SAHARA_STATUS_ACTIVE_SKHA)
393 		dev_dbg(dev->device, "	- SKHA Active.\n");
394 
395 	if (status & SAHARA_STATUS_MODE_BATCH)
396 		dev_dbg(dev->device, "	- Batch Mode.\n");
397 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
398 		dev_dbg(dev->device, "	- Dedicated Mode.\n");
399 	else if (status & SAHARA_STATUS_MODE_DEBUG)
400 		dev_dbg(dev->device, "	- Debug Mode.\n");
401 
402 	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
403 	       SAHARA_STATUS_GET_ISTATE(status));
404 
405 	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
406 		sahara_read(dev, SAHARA_REG_CDAR));
407 	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
408 		sahara_read(dev, SAHARA_REG_IDAR));
409 }
410 
411 static void sahara_dump_descriptors(struct sahara_dev *dev)
412 {
413 	int i;
414 
415 	if (!IS_ENABLED(DEBUG))
416 		return;
417 
418 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
419 		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
420 			i, dev->hw_phys_desc[i]);
421 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
422 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
423 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
424 		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
425 		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
426 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
427 			dev->hw_desc[i]->next);
428 	}
429 	dev_dbg(dev->device, "\n");
430 }
431 
432 static void sahara_dump_links(struct sahara_dev *dev)
433 {
434 	int i;
435 
436 	if (!IS_ENABLED(DEBUG))
437 		return;
438 
439 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
440 		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
441 			i, dev->hw_phys_link[i]);
442 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
443 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
444 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
445 			dev->hw_link[i]->next);
446 	}
447 	dev_dbg(dev->device, "\n");
448 }
449 
450 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
451 {
452 	struct sahara_ctx *ctx = dev->ctx;
453 	struct scatterlist *sg;
454 	int ret;
455 	int i, j;
456 	int idx = 0;
457 
458 	/* Copy new key if necessary */
459 	if (ctx->flags & FLAGS_NEW_KEY) {
460 		memcpy(dev->key_base, ctx->key, ctx->keylen);
461 		ctx->flags &= ~FLAGS_NEW_KEY;
462 
463 		if (dev->flags & FLAGS_CBC) {
464 			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
465 			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
466 		} else {
467 			dev->hw_desc[idx]->len1 = 0;
468 			dev->hw_desc[idx]->p1 = 0;
469 		}
470 		dev->hw_desc[idx]->len2 = ctx->keylen;
471 		dev->hw_desc[idx]->p2 = dev->key_phys_base;
472 		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
473 
474 		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
475 
476 		idx++;
477 	}
478 
479 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
480 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
481 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
482 		dev_err(dev->device, "not enough hw links (%d)\n",
483 			dev->nb_in_sg + dev->nb_out_sg);
484 		return -EINVAL;
485 	}
486 
487 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
488 			 DMA_TO_DEVICE);
489 	if (ret != dev->nb_in_sg) {
490 		dev_err(dev->device, "couldn't map in sg\n");
491 		return -EINVAL;
492 	}
493 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
494 			 DMA_FROM_DEVICE);
495 	if (ret != dev->nb_out_sg) {
496 		dev_err(dev->device, "couldn't map out sg\n");
497 		goto unmap_in;
498 	}
499 
500 	/* Create input links */
501 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
502 	sg = dev->in_sg;
503 	for (i = 0; i < dev->nb_in_sg; i++) {
504 		dev->hw_link[i]->len = sg->length;
505 		dev->hw_link[i]->p = sg->dma_address;
506 		if (i == (dev->nb_in_sg - 1)) {
507 			dev->hw_link[i]->next = 0;
508 		} else {
509 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
510 			sg = sg_next(sg);
511 		}
512 	}
513 
514 	/* Create output links */
515 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
516 	sg = dev->out_sg;
517 	for (j = i; j < dev->nb_out_sg + i; j++) {
518 		dev->hw_link[j]->len = sg->length;
519 		dev->hw_link[j]->p = sg->dma_address;
520 		if (j == (dev->nb_out_sg + i - 1)) {
521 			dev->hw_link[j]->next = 0;
522 		} else {
523 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
524 			sg = sg_next(sg);
525 		}
526 	}
527 
528 	/* Fill remaining fields of hw_desc[1] */
529 	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
530 	dev->hw_desc[idx]->len1 = dev->total;
531 	dev->hw_desc[idx]->len2 = dev->total;
532 	dev->hw_desc[idx]->next = 0;
533 
534 	sahara_dump_descriptors(dev);
535 	sahara_dump_links(dev);
536 
537 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
538 
539 	return 0;
540 
541 unmap_in:
542 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
543 		DMA_TO_DEVICE);
547 
548 	return -EINVAL;
549 }
550 
551 static int sahara_aes_process(struct ablkcipher_request *req)
552 {
553 	struct sahara_dev *dev = dev_ptr;
554 	struct sahara_ctx *ctx;
555 	struct sahara_aes_reqctx *rctx;
556 	int ret;
557 	unsigned long timeout;
558 
559 	/* Request is ready to be dispatched by the device */
560 	dev_dbg(dev->device,
561 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
562 		req->nbytes, req->src, req->dst);
563 
564 	/* assign new request to device */
565 	dev->total = req->nbytes;
566 	dev->in_sg = req->src;
567 	dev->out_sg = req->dst;
568 
569 	rctx = ablkcipher_request_ctx(req);
570 	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
571 	rctx->mode &= FLAGS_MODE_MASK;
572 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
573 
574 	if ((dev->flags & FLAGS_CBC) && req->info)
575 		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);
576 
577 	/* assign new context to device */
578 	dev->ctx = ctx;
579 
580 	reinit_completion(&dev->dma_completion);
581 
582 	ret = sahara_hw_descriptor_create(dev);
583 	if (ret)
584 		return -EINVAL;
585 
586 	timeout = wait_for_completion_timeout(&dev->dma_completion,
587 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
588 	if (!timeout) {
589 		dev_err(dev->device, "AES timeout\n");
590 		return -ETIMEDOUT;
591 	}
592 
593 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
594 		DMA_FROM_DEVICE);
595 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
596 		DMA_TO_DEVICE);
597 
598 	return 0;
599 }
600 
601 static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
602 			     unsigned int keylen)
603 {
604 	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
605 	int ret;
606 
607 	ctx->keylen = keylen;
608 
609 	/* SAHARA only supports 128-bit keys */
610 	if (keylen == AES_KEYSIZE_128) {
611 		memcpy(ctx->key, key, keylen);
612 		ctx->flags |= FLAGS_NEW_KEY;
613 		return 0;
614 	}
615 
616 	if (keylen != AES_KEYSIZE_128 &&
617 	    keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
618 		return -EINVAL;
619 
620 	/*
621 	 * The requested key size is not supported by HW, do a fallback.
622 	 */
623 	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
624 	ctx->fallback->base.crt_flags |=
625 		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
626 
627 	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
628 	if (ret) {
629 		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
630 
631 		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
632 		tfm_aux->crt_flags |=
633 			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
634 	}
635 	return ret;
636 }
637 
638 static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
639 {
640 	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
641 	struct sahara_dev *dev = dev_ptr;
642 	int err = 0;
643 
644 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
645 		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
646 
647 	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
648 		dev_err(dev->device,
649 			"request size is not exact amount of AES blocks\n");
650 		return -EINVAL;
651 	}
652 
653 	rctx->mode = mode;
654 
655 	mutex_lock(&dev->queue_mutex);
656 	err = ablkcipher_enqueue_request(&dev->queue, req);
657 	mutex_unlock(&dev->queue_mutex);
658 
659 	wake_up_process(dev->kthread);
660 
661 	return err;
662 }
663 
664 static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
665 {
666 	struct crypto_tfm *tfm =
667 		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
668 	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
669 		crypto_ablkcipher_reqtfm(req));
670 	int err;
671 
672 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
673 		ablkcipher_request_set_tfm(req, ctx->fallback);
674 		err = crypto_ablkcipher_encrypt(req);
675 		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
676 		return err;
677 	}
678 
679 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
680 }
681 
682 static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
683 {
684 	struct crypto_tfm *tfm =
685 		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
686 	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
687 		crypto_ablkcipher_reqtfm(req));
688 	int err;
689 
690 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
691 		ablkcipher_request_set_tfm(req, ctx->fallback);
692 		err = crypto_ablkcipher_decrypt(req);
693 		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
694 		return err;
695 	}
696 
697 	return sahara_aes_crypt(req, 0);
698 }
699 
700 static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
701 {
702 	struct crypto_tfm *tfm =
703 		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
704 	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
705 		crypto_ablkcipher_reqtfm(req));
706 	int err;
707 
708 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
709 		ablkcipher_request_set_tfm(req, ctx->fallback);
710 		err = crypto_ablkcipher_encrypt(req);
711 		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
712 		return err;
713 	}
714 
715 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
716 }
717 
718 static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
719 {
720 	struct crypto_tfm *tfm =
721 		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
722 	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
723 		crypto_ablkcipher_reqtfm(req));
724 	int err;
725 
726 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
727 		ablkcipher_request_set_tfm(req, ctx->fallback);
728 		err = crypto_ablkcipher_decrypt(req);
729 		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
730 		return err;
731 	}
732 
733 	return sahara_aes_crypt(req, FLAGS_CBC);
734 }
735 
736 static int sahara_aes_cra_init(struct crypto_tfm *tfm)
737 {
738 	const char *name = crypto_tfm_alg_name(tfm);
739 	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
740 
741 	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
742 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
743 	if (IS_ERR(ctx->fallback)) {
744 		pr_err("Error allocating fallback algo %s\n", name);
745 		return PTR_ERR(ctx->fallback);
746 	}
747 
748 	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
749 
750 	return 0;
751 }
752 
753 static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
754 {
755 	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
756 
757 	if (ctx->fallback)
758 		crypto_free_ablkcipher(ctx->fallback);
759 	ctx->fallback = NULL;
760 }
761 
762 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
763 			      struct sahara_sha_reqctx *rctx)
764 {
765 	u32 hdr = 0;
766 
767 	hdr = rctx->mode;
768 
769 	if (rctx->first) {
770 		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
771 		hdr |= SAHARA_HDR_MDHA_INIT;
772 	} else {
773 		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
774 	}
775 
776 	if (rctx->last)
777 		hdr |= SAHARA_HDR_MDHA_PDATA;
778 
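	/* keep the descriptor header at odd parity */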
779 	if (hweight_long(hdr) % 2 == 0)
780 		hdr |= SAHARA_HDR_PARITY_BIT;
781 
782 	return hdr;
783 }
784 
785 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
786 				       struct sahara_sha_reqctx *rctx,
787 				       int start)
788 {
789 	struct scatterlist *sg;
790 	unsigned int i;
791 	int ret;
792 
793 	dev->in_sg = rctx->in_sg;
794 
795 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
796 	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
797 		dev_err(dev->device, "not enough hw links (%d)\n",
798 			dev->nb_in_sg);
799 		return -EINVAL;
800 	}
801 
802 	sg = dev->in_sg;
803 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
804 	if (!ret)
805 		return -EFAULT;
806 
807 	for (i = start; i < dev->nb_in_sg + start; i++) {
808 		dev->hw_link[i]->len = sg->length;
809 		dev->hw_link[i]->p = sg->dma_address;
810 		if (i == (dev->nb_in_sg + start - 1)) {
811 			dev->hw_link[i]->next = 0;
812 		} else {
813 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
814 			sg = sg_next(sg);
815 		}
816 	}
817 
818 	return i;
819 }
820 
821 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
822 						struct sahara_sha_reqctx *rctx,
823 						struct ahash_request *req,
824 						int index)
825 {
826 	unsigned result_len;
827 	int i = index;
828 
829 	if (rctx->first)
830 		/* Create initial descriptor: #8 */
831 		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
832 	else
833 		/* Create hash descriptor: #10. Must follow #6. */
834 		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
835 
836 	dev->hw_desc[index]->len1 = rctx->total;
837 	if (dev->hw_desc[index]->len1 == 0) {
838 		/* if len1 is 0, p1 must be 0, too */
839 		dev->hw_desc[index]->p1 = 0;
840 		rctx->sg_in_idx = 0;
841 	} else {
842 		/* Create input links */
843 		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
844 		i = sahara_sha_hw_links_create(dev, rctx, index);
845 
846 		rctx->sg_in_idx = index;
847 		if (i < 0)
848 			return i;
849 	}
850 
851 	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
852 
853 	/* Save the context for the next operation */
854 	result_len = rctx->context_size;
855 	dev->hw_link[i]->p = dev->context_phys_base;
856 
857 	dev->hw_link[i]->len = result_len;
858 	dev->hw_desc[index]->len2 = result_len;
859 
860 	dev->hw_link[i]->next = 0;
861 
862 	return 0;
863 }
864 
865 /*
866  * Load descriptor aka #6
867  *
868  * To load a previously saved context back to the MDHA unit
869  *
870  * p1: Saved Context
871  * p2: NULL
872  *
873  */
874 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
875 						struct sahara_sha_reqctx *rctx,
876 						struct ahash_request *req,
877 						int index)
878 {
879 	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
880 
881 	dev->hw_desc[index]->len1 = rctx->context_size;
882 	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
883 	dev->hw_desc[index]->len2 = 0;
884 	dev->hw_desc[index]->p2 = 0;
885 
886 	dev->hw_link[index]->len = rctx->context_size;
887 	dev->hw_link[index]->p = dev->context_phys_base;
888 	dev->hw_link[index]->next = 0;
889 
890 	return 0;
891 }
892 
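/*
 * Shorten the scatterlist so that it describes exactly @nbytes of data:
 * the entry where the limit falls is truncated and marked as the end.
 */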
893 static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
894 {
895 	if (!sg || !sg->length)
896 		return nbytes;
897 
898 	while (nbytes && sg) {
899 		if (nbytes <= sg->length) {
900 			sg->length = nbytes;
901 			sg_mark_end(sg);
902 			break;
903 		}
904 		nbytes -= sg->length;
905 		sg = sg_next(sg);
906 	}
907 
908 	return nbytes;
909 }
910 
911 static int sahara_sha_prepare_request(struct ahash_request *req)
912 {
913 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
914 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
915 	unsigned int hash_later;
916 	unsigned int block_size;
917 	unsigned int len;
918 
919 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
920 
921 	/* append bytes from previous operation */
922 	len = rctx->buf_cnt + req->nbytes;
923 
924 	/* only the last transfer can be padded in hardware */
925 	if (!rctx->last && (len < block_size)) {
926 		/* too little data, save it for the next operation */
927 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
928 					 0, req->nbytes, 0);
929 		rctx->buf_cnt += req->nbytes;
930 
931 		return 0;
932 	}
933 
934 	/* add data from previous operation first */
935 	if (rctx->buf_cnt)
936 		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
937 
938 	/* data must always be a multiple of block_size */
939 	hash_later = rctx->last ? 0 : len & (block_size - 1);
940 	if (hash_later) {
941 		unsigned int offset = req->nbytes - hash_later;
942 		/* Save remaining bytes for later use */
943 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
944 					hash_later, 0);
945 	}
946 
947 	/* nbytes should now be multiple of blocksize */
948 	req->nbytes = req->nbytes - hash_later;
949 
950 	sahara_walk_and_recalc(req->src, req->nbytes);
951 
952 	/* have data from previous operation and current */
953 	if (rctx->buf_cnt && req->nbytes) {
954 		sg_init_table(rctx->in_sg_chain, 2);
955 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
956 
957 		sg_chain(rctx->in_sg_chain, 2, req->src);
958 
959 		rctx->total = req->nbytes + rctx->buf_cnt;
960 		rctx->in_sg = rctx->in_sg_chain;
961 
962 		req->src = rctx->in_sg_chain;
963 	/* only data from previous operation */
964 	} else if (rctx->buf_cnt) {
965 		if (req->src)
966 			rctx->in_sg = req->src;
967 		else
968 			rctx->in_sg = rctx->in_sg_chain;
969 		/* buf was copied into rembuf above */
970 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
971 		rctx->total = rctx->buf_cnt;
972 	/* no data from previous operation */
973 	} else {
974 		rctx->in_sg = req->src;
975 		rctx->total = req->nbytes;
976 		req->src = rctx->in_sg;
977 	}
978 
979 	/* on next call, we only have the remaining data in the buffer */
980 	rctx->buf_cnt = hash_later;
981 
982 	return -EINPROGRESS;
983 }
984 
985 static int sahara_sha_process(struct ahash_request *req)
986 {
987 	struct sahara_dev *dev = dev_ptr;
988 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
989 	int ret;
990 	unsigned long timeout;
991 
992 	ret = sahara_sha_prepare_request(req);
993 	if (!ret)
994 		return ret;
995 
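	/*
	 * The first chunk needs a single data descriptor; later chunks use
	 * two: one to reload the saved MDHA context (#6) and one to hash
	 * the new data (#10).
	 */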
996 	if (rctx->first) {
997 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
998 		dev->hw_desc[0]->next = 0;
999 		rctx->first = 0;
1000 	} else {
1001 		memcpy(dev->context_base, rctx->context, rctx->context_size);
1002 
1003 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1004 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1005 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1006 		dev->hw_desc[1]->next = 0;
1007 	}
1008 
1009 	sahara_dump_descriptors(dev);
1010 	sahara_dump_links(dev);
1011 
1012 	reinit_completion(&dev->dma_completion);
1013 
1014 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1015 
1016 	timeout = wait_for_completion_timeout(&dev->dma_completion,
1017 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1018 	if (!timeout) {
1019 		dev_err(dev->device, "SHA timeout\n");
1020 		return -ETIMEDOUT;
1021 	}
1022 
1023 	if (rctx->sg_in_idx)
1024 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1025 			     DMA_TO_DEVICE);
1026 
1027 	memcpy(rctx->context, dev->context_base, rctx->context_size);
1028 
1029 	if (req->result)
1030 		memcpy(req->result, rctx->context, rctx->digest_size);
1031 
1032 	return 0;
1033 }
1034 
1035 static int sahara_queue_manage(void *data)
1036 {
1037 	struct sahara_dev *dev = (struct sahara_dev *)data;
1038 	struct crypto_async_request *async_req;
1039 	struct crypto_async_request *backlog;
1040 	int ret = 0;
1041 
1042 	do {
1043 		__set_current_state(TASK_INTERRUPTIBLE);
1044 
1045 		mutex_lock(&dev->queue_mutex);
1046 		backlog = crypto_get_backlog(&dev->queue);
1047 		async_req = crypto_dequeue_request(&dev->queue);
1048 		mutex_unlock(&dev->queue_mutex);
1049 
1050 		if (backlog)
1051 			backlog->complete(backlog, -EINPROGRESS);
1052 
1053 		if (async_req) {
1054 			if (crypto_tfm_alg_type(async_req->tfm) ==
1055 			    CRYPTO_ALG_TYPE_AHASH) {
1056 				struct ahash_request *req =
1057 					ahash_request_cast(async_req);
1058 
1059 				ret = sahara_sha_process(req);
1060 			} else {
1061 				struct ablkcipher_request *req =
1062 					ablkcipher_request_cast(async_req);
1063 
1064 				ret = sahara_aes_process(req);
1065 			}
1066 
1067 			async_req->complete(async_req, ret);
1068 
1069 			continue;
1070 		}
1071 
1072 		schedule();
1073 	} while (!kthread_should_stop());
1074 
1075 	return 0;
1076 }
1077 
1078 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1079 {
1080 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1081 	struct sahara_dev *dev = dev_ptr;
1082 	int ret;
1083 
1084 	if (!req->nbytes && !last)
1085 		return 0;
1086 
1087 	mutex_lock(&rctx->mutex);
1088 	rctx->last = last;
1089 
1090 	if (!rctx->active) {
1091 		rctx->active = 1;
1092 		rctx->first = 1;
1093 	}
1094 
1095 	mutex_lock(&dev->queue_mutex);
1096 	ret = crypto_enqueue_request(&dev->queue, &req->base);
1097 	mutex_unlock(&dev->queue_mutex);
1098 
1099 	wake_up_process(dev->kthread);
1100 	mutex_unlock(&rctx->mutex);
1101 
1102 	return ret;
1103 }
1104 
1105 static int sahara_sha_init(struct ahash_request *req)
1106 {
1107 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1108 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1109 
1110 	memset(rctx, 0, sizeof(*rctx));
1111 
1112 	switch (crypto_ahash_digestsize(tfm)) {
1113 	case SHA1_DIGEST_SIZE:
1114 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1115 		rctx->digest_size = SHA1_DIGEST_SIZE;
1116 		break;
1117 	case SHA256_DIGEST_SIZE:
1118 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1119 		rctx->digest_size = SHA256_DIGEST_SIZE;
1120 		break;
1121 	default:
1122 		return -EINVAL;
1123 	}
1124 
1125 	rctx->context_size = rctx->digest_size + 4;
1126 	rctx->active = 0;
1127 
1128 	mutex_init(&rctx->mutex);
1129 
1130 	return 0;
1131 }
1132 
1133 static int sahara_sha_update(struct ahash_request *req)
1134 {
1135 	return sahara_sha_enqueue(req, 0);
1136 }
1137 
1138 static int sahara_sha_final(struct ahash_request *req)
1139 {
1140 	req->nbytes = 0;
1141 	return sahara_sha_enqueue(req, 1);
1142 }
1143 
1144 static int sahara_sha_finup(struct ahash_request *req)
1145 {
1146 	return sahara_sha_enqueue(req, 1);
1147 }
1148 
1149 static int sahara_sha_digest(struct ahash_request *req)
1150 {
1151 	sahara_sha_init(req);
1152 
1153 	return sahara_sha_finup(req);
1154 }
1155 
1156 static int sahara_sha_export(struct ahash_request *req, void *out)
1157 {
1158 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1159 	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
1160 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1161 
1162 	memcpy(out, ctx, sizeof(struct sahara_ctx));
1163 	memcpy(out + sizeof(struct sahara_ctx), rctx,
1164 	       sizeof(struct sahara_sha_reqctx));
1165 
1166 	return 0;
1167 }
1168 
1169 static int sahara_sha_import(struct ahash_request *req, const void *in)
1170 {
1171 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1172 	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
1173 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1174 
1175 	memcpy(ctx, in, sizeof(struct sahara_ctx));
1176 	memcpy(rctx, in + sizeof(struct sahara_ctx),
1177 	       sizeof(struct sahara_sha_reqctx));
1178 
1179 	return 0;
1180 }
1181 
1182 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1183 {
1184 	const char *name = crypto_tfm_alg_name(tfm);
1185 	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
1186 
1187 	ctx->shash_fallback = crypto_alloc_shash(name, 0,
1188 					CRYPTO_ALG_NEED_FALLBACK);
1189 	if (IS_ERR(ctx->shash_fallback)) {
1190 		pr_err("Error allocating fallback algo %s\n", name);
1191 		return PTR_ERR(ctx->shash_fallback);
1192 	}
1193 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1194 				 sizeof(struct sahara_sha_reqctx) +
1195 				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1196 
1197 	return 0;
1198 }
1199 
1200 static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
1201 {
1202 	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
1203 
1204 	crypto_free_shash(ctx->shash_fallback);
1205 	ctx->shash_fallback = NULL;
1206 }
1207 
1208 static struct crypto_alg aes_algs[] = {
1209 {
1210 	.cra_name		= "ecb(aes)",
1211 	.cra_driver_name	= "sahara-ecb-aes",
1212 	.cra_priority		= 300,
1213 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
1214 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1215 	.cra_blocksize		= AES_BLOCK_SIZE,
1216 	.cra_ctxsize		= sizeof(struct sahara_ctx),
1217 	.cra_alignmask		= 0x0,
1218 	.cra_type		= &crypto_ablkcipher_type,
1219 	.cra_module		= THIS_MODULE,
1220 	.cra_init		= sahara_aes_cra_init,
1221 	.cra_exit		= sahara_aes_cra_exit,
1222 	.cra_u.ablkcipher = {
1223 		.min_keysize	= AES_MIN_KEY_SIZE ,
1224 		.max_keysize	= AES_MAX_KEY_SIZE,
1225 		.setkey		= sahara_aes_setkey,
1226 		.encrypt	= sahara_aes_ecb_encrypt,
1227 		.decrypt	= sahara_aes_ecb_decrypt,
1228 	}
1229 }, {
1230 	.cra_name		= "cbc(aes)",
1231 	.cra_driver_name	= "sahara-cbc-aes",
1232 	.cra_priority		= 300,
1233 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
1234 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1235 	.cra_blocksize		= AES_BLOCK_SIZE,
1236 	.cra_ctxsize		= sizeof(struct sahara_ctx),
1237 	.cra_alignmask		= 0x0,
1238 	.cra_type		= &crypto_ablkcipher_type,
1239 	.cra_module		= THIS_MODULE,
1240 	.cra_init		= sahara_aes_cra_init,
1241 	.cra_exit		= sahara_aes_cra_exit,
1242 	.cra_u.ablkcipher = {
1243 		.min_keysize	= AES_MIN_KEY_SIZE ,
1244 		.max_keysize	= AES_MAX_KEY_SIZE,
1245 		.ivsize		= AES_BLOCK_SIZE,
1246 		.setkey		= sahara_aes_setkey,
1247 		.encrypt	= sahara_aes_cbc_encrypt,
1248 		.decrypt	= sahara_aes_cbc_decrypt,
1249 	}
1250 }
1251 };
1252 
1253 static struct ahash_alg sha_v3_algs[] = {
1254 {
1255 	.init		= sahara_sha_init,
1256 	.update		= sahara_sha_update,
1257 	.final		= sahara_sha_final,
1258 	.finup		= sahara_sha_finup,
1259 	.digest		= sahara_sha_digest,
1260 	.export		= sahara_sha_export,
1261 	.import		= sahara_sha_import,
1262 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1263 	.halg.base	= {
1264 		.cra_name		= "sha1",
1265 		.cra_driver_name	= "sahara-sha1",
1266 		.cra_priority		= 300,
1267 		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1268 						CRYPTO_ALG_ASYNC |
1269 						CRYPTO_ALG_NEED_FALLBACK,
1270 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1271 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1272 		.cra_alignmask		= 0,
1273 		.cra_module		= THIS_MODULE,
1274 		.cra_init		= sahara_sha_cra_init,
1275 		.cra_exit		= sahara_sha_cra_exit,
1276 	}
1277 },
1278 };
1279 
1280 static struct ahash_alg sha_v4_algs[] = {
1281 {
1282 	.init		= sahara_sha_init,
1283 	.update		= sahara_sha_update,
1284 	.final		= sahara_sha_final,
1285 	.finup		= sahara_sha_finup,
1286 	.digest		= sahara_sha_digest,
1287 	.export		= sahara_sha_export,
1288 	.import		= sahara_sha_import,
1289 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1290 	.halg.base	= {
1291 		.cra_name		= "sha256",
1292 		.cra_driver_name	= "sahara-sha256",
1293 		.cra_priority		= 300,
1294 		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1295 						CRYPTO_ALG_ASYNC |
1296 						CRYPTO_ALG_NEED_FALLBACK,
1297 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1298 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1299 		.cra_alignmask		= 0,
1300 		.cra_module		= THIS_MODULE,
1301 		.cra_init		= sahara_sha_cra_init,
1302 		.cra_exit		= sahara_sha_cra_exit,
1303 	}
1304 },
1305 };
1306 
1307 static irqreturn_t sahara_irq_handler(int irq, void *data)
1308 {
1309 	struct sahara_dev *dev = (struct sahara_dev *)data;
1310 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1311 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1312 
1313 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1314 		     SAHARA_REG_CMD);
1315 
1316 	sahara_decode_status(dev, stat);
1317 
1318 	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1319 		return IRQ_NONE;
1320 	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1321 		dev->error = 0;
1322 	} else {
1323 		sahara_decode_error(dev, err);
1324 		dev->error = -EINVAL;
1325 	}
1326 
1327 	complete(&dev->dma_completion);
1328 
1329 	return IRQ_HANDLED;
1330 }
1331 
1332 
1333 static int sahara_register_algs(struct sahara_dev *dev)
1334 {
1335 	int err;
1336 	unsigned int i, j, k, l;
1337 
1338 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1339 		INIT_LIST_HEAD(&aes_algs[i].cra_list);
1340 		err = crypto_register_alg(&aes_algs[i]);
1341 		if (err)
1342 			goto err_aes_algs;
1343 	}
1344 
1345 	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1346 		err = crypto_register_ahash(&sha_v3_algs[k]);
1347 		if (err)
1348 			goto err_sha_v3_algs;
1349 	}
1350 
1351 	if (dev->version > SAHARA_VERSION_3)
1352 		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1353 			err = crypto_register_ahash(&sha_v4_algs[l]);
1354 			if (err)
1355 				goto err_sha_v4_algs;
1356 		}
1357 
1358 	return 0;
1359 
1360 err_sha_v4_algs:
1361 	for (j = 0; j < l; j++)
1362 		crypto_unregister_ahash(&sha_v4_algs[j]);
1363 
1364 err_sha_v3_algs:
1365 	for (j = 0; j < k; j++)
1366 		crypto_unregister_ahash(&sha_v3_algs[j]);
1367 
1368 err_aes_algs:
1369 	for (j = 0; j < i; j++)
1370 		crypto_unregister_alg(&aes_algs[j]);
1371 
1372 	return err;
1373 }
1374 
1375 static void sahara_unregister_algs(struct sahara_dev *dev)
1376 {
1377 	unsigned int i;
1378 
1379 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1380 		crypto_unregister_alg(&aes_algs[i]);
1381 
1382 	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1383 		crypto_unregister_ahash(&sha_v3_algs[i]);
1384 
1385 	if (dev->version > SAHARA_VERSION_3)
1386 		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1387 			crypto_unregister_ahash(&sha_v4_algs[i]);
1388 }
1389 
1390 static struct platform_device_id sahara_platform_ids[] = {
1391 	{ .name = "sahara-imx27" },
1392 	{ /* sentinel */ }
1393 };
1394 MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
1395 
1396 static struct of_device_id sahara_dt_ids[] = {
1397 	{ .compatible = "fsl,imx53-sahara" },
1398 	{ .compatible = "fsl,imx27-sahara" },
1399 	{ /* sentinel */ }
1400 };
1401 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1402 
1403 static int sahara_probe(struct platform_device *pdev)
1404 {
1405 	struct sahara_dev *dev;
1406 	struct resource *res;
1407 	u32 version;
1408 	int irq;
1409 	int err;
1410 	int i;
1411 
1412 	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
1413 	if (dev == NULL) {
1414 		dev_err(&pdev->dev, "unable to alloc data struct.\n");
1415 		return -ENOMEM;
1416 	}
1417 
1418 	dev->device = &pdev->dev;
1419 	platform_set_drvdata(pdev, dev);
1420 
1421 	/* Get the base address */
1422 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1423 	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
1424 	if (IS_ERR(dev->regs_base))
1425 		return PTR_ERR(dev->regs_base);
1426 
1427 	/* Get the IRQ */
1428 	irq = platform_get_irq(pdev, 0);
1429 	if (irq < 0) {
1430 		dev_err(&pdev->dev, "failed to get irq resource\n");
1431 		return irq;
1432 	}
1433 
1434 	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1435 			       0, dev_name(&pdev->dev), dev);
1436 	if (err) {
1437 		dev_err(&pdev->dev, "failed to request irq\n");
1438 		return err;
1439 	}
1440 
1441 	/* clocks */
1442 	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1443 	if (IS_ERR(dev->clk_ipg)) {
1444 		dev_err(&pdev->dev, "Could not get ipg clock\n");
1445 		return PTR_ERR(dev->clk_ipg);
1446 	}
1447 
1448 	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1449 	if (IS_ERR(dev->clk_ahb)) {
1450 		dev_err(&pdev->dev, "Could not get ahb clock\n");
1451 		return PTR_ERR(dev->clk_ahb);
1452 	}
1453 
1454 	/* Allocate HW descriptors */
1455 	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1456 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1457 			&dev->hw_phys_desc[0], GFP_KERNEL);
1458 	if (!dev->hw_desc[0]) {
1459 		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1460 		return -ENOMEM;
1461 	}
1462 	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1463 	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1464 				sizeof(struct sahara_hw_desc);
1465 
1466 	/* Allocate space for iv and key */
1467 	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1468 				&dev->key_phys_base, GFP_KERNEL);
1469 	if (!dev->key_base) {
1470 		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1471 		return -ENOMEM;
1472 	}
1473 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1474 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1475 
1476 	/* Allocate space for context: largest digest + message length field */
1477 	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1478 					SHA256_DIGEST_SIZE + 4,
1479 					&dev->context_phys_base, GFP_KERNEL);
1480 	if (!dev->context_base) {
1481 		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1482 		return -ENOMEM;
1483 	}
1484 
1485 	/* Allocate space for HW links */
1486 	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1487 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1488 			&dev->hw_phys_link[0], GFP_KERNEL);
1489 	if (!dev->hw_link[0]) {
1490 		dev_err(&pdev->dev, "Could not allocate hw links\n");
1491 		return -ENOMEM;
1492 	}
1493 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1494 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1495 					sizeof(struct sahara_hw_link);
1496 		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1497 	}
1498 
1499 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1500 
1501 	spin_lock_init(&dev->lock);
1502 	mutex_init(&dev->queue_mutex);
1503 
1504 	dev_ptr = dev;
1505 
1506 	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1507 	if (IS_ERR(dev->kthread)) {
1508 		return PTR_ERR(dev->kthread);
1509 	}
1510 
1511 	init_completion(&dev->dma_completion);
1512 
1513 	err = clk_prepare_enable(dev->clk_ipg);
1514 	if (err)
1515 		return err;
1516 	err = clk_prepare_enable(dev->clk_ahb);
1517 	if (err)
1518 		goto clk_ipg_disable;
1519 
1520 	version = sahara_read(dev, SAHARA_REG_VERSION);
1521 	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1522 		if (version != SAHARA_VERSION_3)
1523 			err = -ENODEV;
1524 	} else if (of_device_is_compatible(pdev->dev.of_node,
1525 			"fsl,imx53-sahara")) {
1526 		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1527 			err = -ENODEV;
1528 		version = (version >> 8) & 0xff;
1529 	}
1530 	if (err == -ENODEV) {
1531 		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1532 				version);
1533 		goto err_algs;
1534 	}
1535 
1536 	dev->version = version;
1537 
1538 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1539 		     SAHARA_REG_CMD);
1540 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1541 			SAHARA_CONTROL_SET_MAXBURST(8) |
1542 			SAHARA_CONTROL_RNG_AUTORSD |
1543 			SAHARA_CONTROL_ENABLE_INT,
1544 			SAHARA_REG_CONTROL);
1545 
1546 	err = sahara_register_algs(dev);
1547 	if (err)
1548 		goto err_algs;
1549 
1550 	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1551 
1552 	return 0;
1553 
1554 err_algs:
1555 	kthread_stop(dev->kthread);
1556 	dev_ptr = NULL;
1557 	clk_disable_unprepare(dev->clk_ahb);
1558 clk_ipg_disable:
1559 	clk_disable_unprepare(dev->clk_ipg);
1560 
1561 	return err;
1562 }
1563 
1564 static int sahara_remove(struct platform_device *pdev)
1565 {
1566 	struct sahara_dev *dev = platform_get_drvdata(pdev);
1567 
1568 	kthread_stop(dev->kthread);
1569 
1570 	sahara_unregister_algs(dev);
1571 
1572 	clk_disable_unprepare(dev->clk_ipg);
1573 	clk_disable_unprepare(dev->clk_ahb);
1574 
1575 	dev_ptr = NULL;
1576 
1577 	return 0;
1578 }
1579 
1580 static struct platform_driver sahara_driver = {
1581 	.probe		= sahara_probe,
1582 	.remove		= sahara_remove,
1583 	.driver		= {
1584 		.name	= SAHARA_NAME,
1585 		.of_match_table = sahara_dt_ids,
1586 	},
1587 	.id_table = sahara_platform_ids,
1588 };
1589 
1590 module_platform_driver(sahara_driver);
1591 
1592 MODULE_LICENSE("GPL");
1593 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1594 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1595 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1596