1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Support for SAHARA cryptographic accelerator.
6  *
7  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8  * Copyright (c) 2013 Vista Silicon S.L.
9  * Author: Javier Martin <javier.martin@vista-silicon.com>
10  *
11  * Based on omap-aes.c and tegra-aes.c
12  */
13 
14 #include <crypto/aes.h>
15 #include <crypto/internal/hash.h>
16 #include <crypto/internal/skcipher.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/sha1.h>
19 #include <crypto/sha2.h>
20 
21 #include <linux/clk.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/irq.h>
26 #include <linux/kernel.h>
27 #include <linux/kthread.h>
28 #include <linux/module.h>
29 #include <linux/of.h>
30 #include <linux/of_device.h>
31 #include <linux/platform_device.h>
32 #include <linux/spinlock.h>
33 
34 #define SHA_BUFFER_LEN		PAGE_SIZE
35 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
36 
37 #define SAHARA_NAME "sahara"
38 #define SAHARA_VERSION_3	3
39 #define SAHARA_VERSION_4	4
40 #define SAHARA_TIMEOUT_MS	1000
41 #define SAHARA_MAX_HW_DESC	2
42 #define SAHARA_MAX_HW_LINK	20
43 
44 #define FLAGS_MODE_MASK		0x000f
45 #define FLAGS_ENCRYPT		BIT(0)
46 #define FLAGS_CBC		BIT(1)
47 
48 #define SAHARA_HDR_BASE			0x00800000
49 #define SAHARA_HDR_SKHA_ALG_AES	0
50 #define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
51 #define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
52 #define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
53 #define SAHARA_HDR_FORM_DATA		(5 << 16)
54 #define SAHARA_HDR_FORM_KEY		(8 << 16)
55 #define SAHARA_HDR_LLO			(1 << 24)
56 #define SAHARA_HDR_CHA_SKHA		(1 << 28)
57 #define SAHARA_HDR_CHA_MDHA		(2 << 28)
58 #define SAHARA_HDR_PARITY_BIT		(1 << 31)
59 
60 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
61 #define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
62 #define SAHARA_HDR_MDHA_HASH		0xA0850000
63 #define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
64 #define SAHARA_HDR_MDHA_ALG_SHA1	0
65 #define SAHARA_HDR_MDHA_ALG_MD5		1
66 #define SAHARA_HDR_MDHA_ALG_SHA256	2
67 #define SAHARA_HDR_MDHA_ALG_SHA224	3
68 #define SAHARA_HDR_MDHA_PDATA		(1 << 2)
69 #define SAHARA_HDR_MDHA_HMAC		(1 << 3)
70 #define SAHARA_HDR_MDHA_INIT		(1 << 5)
71 #define SAHARA_HDR_MDHA_IPAD		(1 << 6)
72 #define SAHARA_HDR_MDHA_OPAD		(1 << 7)
73 #define SAHARA_HDR_MDHA_SWAP		(1 << 8)
74 #define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
75 #define SAHARA_HDR_MDHA_SSL		(1 << 10)
76 
77 /* SAHARA can only process one request at a time */
78 #define SAHARA_QUEUE_LENGTH	1
79 
80 #define SAHARA_REG_VERSION	0x00
81 #define SAHARA_REG_DAR		0x04
82 #define SAHARA_REG_CONTROL	0x08
83 #define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
84 #define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
85 #define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
86 #define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
87 #define SAHARA_REG_CMD		0x0C
88 #define		SAHARA_CMD_RESET		(1 << 0)
89 #define		SAHARA_CMD_CLEAR_INT		(1 << 8)
90 #define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
91 #define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
92 #define		SAHARA_CMD_MODE_BATCH		(1 << 16)
93 #define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
94 #define	SAHARA_REG_STATUS	0x10
95 #define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
96 #define			SAHARA_STATE_IDLE	0
97 #define			SAHARA_STATE_BUSY	1
98 #define			SAHARA_STATE_ERR	2
99 #define			SAHARA_STATE_FAULT	3
100 #define			SAHARA_STATE_COMPLETE	4
101 #define			SAHARA_STATE_COMP_FLAG	(1 << 2)
102 #define		SAHARA_STATUS_DAR_FULL		(1 << 3)
103 #define		SAHARA_STATUS_ERROR		(1 << 4)
104 #define		SAHARA_STATUS_SECURE		(1 << 5)
105 #define		SAHARA_STATUS_FAIL		(1 << 6)
106 #define		SAHARA_STATUS_INIT		(1 << 7)
107 #define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
108 #define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
109 #define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
110 #define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
111 #define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
112 #define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
113 #define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
114 #define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
115 #define SAHARA_REG_ERRSTATUS	0x14
116 #define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
117 #define			SAHARA_ERRSOURCE_CHA	14
118 #define			SAHARA_ERRSOURCE_DMA	15
119 #define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
120 #define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
121 #define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
122 #define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
123 #define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
124 #define SAHARA_REG_FADDR	0x18
125 #define SAHARA_REG_CDAR		0x1C
126 #define SAHARA_REG_IDAR		0x20
127 
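/*
 * Hardware descriptor as fetched by the SAHARA DMA engine: a header word,
 * two length/pointer pairs and the DMA address of the next descriptor in
 * the chain (0 terminates the chain). The pointers may reference tables
 * of struct sahara_hw_link entries describing scattered buffers.
 */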
128 struct sahara_hw_desc {
129 	u32	hdr;
130 	u32	len1;
131 	u32	p1;
132 	u32	len2;
133 	u32	p2;
134 	u32	next;
135 };
136 
137 struct sahara_hw_link {
138 	u32	len;
139 	u32	p;
140 	u32	next;
141 };
142 
143 struct sahara_ctx {
144 	/* AES-specific context */
145 	int keylen;
146 	u8 key[AES_KEYSIZE_128];
147 	struct crypto_skcipher *fallback;
148 };
149 
150 struct sahara_aes_reqctx {
151 	unsigned long mode;
152 	u8 iv_out[AES_BLOCK_SIZE];
153 	struct skcipher_request fallback_req;	// keep at the end
154 };
155 
156 /*
157  * struct sahara_sha_reqctx - private data per request
158  * @buf: holds data for requests smaller than block_size
159  * @rembuf: used to prepare one block_size-aligned request
160  * @context: hw-specific context for request. Digest is extracted from this
161  * @mode: specifies what type of hw-descriptor needs to be built
162  * @digest_size: length of digest for this request
163  * @context_size: length of hw-context for this request.
164  *                Always digest_size + 4
165  * @buf_cnt: number of bytes saved in buf
166  * @sg_in_idx: number of hw links
167  * @in_sg: scatterlist for input data
168  * @in_sg_chain: scatterlists for chained input data
169  * @total: total number of bytes for transfer
170  * @last: is this the last block
171  * @first: is this the first block
172  * @active: inside a transfer
173  */
174 struct sahara_sha_reqctx {
175 	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
176 	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
177 	u8			context[SHA256_DIGEST_SIZE + 4];
178 	unsigned int		mode;
179 	unsigned int		digest_size;
180 	unsigned int		context_size;
181 	unsigned int		buf_cnt;
182 	unsigned int		sg_in_idx;
183 	struct scatterlist	*in_sg;
184 	struct scatterlist	in_sg_chain[2];
185 	size_t			total;
186 	unsigned int		last;
187 	unsigned int		first;
188 	unsigned int		active;
189 };
190 
191 struct sahara_dev {
192 	struct device		*device;
193 	unsigned int		version;
194 	void __iomem		*regs_base;
195 	struct clk		*clk_ipg;
196 	struct clk		*clk_ahb;
197 	spinlock_t		queue_spinlock;
198 	struct task_struct	*kthread;
199 	struct completion	dma_completion;
200 
201 	struct sahara_ctx	*ctx;
202 	struct crypto_queue	queue;
203 	unsigned long		flags;
204 
205 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
206 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
207 
208 	u8			*key_base;
209 	dma_addr_t		key_phys_base;
210 
211 	u8			*iv_base;
212 	dma_addr_t		iv_phys_base;
213 
214 	u8			*context_base;
215 	dma_addr_t		context_phys_base;
216 
217 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
218 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
219 
220 	size_t			total;
221 	struct scatterlist	*in_sg;
222 	int		nb_in_sg;
223 	struct scatterlist	*out_sg;
224 	int		nb_out_sg;
225 
226 	u32			error;
227 };
228 
229 static struct sahara_dev *dev_ptr;
230 
231 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
232 {
233 	writel(data, dev->regs_base + reg);
234 }
235 
236 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
237 {
238 	return readl(dev->regs_base + reg);
239 }
240 
241 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
242 {
243 	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
244 			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
245 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
246 
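	/*
	 * Every mode bit ORed in below toggles the parity bit so that the
	 * header keeps odd parity.
	 */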
247 	if (dev->flags & FLAGS_CBC) {
248 		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
249 		hdr ^= SAHARA_HDR_PARITY_BIT;
250 	}
251 
252 	if (dev->flags & FLAGS_ENCRYPT) {
253 		hdr |= SAHARA_HDR_SKHA_OP_ENC;
254 		hdr ^= SAHARA_HDR_PARITY_BIT;
255 	}
256 
257 	return hdr;
258 }
259 
260 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
261 {
262 	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
263 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
264 }
265 
266 static const char *sahara_err_src[16] = {
267 	"No error",
268 	"Header error",
269 	"Descriptor length error",
270 	"Descriptor length or pointer error",
271 	"Link length error",
272 	"Link pointer error",
273 	"Input buffer error",
274 	"Output buffer error",
275 	"Output buffer starvation",
276 	"Internal state fault",
277 	"General descriptor problem",
278 	"Reserved",
279 	"Descriptor address error",
280 	"Link address error",
281 	"CHA error",
282 	"DMA error"
283 };
284 
285 static const char *sahara_err_dmasize[4] = {
286 	"Byte transfer",
287 	"Half-word transfer",
288 	"Word transfer",
289 	"Reserved"
290 };
291 
292 static const char *sahara_err_dmasrc[8] = {
293 	"No error",
294 	"AHB bus error",
295 	"Internal IP bus error",
296 	"Parity error",
297 	"DMA crosses 256 byte boundary",
298 	"DMA is busy",
299 	"Reserved",
300 	"DMA HW error"
301 };
302 
303 static const char *sahara_cha_errsrc[12] = {
304 	"Input buffer non-empty",
305 	"Illegal address",
306 	"Illegal mode",
307 	"Illegal data size",
308 	"Illegal key size",
309 	"Write during processing",
310 	"CTX read during processing",
311 	"HW error",
312 	"Input buffer disabled/underflow",
313 	"Output buffer disabled/overflow",
314 	"DES key parity error",
315 	"Reserved"
316 };
317 
318 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
319 
320 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
321 {
322 	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
323 	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
324 
325 	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
326 
327 	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
328 
329 	if (source == SAHARA_ERRSOURCE_DMA) {
330 		if (error & SAHARA_ERRSTATUS_DMA_DIR)
331 			dev_err(dev->device, "		* DMA read.\n");
332 		else
333 			dev_err(dev->device, "		* DMA write.\n");
334 
335 		dev_err(dev->device, "		* %s.\n",
336 		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
337 		dev_err(dev->device, "		* %s.\n",
338 		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
339 	} else if (source == SAHARA_ERRSOURCE_CHA) {
340 		dev_err(dev->device, "		* %s.\n",
341 			sahara_cha_errsrc[chasrc]);
342 		dev_err(dev->device, "		* %s.\n",
343 		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
344 	}
345 	dev_err(dev->device, "\n");
346 }
347 
348 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
349 
350 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
351 {
352 	u8 state;
353 
354 	if (!__is_defined(DEBUG))
355 		return;
356 
357 	state = SAHARA_STATUS_GET_STATE(status);
358 
359 	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
360 		__func__, status);
361 
362 	dev_dbg(dev->device, "	- State = %d:\n", state);
363 	if (state & SAHARA_STATE_COMP_FLAG)
364 		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
365 
366 	dev_dbg(dev->device, "		* %s.\n",
367 	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
368 
369 	if (status & SAHARA_STATUS_DAR_FULL)
370 		dev_dbg(dev->device, "	- DAR Full.\n");
371 	if (status & SAHARA_STATUS_ERROR)
372 		dev_dbg(dev->device, "	- Error.\n");
373 	if (status & SAHARA_STATUS_SECURE)
374 		dev_dbg(dev->device, "	- Secure.\n");
375 	if (status & SAHARA_STATUS_FAIL)
376 		dev_dbg(dev->device, "	- Fail.\n");
377 	if (status & SAHARA_STATUS_RNG_RESEED)
378 		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
379 	if (status & SAHARA_STATUS_ACTIVE_RNG)
380 		dev_dbg(dev->device, "	- RNG Active.\n");
381 	if (status & SAHARA_STATUS_ACTIVE_MDHA)
382 		dev_dbg(dev->device, "	- MDHA Active.\n");
383 	if (status & SAHARA_STATUS_ACTIVE_SKHA)
384 		dev_dbg(dev->device, "	- SKHA Active.\n");
385 
386 	if (status & SAHARA_STATUS_MODE_BATCH)
387 		dev_dbg(dev->device, "	- Batch Mode.\n");
388 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
389 		dev_dbg(dev->device, "	- Dedicated Mode.\n");
390 	else if (status & SAHARA_STATUS_MODE_DEBUG)
391 		dev_dbg(dev->device, "	- Debug Mode.\n");
392 
393 	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
394 	       SAHARA_STATUS_GET_ISTATE(status));
395 
396 	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
397 		sahara_read(dev, SAHARA_REG_CDAR));
398 	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
399 		sahara_read(dev, SAHARA_REG_IDAR));
400 }
401 
402 static void sahara_dump_descriptors(struct sahara_dev *dev)
403 {
404 	int i;
405 
406 	if (!__is_defined(DEBUG))
407 		return;
408 
409 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
410 		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
411 			i, &dev->hw_phys_desc[i]);
412 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
413 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
414 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
415 		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
416 		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
417 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
418 			dev->hw_desc[i]->next);
419 	}
420 	dev_dbg(dev->device, "\n");
421 }
422 
423 static void sahara_dump_links(struct sahara_dev *dev)
424 {
425 	int i;
426 
427 	if (!__is_defined(DEBUG))
428 		return;
429 
430 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
431 		dev_dbg(dev->device, "Link (%d) (%pad):\n",
432 			i, &dev->hw_phys_link[i]);
433 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
434 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
435 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
436 			dev->hw_link[i]->next);
437 	}
438 	dev_dbg(dev->device, "\n");
439 }
440 
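/*
 * Build the two-descriptor AES chain: hw_desc[0] loads the key (and the IV
 * when in CBC mode) into the SKHA, hw_desc[1] describes the data transfer
 * through the input/output link tables. Processing is started by writing
 * the first descriptor's DMA address to the DAR register.
 */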
441 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
442 {
443 	struct sahara_ctx *ctx = dev->ctx;
444 	struct scatterlist *sg;
445 	int ret;
446 	int i, j;
447 	int idx = 0;
448 	u32 len;
449 
450 	memcpy(dev->key_base, ctx->key, ctx->keylen);
451 
452 	if (dev->flags & FLAGS_CBC) {
453 		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
454 		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
455 	} else {
456 		dev->hw_desc[idx]->len1 = 0;
457 		dev->hw_desc[idx]->p1 = 0;
458 	}
459 	dev->hw_desc[idx]->len2 = ctx->keylen;
460 	dev->hw_desc[idx]->p2 = dev->key_phys_base;
461 	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
462 	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
463 
464 	idx++;
465 
466 
467 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
468 	if (dev->nb_in_sg < 0) {
469 		dev_err(dev->device, "Invalid number of src SG.\n");
470 		return dev->nb_in_sg;
471 	}
472 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
473 	if (dev->nb_out_sg < 0) {
474 		dev_err(dev->device, "Invalid number of dst SG.\n");
475 		return dev->nb_out_sg;
476 	}
477 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
478 		dev_err(dev->device, "not enough hw links (%d)\n",
479 			dev->nb_in_sg + dev->nb_out_sg);
480 		return -EINVAL;
481 	}
482 
483 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
484 			 DMA_TO_DEVICE);
485 	if (ret != dev->nb_in_sg) {
486 		dev_err(dev->device, "couldn't map in sg\n");
487 		return -EINVAL;
488 	}
489 
490 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
491 			 DMA_FROM_DEVICE);
492 	if (ret != dev->nb_out_sg) {
493 		dev_err(dev->device, "couldn't map out sg\n");
494 		goto unmap_in;
495 	}
496 
497 	/* Create input links */
498 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
499 	sg = dev->in_sg;
500 	len = dev->total;
501 	for (i = 0; i < dev->nb_in_sg; i++) {
502 		dev->hw_link[i]->len = min(len, sg->length);
503 		dev->hw_link[i]->p = sg->dma_address;
504 		if (i == (dev->nb_in_sg - 1)) {
505 			dev->hw_link[i]->next = 0;
506 		} else {
507 			len -= min(len, sg->length);
508 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
509 			sg = sg_next(sg);
510 		}
511 	}
512 
513 	/* Create output links */
514 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
515 	sg = dev->out_sg;
516 	len = dev->total;
517 	for (j = i; j < dev->nb_out_sg + i; j++) {
518 		dev->hw_link[j]->len = min(len, sg->length);
519 		dev->hw_link[j]->p = sg->dma_address;
520 		if (j == (dev->nb_out_sg + i - 1)) {
521 			dev->hw_link[j]->next = 0;
522 		} else {
523 			len -= min(len, sg->length);
524 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
525 			sg = sg_next(sg);
526 		}
527 	}
528 
529 	/* Fill remaining fields of hw_desc[1] */
530 	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
531 	dev->hw_desc[idx]->len1 = dev->total;
532 	dev->hw_desc[idx]->len2 = dev->total;
533 	dev->hw_desc[idx]->next = 0;
534 
535 	sahara_dump_descriptors(dev);
536 	sahara_dump_links(dev);
537 
538 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
539 
540 	return 0;
541 
542 unmap_in:
543 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
544 		DMA_TO_DEVICE);
545 
546 	return -EINVAL;
547 }
548 
549 static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
550 {
551 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
552 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
553 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
554 
555 	/* Update IV buffer to contain the last ciphertext block */
556 	if (rctx->mode & FLAGS_ENCRYPT) {
557 		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
558 				   ivsize, req->cryptlen - ivsize);
559 	} else {
560 		memcpy(req->iv, rctx->iv_out, ivsize);
561 	}
562 }
563 
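/*
 * Runs in the queue kthread: program the descriptor chain for one skcipher
 * request, wait (with a timeout) for the completion signalled by the
 * interrupt handler, then unmap the scatterlists and, for CBC, propagate
 * the next IV back into the request.
 */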
564 static int sahara_aes_process(struct skcipher_request *req)
565 {
566 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
567 	struct sahara_dev *dev = dev_ptr;
568 	struct sahara_ctx *ctx;
569 	struct sahara_aes_reqctx *rctx;
570 	int ret;
571 	unsigned long timeout;
572 
573 	/* Request is ready to be dispatched by the device */
574 	dev_dbg(dev->device,
575 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
576 		req->cryptlen, req->src, req->dst);
577 
578 	/* assign new request to device */
579 	dev->total = req->cryptlen;
580 	dev->in_sg = req->src;
581 	dev->out_sg = req->dst;
582 
583 	rctx = skcipher_request_ctx(req);
584 	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
585 	rctx->mode &= FLAGS_MODE_MASK;
586 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
587 
588 	if ((dev->flags & FLAGS_CBC) && req->iv) {
589 		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
590 
591 		memcpy(dev->iv_base, req->iv, ivsize);
592 
593 		if (!(dev->flags & FLAGS_ENCRYPT)) {
594 			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
595 					   rctx->iv_out, ivsize,
596 					   req->cryptlen - ivsize);
597 		}
598 	}
599 
600 	/* assign new context to device */
601 	dev->ctx = ctx;
602 
603 	reinit_completion(&dev->dma_completion);
604 
605 	ret = sahara_hw_descriptor_create(dev);
606 	if (ret)
607 		return -EINVAL;
608 
609 	timeout = wait_for_completion_timeout(&dev->dma_completion,
610 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
611 
612 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
613 		DMA_FROM_DEVICE);
614 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
615 		DMA_TO_DEVICE);
616 
617 	if (!timeout) {
618 		dev_err(dev->device, "AES timeout\n");
619 		return -ETIMEDOUT;
620 	}
621 
622 	if ((dev->flags & FLAGS_CBC) && req->iv)
623 		sahara_aes_cbc_update_iv(req);
624 
625 	return 0;
626 }
627 
628 static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
629 			     unsigned int keylen)
630 {
631 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
632 
633 	ctx->keylen = keylen;
634 
635 	/* SAHARA only supports 128-bit keys */
636 	if (keylen == AES_KEYSIZE_128) {
637 		memcpy(ctx->key, key, keylen);
638 		return 0;
639 	}
640 
641 	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
642 		return -EINVAL;
643 
644 	/*
645 	 * The requested key size is not supported by HW, do a fallback.
646 	 */
647 	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
648 	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
649 						 CRYPTO_TFM_REQ_MASK);
650 	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
651 }
652 
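/*
 * The hardware only handles 128-bit AES keys (see sahara_aes_setkey());
 * requests set up with a 192- or 256-bit key are forwarded to the software
 * fallback tfm allocated in sahara_aes_init_tfm().
 */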
653 static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
654 {
655 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
656 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
657 		crypto_skcipher_reqtfm(req));
658 
659 	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
660 	skcipher_request_set_callback(&rctx->fallback_req,
661 				      req->base.flags,
662 				      req->base.complete,
663 				      req->base.data);
664 	skcipher_request_set_crypt(&rctx->fallback_req, req->src,
665 				   req->dst, req->cryptlen, req->iv);
666 
667 	if (mode & FLAGS_ENCRYPT)
668 		return crypto_skcipher_encrypt(&rctx->fallback_req);
669 
670 	return crypto_skcipher_decrypt(&rctx->fallback_req);
671 }
672 
673 static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
674 {
675 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
676 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
677 		crypto_skcipher_reqtfm(req));
678 	struct sahara_dev *dev = dev_ptr;
679 	int err = 0;
680 
681 	if (!req->cryptlen)
682 		return 0;
683 
684 	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
685 		return sahara_aes_fallback(req, mode);
686 
687 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
688 		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
689 
690 	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
691 		dev_err(dev->device,
692 			"request size is not an exact number of AES blocks\n");
693 		return -EINVAL;
694 	}
695 
696 	rctx->mode = mode;
697 
698 	spin_lock_bh(&dev->queue_spinlock);
699 	err = crypto_enqueue_request(&dev->queue, &req->base);
700 	spin_unlock_bh(&dev->queue_spinlock);
701 
702 	wake_up_process(dev->kthread);
703 
704 	return err;
705 }
706 
707 static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
708 {
709 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
710 }
711 
712 static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
713 {
714 	return sahara_aes_crypt(req, 0);
715 }
716 
717 static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
718 {
719 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
720 }
721 
722 static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
723 {
724 	return sahara_aes_crypt(req, FLAGS_CBC);
725 }
726 
727 static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
728 {
729 	const char *name = crypto_tfm_alg_name(&tfm->base);
730 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
731 
732 	ctx->fallback = crypto_alloc_skcipher(name, 0,
733 					      CRYPTO_ALG_NEED_FALLBACK);
734 	if (IS_ERR(ctx->fallback)) {
735 		pr_err("Error allocating fallback algo %s\n", name);
736 		return PTR_ERR(ctx->fallback);
737 	}
738 
739 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
740 					 crypto_skcipher_reqsize(ctx->fallback));
741 
742 	return 0;
743 }
744 
745 static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
746 {
747 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
748 
749 	crypto_free_skcipher(ctx->fallback);
750 }
751 
752 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
753 			      struct sahara_sha_reqctx *rctx)
754 {
755 	u32 hdr = 0;
756 
757 	hdr = rctx->mode;
758 
759 	if (rctx->first) {
760 		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
761 		hdr |= SAHARA_HDR_MDHA_INIT;
762 	} else {
763 		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
764 	}
765 
766 	if (rctx->last)
767 		hdr |= SAHARA_HDR_MDHA_PDATA;
768 
769 	if (hweight_long(hdr) % 2 == 0)
770 		hdr |= SAHARA_HDR_PARITY_BIT;
771 
772 	return hdr;
773 }
774 
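/*
 * Map the input scatterlist and fill hw_link entries starting at @start for
 * rctx->total bytes. Returns the index of the first unused link (the caller
 * uses it for the context link) or a negative error code.
 */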
775 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
776 				       struct sahara_sha_reqctx *rctx,
777 				       int start)
778 {
779 	struct scatterlist *sg;
780 	unsigned int len;
781 	unsigned int i;
782 	int ret;
783 
784 	dev->in_sg = rctx->in_sg;
785 
786 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
787 	if (dev->nb_in_sg < 0) {
788 		dev_err(dev->device, "Invalid number of src SG.\n");
789 		return dev->nb_in_sg;
790 	}
791 	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
792 		dev_err(dev->device, "not enough hw links (%d)\n",
793 			dev->nb_in_sg + dev->nb_out_sg);
794 		return -EINVAL;
795 	}
796 
797 	sg = dev->in_sg;
798 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
799 	if (!ret)
800 		return -EFAULT;
801 
802 	len = rctx->total;
803 	for (i = start; i < dev->nb_in_sg + start; i++) {
804 		dev->hw_link[i]->len = min(len, sg->length);
805 		dev->hw_link[i]->p = sg->dma_address;
806 		if (i == (dev->nb_in_sg + start - 1)) {
807 			dev->hw_link[i]->next = 0;
808 		} else {
809 			len -= min(len, sg->length);
810 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
811 			sg = sg_next(sg);
812 		}
813 	}
814 
815 	return i;
816 }
817 
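/*
 * Data descriptor (#8 for the first transfer, #10 afterwards, see the
 * comments below): p1 points at the input links (or is 0 for an empty
 * transfer), p2 at a single link covering the context buffer where the
 * hardware stores the updated digest/context.
 */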
818 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
819 						struct sahara_sha_reqctx *rctx,
820 						struct ahash_request *req,
821 						int index)
822 {
823 	unsigned result_len;
824 	int i = index;
825 
826 	if (rctx->first)
827 		/* Create initial descriptor: #8 */
828 		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
829 	else
830 		/* Create hash descriptor: #10. Must follow #6. */
831 		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
832 
833 	dev->hw_desc[index]->len1 = rctx->total;
834 	if (dev->hw_desc[index]->len1 == 0) {
835 		/* if len1 is 0, p1 must be 0, too */
836 		dev->hw_desc[index]->p1 = 0;
837 		rctx->sg_in_idx = 0;
838 	} else {
839 		/* Create input links */
840 		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
841 		i = sahara_sha_hw_links_create(dev, rctx, index);
842 
843 		rctx->sg_in_idx = index;
844 		if (i < 0)
845 			return i;
846 	}
847 
848 	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
849 
850 	/* Save the context for the next operation */
851 	result_len = rctx->context_size;
852 	dev->hw_link[i]->p = dev->context_phys_base;
853 
854 	dev->hw_link[i]->len = result_len;
855 	dev->hw_desc[index]->len2 = result_len;
856 
857 	dev->hw_link[i]->next = 0;
858 
859 	return 0;
860 }
861 
862 /*
863  * Load descriptor aka #6
864  *
865  * To load a previously saved context back to the MDHA unit
866  *
867  * p1: Saved Context
868  * p2: NULL
869  *
870  */
871 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
872 						struct sahara_sha_reqctx *rctx,
873 						struct ahash_request *req,
874 						int index)
875 {
876 	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
877 
878 	dev->hw_desc[index]->len1 = rctx->context_size;
879 	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
880 	dev->hw_desc[index]->len2 = 0;
881 	dev->hw_desc[index]->p2 = 0;
882 
883 	dev->hw_link[index]->len = rctx->context_size;
884 	dev->hw_link[index]->p = dev->context_phys_base;
885 	dev->hw_link[index]->next = 0;
886 
887 	return 0;
888 }
889 
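/*
 * Gather the request data for the hardware: returns 0 if the data was only
 * buffered (less than one block and not the final update), or -EINPROGRESS
 * with rctx->in_sg/rctx->total describing a block-aligned transfer. Trailing
 * bytes that do not fill a block are kept in rctx->buf for the next call.
 */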
890 static int sahara_sha_prepare_request(struct ahash_request *req)
891 {
892 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
893 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
894 	unsigned int hash_later;
895 	unsigned int block_size;
896 	unsigned int len;
897 
898 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
899 
900 	/* append bytes from previous operation */
901 	len = rctx->buf_cnt + req->nbytes;
902 
903 	/* only the last transfer can be padded in hardware */
904 	if (!rctx->last && (len < block_size)) {
905 		/* too little data, save it for the next operation */
906 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
907 					 0, req->nbytes, 0);
908 		rctx->buf_cnt += req->nbytes;
909 
910 		return 0;
911 	}
912 
913 	/* add data from previous operation first */
914 	if (rctx->buf_cnt)
915 		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
916 
917 	/* data must always be a multiple of block_size */
918 	hash_later = rctx->last ? 0 : len & (block_size - 1);
919 	if (hash_later) {
920 		unsigned int offset = req->nbytes - hash_later;
921 		/* Save remaining bytes for later use */
922 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
923 					hash_later, 0);
924 	}
925 
926 	rctx->total = len - hash_later;
927 	/* have data from previous operation and current */
928 	if (rctx->buf_cnt && req->nbytes) {
929 		sg_init_table(rctx->in_sg_chain, 2);
930 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
931 		sg_chain(rctx->in_sg_chain, 2, req->src);
932 		rctx->in_sg = rctx->in_sg_chain;
933 	/* only data from previous operation */
934 	} else if (rctx->buf_cnt) {
935 		rctx->in_sg = rctx->in_sg_chain;
936 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
937 	/* no data from previous operation */
938 	} else {
939 		rctx->in_sg = req->src;
940 	}
941 
942 	/* on next call, we only have the remaining data in the buffer */
943 	rctx->buf_cnt = hash_later;
944 
945 	return -EINPROGRESS;
946 }
947 
948 static int sahara_sha_process(struct ahash_request *req)
949 {
950 	struct sahara_dev *dev = dev_ptr;
951 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
952 	int ret;
953 	unsigned long timeout;
954 
955 	ret = sahara_sha_prepare_request(req);
956 	if (!ret)
957 		return ret;
958 
959 	if (rctx->first) {
960 		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
961 		if (ret)
962 			return ret;
963 
964 		dev->hw_desc[0]->next = 0;
965 		rctx->first = 0;
966 	} else {
967 		memcpy(dev->context_base, rctx->context, rctx->context_size);
968 
969 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
970 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
971 		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
972 		if (ret)
973 			return ret;
974 
975 		dev->hw_desc[1]->next = 0;
976 	}
977 
978 	sahara_dump_descriptors(dev);
979 	sahara_dump_links(dev);
980 
981 	reinit_completion(&dev->dma_completion);
982 
983 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
984 
985 	timeout = wait_for_completion_timeout(&dev->dma_completion,
986 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
987 
988 	if (rctx->sg_in_idx)
989 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
990 			     DMA_TO_DEVICE);
991 
992 	if (!timeout) {
993 		dev_err(dev->device, "SHA timeout\n");
994 		return -ETIMEDOUT;
995 	}
996 
997 	memcpy(rctx->context, dev->context_base, rctx->context_size);
998 
999 	if (req->result && rctx->last)
1000 		memcpy(req->result, rctx->context, rctx->digest_size);
1001 
1002 	return 0;
1003 }
1004 
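/*
 * Queue worker thread: dequeue one request at a time, dispatch it to the
 * AES or SHA processing path, complete it and sleep until new work is
 * enqueued.
 */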
1005 static int sahara_queue_manage(void *data)
1006 {
1007 	struct sahara_dev *dev = (struct sahara_dev *)data;
1008 	struct crypto_async_request *async_req;
1009 	struct crypto_async_request *backlog;
1010 	int ret = 0;
1011 
1012 	do {
1013 		__set_current_state(TASK_INTERRUPTIBLE);
1014 
1015 		spin_lock_bh(&dev->queue_spinlock);
1016 		backlog = crypto_get_backlog(&dev->queue);
1017 		async_req = crypto_dequeue_request(&dev->queue);
1018 		spin_unlock_bh(&dev->queue_spinlock);
1019 
1020 		if (backlog)
1021 			backlog->complete(backlog, -EINPROGRESS);
1022 
1023 		if (async_req) {
1024 			if (crypto_tfm_alg_type(async_req->tfm) ==
1025 			    CRYPTO_ALG_TYPE_AHASH) {
1026 				struct ahash_request *req =
1027 					ahash_request_cast(async_req);
1028 
1029 				ret = sahara_sha_process(req);
1030 			} else {
1031 				struct skcipher_request *req =
1032 					skcipher_request_cast(async_req);
1033 
1034 				ret = sahara_aes_process(req);
1035 			}
1036 
1037 			async_req->complete(async_req, ret);
1038 
1039 			continue;
1040 		}
1041 
1042 		schedule();
1043 	} while (!kthread_should_stop());
1044 
1045 	return 0;
1046 }
1047 
1048 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1049 {
1050 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1051 	struct sahara_dev *dev = dev_ptr;
1052 	int ret;
1053 
1054 	if (!req->nbytes && !last)
1055 		return 0;
1056 
1057 	rctx->last = last;
1058 
1059 	if (!rctx->active) {
1060 		rctx->active = 1;
1061 		rctx->first = 1;
1062 	}
1063 
1064 	spin_lock_bh(&dev->queue_spinlock);
1065 	ret = crypto_enqueue_request(&dev->queue, &req->base);
1066 	spin_unlock_bh(&dev->queue_spinlock);
1067 
1068 	wake_up_process(dev->kthread);
1069 
1070 	return ret;
1071 }
1072 
1073 static int sahara_sha_init(struct ahash_request *req)
1074 {
1075 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1076 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1077 
1078 	memset(rctx, 0, sizeof(*rctx));
1079 
1080 	switch (crypto_ahash_digestsize(tfm)) {
1081 	case SHA1_DIGEST_SIZE:
1082 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1083 		rctx->digest_size = SHA1_DIGEST_SIZE;
1084 		break;
1085 	case SHA256_DIGEST_SIZE:
1086 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1087 		rctx->digest_size = SHA256_DIGEST_SIZE;
1088 		break;
1089 	default:
1090 		return -EINVAL;
1091 	}
1092 
1093 	rctx->context_size = rctx->digest_size + 4;
1094 	rctx->active = 0;
1095 
1096 	return 0;
1097 }
1098 
1099 static int sahara_sha_update(struct ahash_request *req)
1100 {
1101 	return sahara_sha_enqueue(req, 0);
1102 }
1103 
1104 static int sahara_sha_final(struct ahash_request *req)
1105 {
1106 	req->nbytes = 0;
1107 	return sahara_sha_enqueue(req, 1);
1108 }
1109 
1110 static int sahara_sha_finup(struct ahash_request *req)
1111 {
1112 	return sahara_sha_enqueue(req, 1);
1113 }
1114 
1115 static int sahara_sha_digest(struct ahash_request *req)
1116 {
1117 	sahara_sha_init(req);
1118 
1119 	return sahara_sha_finup(req);
1120 }
1121 
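/*
 * The exported hash state is simply the whole request context, including
 * the saved hardware context and any buffered partial block, so a hash can
 * be suspended and resumed.
 */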
1122 static int sahara_sha_export(struct ahash_request *req, void *out)
1123 {
1124 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1125 
1126 	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1127 
1128 	return 0;
1129 }
1130 
1131 static int sahara_sha_import(struct ahash_request *req, const void *in)
1132 {
1133 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1134 
1135 	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1136 
1137 	return 0;
1138 }
1139 
1140 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1141 {
1142 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1143 				 sizeof(struct sahara_sha_reqctx));
1144 
1145 	return 0;
1146 }
1147 
1148 static struct skcipher_alg aes_algs[] = {
1149 {
1150 	.base.cra_name		= "ecb(aes)",
1151 	.base.cra_driver_name	= "sahara-ecb-aes",
1152 	.base.cra_priority	= 300,
1153 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1154 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1155 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1156 	.base.cra_alignmask	= 0x0,
1157 	.base.cra_module	= THIS_MODULE,
1158 
1159 	.init			= sahara_aes_init_tfm,
1160 	.exit			= sahara_aes_exit_tfm,
1161 	.min_keysize		= AES_MIN_KEY_SIZE,
1162 	.max_keysize		= AES_MAX_KEY_SIZE,
1163 	.setkey			= sahara_aes_setkey,
1164 	.encrypt		= sahara_aes_ecb_encrypt,
1165 	.decrypt		= sahara_aes_ecb_decrypt,
1166 }, {
1167 	.base.cra_name		= "cbc(aes)",
1168 	.base.cra_driver_name	= "sahara-cbc-aes",
1169 	.base.cra_priority	= 300,
1170 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1171 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1172 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1173 	.base.cra_alignmask	= 0x0,
1174 	.base.cra_module	= THIS_MODULE,
1175 
1176 	.init			= sahara_aes_init_tfm,
1177 	.exit			= sahara_aes_exit_tfm,
1178 	.min_keysize		= AES_MIN_KEY_SIZE,
1179 	.max_keysize		= AES_MAX_KEY_SIZE,
1180 	.ivsize			= AES_BLOCK_SIZE,
1181 	.setkey			= sahara_aes_setkey,
1182 	.encrypt		= sahara_aes_cbc_encrypt,
1183 	.decrypt		= sahara_aes_cbc_decrypt,
1184 }
1185 };
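/*
 * For reference (not part of this driver): kernel users reach these
 * algorithms through the generic crypto API, e.g.
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 * and with cra_priority 300 the "sahara-cbc-aes" implementation can be
 * selected ahead of lower-priority software implementations.
 */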
1186 
1187 static struct ahash_alg sha_v3_algs[] = {
1188 {
1189 	.init		= sahara_sha_init,
1190 	.update		= sahara_sha_update,
1191 	.final		= sahara_sha_final,
1192 	.finup		= sahara_sha_finup,
1193 	.digest		= sahara_sha_digest,
1194 	.export		= sahara_sha_export,
1195 	.import		= sahara_sha_import,
1196 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1197 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1198 	.halg.base	= {
1199 		.cra_name		= "sha1",
1200 		.cra_driver_name	= "sahara-sha1",
1201 		.cra_priority		= 300,
1202 		.cra_flags		= CRYPTO_ALG_ASYNC |
1203 						CRYPTO_ALG_NEED_FALLBACK,
1204 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1205 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1206 		.cra_alignmask		= 0,
1207 		.cra_module		= THIS_MODULE,
1208 		.cra_init		= sahara_sha_cra_init,
1209 	}
1210 },
1211 };
1212 
1213 static struct ahash_alg sha_v4_algs[] = {
1214 {
1215 	.init		= sahara_sha_init,
1216 	.update		= sahara_sha_update,
1217 	.final		= sahara_sha_final,
1218 	.finup		= sahara_sha_finup,
1219 	.digest		= sahara_sha_digest,
1220 	.export		= sahara_sha_export,
1221 	.import		= sahara_sha_import,
1222 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1223 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1224 	.halg.base	= {
1225 		.cra_name		= "sha256",
1226 		.cra_driver_name	= "sahara-sha256",
1227 		.cra_priority		= 300,
1228 		.cra_flags		= CRYPTO_ALG_ASYNC |
1229 						CRYPTO_ALG_NEED_FALLBACK,
1230 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1231 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1232 		.cra_alignmask		= 0,
1233 		.cra_module		= THIS_MODULE,
1234 		.cra_init		= sahara_sha_cra_init,
1235 	}
1236 },
1237 };
1238 
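/*
 * Interrupt handler: acknowledge the interrupt, decode the status and error
 * registers, record the outcome in dev->error and wake up the waiting
 * processing path through dma_completion.
 */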
1239 static irqreturn_t sahara_irq_handler(int irq, void *data)
1240 {
1241 	struct sahara_dev *dev = (struct sahara_dev *)data;
1242 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1243 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1244 
1245 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1246 		     SAHARA_REG_CMD);
1247 
1248 	sahara_decode_status(dev, stat);
1249 
1250 	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1251 		return IRQ_NONE;
1252 	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1253 		dev->error = 0;
1254 	} else {
1255 		sahara_decode_error(dev, err);
1256 		dev->error = -EINVAL;
1257 	}
1258 
1259 	complete(&dev->dma_completion);
1260 
1261 	return IRQ_HANDLED;
1262 }
1263 
1264 
1265 static int sahara_register_algs(struct sahara_dev *dev)
1266 {
1267 	int err;
1268 	unsigned int i, j, k, l;
1269 
1270 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1271 		err = crypto_register_skcipher(&aes_algs[i]);
1272 		if (err)
1273 			goto err_aes_algs;
1274 	}
1275 
1276 	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1277 		err = crypto_register_ahash(&sha_v3_algs[k]);
1278 		if (err)
1279 			goto err_sha_v3_algs;
1280 	}
1281 
1282 	if (dev->version > SAHARA_VERSION_3)
1283 		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1284 			err = crypto_register_ahash(&sha_v4_algs[l]);
1285 			if (err)
1286 				goto err_sha_v4_algs;
1287 		}
1288 
1289 	return 0;
1290 
1291 err_sha_v4_algs:
1292 	for (j = 0; j < l; j++)
1293 		crypto_unregister_ahash(&sha_v4_algs[j]);
1294 
1295 err_sha_v3_algs:
1296 	for (j = 0; j < k; j++)
1297 		crypto_unregister_ahash(&sha_v3_algs[j]);
1298 
1299 err_aes_algs:
1300 	for (j = 0; j < i; j++)
1301 		crypto_unregister_skcipher(&aes_algs[j]);
1302 
1303 	return err;
1304 }
1305 
1306 static void sahara_unregister_algs(struct sahara_dev *dev)
1307 {
1308 	unsigned int i;
1309 
1310 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1311 		crypto_unregister_skcipher(&aes_algs[i]);
1312 
1313 	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1314 		crypto_unregister_ahash(&sha_v3_algs[i]);
1315 
1316 	if (dev->version > SAHARA_VERSION_3)
1317 		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1318 			crypto_unregister_ahash(&sha_v4_algs[i]);
1319 }
1320 
1321 static const struct of_device_id sahara_dt_ids[] = {
1322 	{ .compatible = "fsl,imx53-sahara" },
1323 	{ .compatible = "fsl,imx27-sahara" },
1324 	{ /* sentinel */ }
1325 };
1326 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1327 
1328 static int sahara_probe(struct platform_device *pdev)
1329 {
1330 	struct sahara_dev *dev;
1331 	u32 version;
1332 	int irq;
1333 	int err;
1334 	int i;
1335 
1336 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1337 	if (!dev)
1338 		return -ENOMEM;
1339 
1340 	dev->device = &pdev->dev;
1341 	platform_set_drvdata(pdev, dev);
1342 
1343 	/* Get the base address */
1344 	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1345 	if (IS_ERR(dev->regs_base))
1346 		return PTR_ERR(dev->regs_base);
1347 
1348 	/* Get the IRQ */
1349 	irq = platform_get_irq(pdev,  0);
1350 	if (irq < 0)
1351 		return irq;
1352 
1353 	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1354 			       0, dev_name(&pdev->dev), dev);
1355 	if (err) {
1356 		dev_err(&pdev->dev, "failed to request irq\n");
1357 		return err;
1358 	}
1359 
1360 	/* clocks */
1361 	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1362 	if (IS_ERR(dev->clk_ipg)) {
1363 		dev_err(&pdev->dev, "Could not get ipg clock\n");
1364 		return PTR_ERR(dev->clk_ipg);
1365 	}
1366 
1367 	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1368 	if (IS_ERR(dev->clk_ahb)) {
1369 		dev_err(&pdev->dev, "Could not get ahb clock\n");
1370 		return PTR_ERR(dev->clk_ahb);
1371 	}
1372 
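	/*
	 * All fixed DMA buffers (descriptor chain, key + IV, SHA context and
	 * the link table) are allocated once as coherent memory below; only
	 * the per-request scatterlists are mapped at transfer time.
	 */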
1373 	/* Allocate HW descriptors */
1374 	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1375 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1376 			&dev->hw_phys_desc[0], GFP_KERNEL);
1377 	if (!dev->hw_desc[0]) {
1378 		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1379 		return -ENOMEM;
1380 	}
1381 	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1382 	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1383 				sizeof(struct sahara_hw_desc);
1384 
1385 	/* Allocate space for iv and key */
1386 	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1387 				&dev->key_phys_base, GFP_KERNEL);
1388 	if (!dev->key_base) {
1389 		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1390 		return -ENOMEM;
1391 	}
1392 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1393 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1394 
1395 	/* Allocate space for context: largest digest + message length field */
1396 	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1397 					SHA256_DIGEST_SIZE + 4,
1398 					&dev->context_phys_base, GFP_KERNEL);
1399 	if (!dev->context_base) {
1400 		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1401 		return -ENOMEM;
1402 	}
1403 
1404 	/* Allocate space for HW links */
1405 	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1406 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1407 			&dev->hw_phys_link[0], GFP_KERNEL);
1408 	if (!dev->hw_link[0]) {
1409 		dev_err(&pdev->dev, "Could not allocate hw links\n");
1410 		return -ENOMEM;
1411 	}
1412 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1413 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1414 					sizeof(struct sahara_hw_link);
1415 		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1416 	}
1417 
1418 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1419 
1420 	spin_lock_init(&dev->queue_spinlock);
1421 
1422 	dev_ptr = dev;
1423 
1424 	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1425 	if (IS_ERR(dev->kthread)) {
1426 		return PTR_ERR(dev->kthread);
1427 	}
1428 
1429 	init_completion(&dev->dma_completion);
1430 
1431 	err = clk_prepare_enable(dev->clk_ipg);
1432 	if (err)
1433 		return err;
1434 	err = clk_prepare_enable(dev->clk_ahb);
1435 	if (err)
1436 		goto clk_ipg_disable;
1437 
1438 	version = sahara_read(dev, SAHARA_REG_VERSION);
1439 	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1440 		if (version != SAHARA_VERSION_3)
1441 			err = -ENODEV;
1442 	} else if (of_device_is_compatible(pdev->dev.of_node,
1443 			"fsl,imx53-sahara")) {
1444 		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1445 			err = -ENODEV;
1446 		version = (version >> 8) & 0xff;
1447 	}
1448 	if (err == -ENODEV) {
1449 		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1450 				version);
1451 		goto err_algs;
1452 	}
1453 
1454 	dev->version = version;
1455 
1456 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1457 		     SAHARA_REG_CMD);
1458 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1459 			SAHARA_CONTROL_SET_MAXBURST(8) |
1460 			SAHARA_CONTROL_RNG_AUTORSD |
1461 			SAHARA_CONTROL_ENABLE_INT,
1462 			SAHARA_REG_CONTROL);
1463 
1464 	err = sahara_register_algs(dev);
1465 	if (err)
1466 		goto err_algs;
1467 
1468 	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1469 
1470 	return 0;
1471 
1472 err_algs:
1473 	kthread_stop(dev->kthread);
1474 	dev_ptr = NULL;
1475 	clk_disable_unprepare(dev->clk_ahb);
1476 clk_ipg_disable:
1477 	clk_disable_unprepare(dev->clk_ipg);
1478 
1479 	return err;
1480 }
1481 
1482 static int sahara_remove(struct platform_device *pdev)
1483 {
1484 	struct sahara_dev *dev = platform_get_drvdata(pdev);
1485 
1486 	kthread_stop(dev->kthread);
1487 
1488 	sahara_unregister_algs(dev);
1489 
1490 	clk_disable_unprepare(dev->clk_ipg);
1491 	clk_disable_unprepare(dev->clk_ahb);
1492 
1493 	dev_ptr = NULL;
1494 
1495 	return 0;
1496 }
1497 
1498 static struct platform_driver sahara_driver = {
1499 	.probe		= sahara_probe,
1500 	.remove		= sahara_remove,
1501 	.driver		= {
1502 		.name	= SAHARA_NAME,
1503 		.of_match_table = sahara_dt_ids,
1504 	},
1505 };
1506 
1507 module_platform_driver(sahara_driver);
1508 
1509 MODULE_LICENSE("GPL");
1510 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1511 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1512 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1513