1 /**
2  * Copyright (C) ST-Ericsson SA 2010
3  * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
4  * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
5  * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
6  * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
7  * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
8  * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
9  * License terms: GNU General Public License (GPL) version 2
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/completion.h>
14 #include <linux/crypto.h>
15 #include <linux/dmaengine.h>
16 #include <linux/err.h>
17 #include <linux/errno.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/irqreturn.h>
21 #include <linux/klist.h>
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/semaphore.h>
26 #include <linux/platform_data/dma-ste-dma40.h>
27 
28 #include <crypto/aes.h>
29 #include <crypto/algapi.h>
30 #include <crypto/ctr.h>
31 #include <crypto/des.h>
32 #include <crypto/scatterwalk.h>
33 
34 #include <linux/platform_data/crypto-ux500.h>
35 
36 #include "cryp_p.h"
37 #include "cryp.h"
38 
39 #define CRYP_MAX_KEY_SIZE	32
40 #define BYTES_PER_WORD		4
41 
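/*
 * cryp_mode selects the transfer mode used by the driver (polling,
 * interrupt or DMA; see cryp_setup_context() and hw_crypt_noxts()).
 * session_id tags the key/configuration currently loaded in hardware;
 * 0 is reserved for "no session" (see add_session_id()).
 */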
42 static int cryp_mode;
43 static atomic_t session_id;
44 
45 static struct stedma40_chan_cfg *mem_to_engine;
46 static struct stedma40_chan_cfg *engine_to_mem;
47 
48 /**
49  * struct cryp_driver_data - data specific to the driver.
50  *
51  * @device_list: A list of registered devices to choose from.
52  * @device_allocation: A semaphore initialized with number of devices.
53  */
54 struct cryp_driver_data {
55 	struct klist device_list;
56 	struct semaphore device_allocation;
57 };
58 
59 /**
60  * struct cryp_ctx - Crypto context
61  * @config: Crypto mode.
62  * @key: Key buffer, up to CRYP_MAX_KEY_SIZE bytes.
63  * @keylen: Length of key.
64  * @iv: Pointer to initialization vector.
65  * @indata: Pointer to indata.
66  * @outdata: Pointer to outdata.
67  * @datalen: Length of indata.
68  * @outlen: Length of outdata.
69  * @blocksize: Size of blocks.
70  * @updated: Updated flag.
71  * @dev_ctx: Device dependent context.
72  * @device: Pointer to the device.
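 * @session_id: Session ID used to detect whether the hardware context must
 *              be restored before use.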
73  */
74 struct cryp_ctx {
75 	struct cryp_config config;
76 	u8 key[CRYP_MAX_KEY_SIZE];
77 	u32 keylen;
78 	u8 *iv;
79 	const u8 *indata;
80 	u8 *outdata;
81 	u32 datalen;
82 	u32 outlen;
83 	u32 blocksize;
84 	u8 updated;
85 	struct cryp_device_context dev_ctx;
86 	struct cryp_device_data *device;
87 	u32 session_id;
88 };
89 
90 static struct cryp_driver_data driver_data;
91 
92 /**
93  * uint8p_to_uint32_be - 4*uint8 to uint32 big endian
94  * @in: Data to convert.
95  */
96 static inline u32 uint8p_to_uint32_be(u8 *in)
97 {
98 	u32 *data = (u32 *)in;
99 
100 	return cpu_to_be32p(data);
101 }
102 
103 /**
104  * swap_bits_in_byte - mirror the bits in a byte
105  * @b: the byte to be mirrored
106  *
107  * The bits are swapped the following way:
108  *  Byte b includes bits 0-7; nibble 1 (n1) includes bits 0-3 and
109  *  nibble 2 (n2) bits 4-7.
110  *
111  *  Nibble 1 (n1):
112  *  (The "old" (moved) bit is replaced with a zero)
113  *  1. Move bit 6 and 7, 4 positions to the left.
114  *  2. Move bit 3 and 5, 2 positions to the left.
115  *  3. Move bit 1-4, 1 position to the left.
116  *
117  *  Nibble 2 (n2):
118  *  1. Move bit 0 and 1, 4 positions to the right.
119  *  2. Move bit 2 and 4, 2 positions to the right.
120  *  3. Move bit 3-6, 1 position to the right.
121  *
122  *  Combine the two nibbles to a complete and swapped byte.
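 *
 *  Example: swap_bits_in_byte(0xC5) == 0xA3 (1100 0101b mirrored is
 *  1010 0011b).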
123  */
124 
125 static inline u8 swap_bits_in_byte(u8 b)
126 {
127 #define R_SHIFT_4_MASK  0xc0 /* Bits 6 and 7, right shift 4 */
128 #define R_SHIFT_2_MASK  0x28 /* (After right shift 4) Bits 3 and 5,
129 				  right shift 2 */
130 #define R_SHIFT_1_MASK  0x1e /* (After right shift 2) Bits 1-4,
131 				  right shift 1 */
132 #define L_SHIFT_4_MASK  0x03 /* Bits 0 and 1, left shift 4 */
133 #define L_SHIFT_2_MASK  0x14 /* (After left shift 4) Bits 2 and 4,
134 				  left shift 2 */
135 #define L_SHIFT_1_MASK  0x78 /* (After left shift 2) Bits 3-6,
136 				  left shift 1 */
137 
138 	u8 n1;
139 	u8 n2;
140 
141 	/* Swap most significant nibble */
142 	/* Right shift 4, bits 6 and 7 */
143 	n1 = ((b  & R_SHIFT_4_MASK) >> 4) | (b  & ~(R_SHIFT_4_MASK >> 4));
144 	/* Right shift 2, bits 3 and 5 */
145 	n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
146 	/* Right shift 1, bits 1-4 */
147 	n1 = (n1  & R_SHIFT_1_MASK) >> 1;
148 
149 	/* Swap least significant nibble */
150 	/* Left shift 4, bits 0 and 1 */
151 	n2 = ((b  & L_SHIFT_4_MASK) << 4) | (b  & ~(L_SHIFT_4_MASK << 4));
152 	/* Left shift 2, bits 2 and 4 */
153 	n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
154 	/* Left shift 1, bits 3-6 */
155 	n2 = (n2  & L_SHIFT_1_MASK) << 1;
156 
157 	return n1 | n2;
158 }
159 
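/*
 * Used when loading AES keys (see cfg_keys() below): the key is written back
 * with its 32-bit words in reverse order and every byte bit-mirrored using
 * swap_bits_in_byte() above.
 */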
160 static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
161 						      u8 *out, u32 len)
162 {
163 	unsigned int i = 0;
164 	int j;
165 	int index = 0;
166 
167 	j = len - BYTES_PER_WORD;
168 	while (j >= 0) {
169 		for (i = 0; i < BYTES_PER_WORD; i++) {
170 			index = len - j - BYTES_PER_WORD + i;
171 			out[j + i] =
172 				swap_bits_in_byte(in[index]);
173 		}
174 		j -= BYTES_PER_WORD;
175 	}
176 }
177 
178 static void add_session_id(struct cryp_ctx *ctx)
179 {
180 	/*
181 	 * We never want 0 to be a valid value, since this is the default value
182 	 * for the software context.
183 	 */
184 	if (unlikely(atomic_inc_and_test(&session_id)))
185 		atomic_inc(&session_id);
186 
187 	ctx->session_id = atomic_read(&session_id);
188 }
189 
190 static irqreturn_t cryp_interrupt_handler(int irq, void *param)
191 {
192 	struct cryp_ctx *ctx;
193 	int count;
194 	struct cryp_device_data *device_data;
195 
196 	if (param == NULL) {
197 		BUG_ON(!param);
198 		return IRQ_HANDLED;
199 	}
200 
201 	/* The device is coming from the one found in hw_crypt_noxts. */
202 	device_data = (struct cryp_device_data *)param;
203 
204 	ctx = device_data->current_ctx;
205 
206 	if (ctx == NULL) {
207 		BUG_ON(!ctx);
208 		return IRQ_HANDLED;
209 	}
210 
211 	dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
212 		cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
213 		"out" : "in");
214 
215 	if (cryp_pending_irq_src(device_data,
216 				 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
217 		if (ctx->outlen / ctx->blocksize > 0) {
218 			count = ctx->blocksize / 4;
219 
220 			readsl(&device_data->base->dout, ctx->outdata, count);
221 			ctx->outdata += count;
222 			ctx->outlen -= count;
223 
224 			if (ctx->outlen == 0) {
225 				cryp_disable_irq_src(device_data,
226 						     CRYP_IRQ_SRC_OUTPUT_FIFO);
227 			}
228 		}
229 	} else if (cryp_pending_irq_src(device_data,
230 					CRYP_IRQ_SRC_INPUT_FIFO)) {
231 		if (ctx->datalen / ctx->blocksize > 0) {
232 			count = ctx->blocksize / 4;
233 
234 			writesl(&device_data->base->din, ctx->indata, count);
235 
236 			ctx->indata += count;
237 			ctx->datalen -= count;
238 
239 			if (ctx->datalen == 0)
240 				cryp_disable_irq_src(device_data,
241 						   CRYP_IRQ_SRC_INPUT_FIFO);
242 
243 			if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
244 				CRYP_PUT_BITS(&device_data->base->cr,
245 					      CRYP_START_ENABLE,
246 					      CRYP_CR_START_POS,
247 					      CRYP_CR_START_MASK);
248 
249 				cryp_wait_until_done(device_data);
250 			}
251 		}
252 	}
253 
254 	return IRQ_HANDLED;
255 }
256 
257 static int mode_is_aes(enum cryp_algo_mode mode)
258 {
259 	return	CRYP_ALGO_AES_ECB == mode ||
260 		CRYP_ALGO_AES_CBC == mode ||
261 		CRYP_ALGO_AES_CTR == mode ||
262 		CRYP_ALGO_AES_XTS == mode;
263 }
264 
265 static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
266 		  enum cryp_init_vector_index index)
267 {
268 	struct cryp_init_vector_value vector_value;
269 
270 	dev_dbg(device_data->dev, "[%s]", __func__);
271 
272 	vector_value.init_value_left = left;
273 	vector_value.init_value_right = right;
274 
275 	return cryp_configure_init_vector(device_data,
276 					  index,
277 					  vector_value);
278 }
279 
280 static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
281 {
282 	int i;
283 	int status = 0;
284 	int num_of_regs = ctx->blocksize / 8;
285 	u32 iv[AES_BLOCK_SIZE / 4];
286 
287 	dev_dbg(device_data->dev, "[%s]", __func__);
288 
289 	/*
290 	 * Since we loop on num_of_regs we need to have a check in case
291 	 * someone provides an incorrect blocksize which would force calling
292 	 * cfg_iv with i greater than 2 which is an error.
293 	 */
294 	if (num_of_regs > 2) {
295 		dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
296 			__func__, ctx->blocksize);
297 		return -EINVAL;
298 	}
299 
300 	for (i = 0; i < ctx->blocksize / 4; i++)
301 		iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);
302 
303 	for (i = 0; i < num_of_regs; i++) {
304 		status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
305 				(enum cryp_init_vector_index) i);
306 		if (status != 0)
307 			return status;
308 	}
309 	return status;
310 }
311 
312 static int set_key(struct cryp_device_data *device_data,
313 		   u32 left_key,
314 		   u32 right_key,
315 		   enum cryp_key_reg_index index)
316 {
317 	struct cryp_key_value key_value;
318 	int cryp_error;
319 
320 	dev_dbg(device_data->dev, "[%s]", __func__);
321 
322 	key_value.key_value_left = left_key;
323 	key_value.key_value_right = right_key;
324 
325 	cryp_error = cryp_configure_key_values(device_data,
326 					       index,
327 					       key_value);
328 	if (cryp_error != 0)
329 		dev_err(device_data->dev, "[%s]: "
330 			"cryp_configure_key_values() failed!", __func__);
331 
332 	return cryp_error;
333 }
334 
335 static int cfg_keys(struct cryp_ctx *ctx)
336 {
337 	int i;
338 	int num_of_regs = ctx->keylen / 8;
339 	u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
340 	int cryp_error = 0;
341 
342 	dev_dbg(ctx->device->dev, "[%s]", __func__);
343 
344 	if (mode_is_aes(ctx->config.algomode)) {
345 		swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
346 						   (u8 *)swapped_key,
347 						   ctx->keylen);
348 	} else {
349 		for (i = 0; i < ctx->keylen / 4; i++)
350 			swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
351 	}
352 
353 	for (i = 0; i < num_of_regs; i++) {
354 		cryp_error = set_key(ctx->device,
355 				     *(((u32 *)swapped_key)+i*2),
356 				     *(((u32 *)swapped_key)+i*2+1),
357 				     (enum cryp_key_reg_index) i);
358 
359 		if (cryp_error != 0) {
360 			dev_err(ctx->device->dev, "[%s]: set_key() failed!",
361 					__func__);
362 			return cryp_error;
363 		}
364 	}
365 	return cryp_error;
366 }
367 
368 static int cryp_setup_context(struct cryp_ctx *ctx,
369 			      struct cryp_device_data *device_data)
370 {
371 	u32 control_register = CRYP_CR_DEFAULT;
372 
373 	switch (cryp_mode) {
374 	case CRYP_MODE_INTERRUPT:
375 		writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
376 		break;
377 
378 	case CRYP_MODE_DMA:
379 		writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
380 		break;
381 
382 	default:
383 		break;
384 	}
385 
386 	if (ctx->updated == 0) {
387 		cryp_flush_inoutfifo(device_data);
388 		if (cfg_keys(ctx) != 0) {
389 			dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
390 				__func__);
391 			return -EINVAL;
392 		}
393 
394 		if (ctx->iv &&
395 		    CRYP_ALGO_AES_ECB != ctx->config.algomode &&
396 		    CRYP_ALGO_DES_ECB != ctx->config.algomode &&
397 		    CRYP_ALGO_TDES_ECB != ctx->config.algomode) {
398 			if (cfg_ivs(device_data, ctx) != 0)
399 				return -EPERM;
400 		}
401 
402 		cryp_set_configuration(device_data, &ctx->config,
403 				       &control_register);
404 		add_session_id(ctx);
405 	} else if (ctx->updated == 1 &&
406 		   ctx->session_id != atomic_read(&session_id)) {
407 		cryp_flush_inoutfifo(device_data);
408 		cryp_restore_device_context(device_data, &ctx->dev_ctx);
409 
410 		add_session_id(ctx);
411 		control_register = ctx->dev_ctx.cr;
412 	} else
413 		control_register = ctx->dev_ctx.cr;
414 
415 	writel(control_register |
416 	       (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
417 	       &device_data->base->cr);
418 
419 	return 0;
420 }
421 
422 static int cryp_get_device_data(struct cryp_ctx *ctx,
423 				struct cryp_device_data **device_data)
424 {
425 	int ret;
426 	struct klist_iter device_iterator;
427 	struct klist_node *device_node;
428 	struct cryp_device_data *local_device_data = NULL;
429 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
430 
431 	/* Wait until a device is available */
432 	ret = down_interruptible(&driver_data.device_allocation);
433 	if (ret)
434 		return ret;  /* Interrupted */
435 
436 	/* Select a device */
437 	klist_iter_init(&driver_data.device_list, &device_iterator);
438 
439 	device_node = klist_next(&device_iterator);
440 	while (device_node) {
441 		local_device_data = container_of(device_node,
442 					   struct cryp_device_data, list_node);
443 		spin_lock(&local_device_data->ctx_lock);
444 		/* current_ctx allocates a device, NULL = unallocated */
445 		if (local_device_data->current_ctx) {
446 			device_node = klist_next(&device_iterator);
447 		} else {
448 			local_device_data->current_ctx = ctx;
449 			ctx->device = local_device_data;
450 			spin_unlock(&local_device_data->ctx_lock);
451 			break;
452 		}
453 		spin_unlock(&local_device_data->ctx_lock);
454 	}
455 	klist_iter_exit(&device_iterator);
456 
457 	if (!device_node) {
458 		/*
459 		 * No free device found.
460 		 * Since we allocated a device with down_interruptible, this
461 		 * should not be able to happen.
462 		 * Number of available devices, which are contained in
463 		 * device_allocation, is therefore decremented by not doing
464 		 * an up(device_allocation).
465 		 */
466 		return -EBUSY;
467 	}
468 
469 	*device_data = local_device_data;
470 
471 	return 0;
472 }
473 
474 static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
475 				   struct device *dev)
476 {
477 	struct dma_slave_config mem2cryp = {
478 		.direction = DMA_MEM_TO_DEV,
479 		.dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO,
480 		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
481 		.dst_maxburst = 4,
482 	};
483 	struct dma_slave_config cryp2mem = {
484 		.direction = DMA_DEV_TO_MEM,
485 		.src_addr = device_data->phybase + CRYP_DMA_RX_FIFO,
486 		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
487 		.src_maxburst = 4,
488 	};
489 
490 	dma_cap_zero(device_data->dma.mask);
491 	dma_cap_set(DMA_SLAVE, device_data->dma.mask);
492 
493 	device_data->dma.cfg_mem2cryp = mem_to_engine;
494 	device_data->dma.chan_mem2cryp =
495 		dma_request_channel(device_data->dma.mask,
496 				    stedma40_filter,
497 				    device_data->dma.cfg_mem2cryp);
498 
499 	device_data->dma.cfg_cryp2mem = engine_to_mem;
500 	device_data->dma.chan_cryp2mem =
501 		dma_request_channel(device_data->dma.mask,
502 				    stedma40_filter,
503 				    device_data->dma.cfg_cryp2mem);
504 
505 	dmaengine_slave_config(device_data->dma.chan_mem2cryp, &mem2cryp);
506 	dmaengine_slave_config(device_data->dma.chan_cryp2mem, &cryp2mem);
507 
508 	init_completion(&device_data->dma.cryp_dma_complete);
509 }
510 
511 static void cryp_dma_out_callback(void *data)
512 {
513 	struct cryp_ctx *ctx = (struct cryp_ctx *) data;
514 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
515 
516 	complete(&ctx->device->dma.cryp_dma_complete);
517 }
518 
519 static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
520 				 struct scatterlist *sg,
521 				 int len,
522 				 enum dma_data_direction direction)
523 {
524 	struct dma_async_tx_descriptor *desc;
525 	struct dma_chan *channel = NULL;
526 	dma_cookie_t cookie;
527 
528 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
529 
530 	if (unlikely(!IS_ALIGNED((u32)sg, 4))) {
531 		dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
532 			"aligned! Addr: 0x%08x", __func__, (u32)sg);
533 		return -EFAULT;
534 	}
535 
536 	switch (direction) {
537 	case DMA_TO_DEVICE:
538 		channel = ctx->device->dma.chan_mem2cryp;
539 		ctx->device->dma.sg_src = sg;
540 		ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
541 						 ctx->device->dma.sg_src,
542 						 ctx->device->dma.nents_src,
543 						 direction);
544 
545 		if (!ctx->device->dma.sg_src_len) {
546 			dev_dbg(ctx->device->dev,
547 				"[%s]: Could not map the sg list (TO_DEVICE)",
548 				__func__);
549 			return -EFAULT;
550 		}
551 
552 		dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
553 			"(TO_DEVICE)", __func__);
554 
555 		desc = dmaengine_prep_slave_sg(channel,
556 				ctx->device->dma.sg_src,
557 				ctx->device->dma.sg_src_len,
558 				DMA_MEM_TO_DEV, DMA_CTRL_ACK);
559 		break;
560 
561 	case DMA_FROM_DEVICE:
562 		channel = ctx->device->dma.chan_cryp2mem;
563 		ctx->device->dma.sg_dst = sg;
564 		ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
565 						 ctx->device->dma.sg_dst,
566 						 ctx->device->dma.nents_dst,
567 						 direction);
568 
569 		if (!ctx->device->dma.sg_dst_len) {
570 			dev_dbg(ctx->device->dev,
571 				"[%s]: Could not map the sg list (FROM_DEVICE)",
572 				__func__);
573 			return -EFAULT;
574 		}
575 
576 		dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
577 			"(FROM_DEVICE)", __func__);
578 
579 		desc = dmaengine_prep_slave_sg(channel,
580 				ctx->device->dma.sg_dst,
581 				ctx->device->dma.sg_dst_len,
582 				DMA_DEV_TO_MEM,
583 				DMA_CTRL_ACK |
584 				DMA_PREP_INTERRUPT);
585 
586 		desc->callback = cryp_dma_out_callback;
587 		desc->callback_param = ctx;
588 		break;
589 
590 	default:
591 		dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
592 			__func__);
593 		return -EFAULT;
594 	}
595 
596 	cookie = dmaengine_submit(desc);
597 	dma_async_issue_pending(channel);
598 
599 	return 0;
600 }
601 
602 static void cryp_dma_done(struct cryp_ctx *ctx)
603 {
604 	struct dma_chan *chan;
605 
606 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
607 
608 	chan = ctx->device->dma.chan_mem2cryp;
609 	dmaengine_terminate_all(chan);
610 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
611 		     ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
612 
613 	chan = ctx->device->dma.chan_cryp2mem;
614 	dmaengine_terminate_all(chan);
615 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
616 		     ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
617 }
618 
619 static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
620 			  int len)
621 {
622 	int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
623 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
624 
625 	if (error) {
626 		dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
627 			"failed", __func__);
628 		return error;
629 	}
630 
631 	return len;
632 }
633 
634 static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
635 {
636 	int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
637 	if (error) {
638 		dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
639 			"failed", __func__);
640 		return error;
641 	}
642 
643 	return len;
644 }
645 
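/*
 * Polling mode: write one block (blocksize/4 words) into the input FIFO,
 * busy-wait for the engine, then drain the same amount from the output
 * FIFO, repeating until the whole request has been processed.
 */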
646 static void cryp_polling_mode(struct cryp_ctx *ctx,
647 			      struct cryp_device_data *device_data)
648 {
649 	int len = ctx->blocksize / BYTES_PER_WORD;
650 	int remaining_length = ctx->datalen;
651 	u32 *indata = (u32 *)ctx->indata;
652 	u32 *outdata = (u32 *)ctx->outdata;
653 
654 	while (remaining_length > 0) {
655 		writesl(&device_data->base->din, indata, len);
656 		indata += len;
657 		remaining_length -= (len * BYTES_PER_WORD);
658 		cryp_wait_until_done(device_data);
659 
660 		readsl(&device_data->base->dout, outdata, len);
661 		outdata += len;
662 		cryp_wait_until_done(device_data);
663 	}
664 }
665 
666 static int cryp_disable_power(struct device *dev,
667 			      struct cryp_device_data *device_data,
668 			      bool save_device_context)
669 {
670 	int ret = 0;
671 
672 	dev_dbg(dev, "[%s]", __func__);
673 
674 	spin_lock(&device_data->power_state_spinlock);
675 	if (!device_data->power_state)
676 		goto out;
677 
678 	spin_lock(&device_data->ctx_lock);
679 	if (save_device_context && device_data->current_ctx) {
680 		cryp_save_device_context(device_data,
681 				&device_data->current_ctx->dev_ctx,
682 				cryp_mode);
683 		device_data->restore_dev_ctx = true;
684 	}
685 	spin_unlock(&device_data->ctx_lock);
686 
687 	clk_disable(device_data->clk);
688 	ret = regulator_disable(device_data->pwr_regulator);
689 	if (ret)
690 		dev_err(dev, "[%s]: "
691 				"regulator_disable() failed!",
692 				__func__);
693 
694 	device_data->power_state = false;
695 
696 out:
697 	spin_unlock(&device_data->power_state_spinlock);
698 
699 	return ret;
700 }
701 
702 static int cryp_enable_power(
703 		struct device *dev,
704 		struct cryp_device_data *device_data,
705 		bool restore_device_context)
706 {
707 	int ret = 0;
708 
709 	dev_dbg(dev, "[%s]", __func__);
710 
711 	spin_lock(&device_data->power_state_spinlock);
712 	if (!device_data->power_state) {
713 		ret = regulator_enable(device_data->pwr_regulator);
714 		if (ret) {
715 			dev_err(dev, "[%s]: regulator_enable() failed!",
716 					__func__);
717 			goto out;
718 		}
719 
720 		ret = clk_enable(device_data->clk);
721 		if (ret) {
722 			dev_err(dev, "[%s]: clk_enable() failed!",
723 					__func__);
724 			regulator_disable(device_data->pwr_regulator);
725 			goto out;
726 		}
727 		device_data->power_state = true;
728 	}
729 
730 	if (device_data->restore_dev_ctx) {
731 		spin_lock(&device_data->ctx_lock);
732 		if (restore_device_context && device_data->current_ctx) {
733 			device_data->restore_dev_ctx = false;
734 			cryp_restore_device_context(device_data,
735 					&device_data->current_ctx->dev_ctx);
736 		}
737 		spin_unlock(&device_data->ctx_lock);
738 	}
739 out:
740 	spin_unlock(&device_data->power_state_spinlock);
741 
742 	return ret;
743 }
744 
745 static int hw_crypt_noxts(struct cryp_ctx *ctx,
746 			  struct cryp_device_data *device_data)
747 {
748 	int ret = 0;
749 
750 	const u8 *indata = ctx->indata;
751 	u8 *outdata = ctx->outdata;
752 	u32 datalen = ctx->datalen;
753 	u32 outlen = datalen;
754 
755 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
756 
757 	ctx->outlen = ctx->datalen;
758 
759 	if (unlikely(!IS_ALIGNED((u32)indata, 4))) {
760 		pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
761 			 "0x%08x", __func__, (u32)indata);
762 		return -EINVAL;
763 	}
764 
765 	ret = cryp_setup_context(ctx, device_data);
766 
767 	if (ret)
768 		goto out;
769 
770 	if (cryp_mode == CRYP_MODE_INTERRUPT) {
771 		cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
772 				    CRYP_IRQ_SRC_OUTPUT_FIFO);
773 
774 		/*
775 		 * ctx->outlen is decremented in the cryp_interrupt_handler
776 		 * function. We had to add cpu_relax() (barrier) to make sure
777 		 * that gcc didn't optimize away this variable.
778 		 */
779 		while (ctx->outlen > 0)
780 			cpu_relax();
781 	} else if (cryp_mode == CRYP_MODE_POLLING ||
782 		   cryp_mode == CRYP_MODE_DMA) {
783 		/*
784 		 * The reason DMA also lands in this branch is that, when
785 		 * running cryp_mode = 2 (DMA), the separate DMA routines are
786 		 * only used for cipher/plaintext larger than the blocksize;
787 		 * for a plain CRYPTO_ALG_TYPE_CIPHER request we stay in
788 		 * polling mode, since the overhead of setting up DMA eats up
789 		 * the benefit of using it.
790 		 */
791 		cryp_polling_mode(ctx, device_data);
792 	} else {
793 		dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
794 			__func__);
795 		ret = -EPERM;
796 		goto out;
797 	}
798 
799 	cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
800 	ctx->updated = 1;
801 
802 out:
803 	ctx->indata = indata;
804 	ctx->outdata = outdata;
805 	ctx->datalen = datalen;
806 	ctx->outlen = outlen;
807 
808 	return ret;
809 }
810 
811 static int get_nents(struct scatterlist *sg, int nbytes)
812 {
813 	int nents = 0;
814 
815 	while (nbytes > 0) {
816 		nbytes -= sg->length;
817 		sg = sg_next(sg);
818 		nents++;
819 	}
820 
821 	return nents;
822 }
823 
824 static int ablk_dma_crypt(struct ablkcipher_request *areq)
825 {
826 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
827 	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
828 	struct cryp_device_data *device_data;
829 
830 	int bytes_written = 0;
831 	int bytes_read = 0;
832 	int ret;
833 
834 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
835 
836 	ctx->datalen = areq->nbytes;
837 	ctx->outlen = areq->nbytes;
838 
839 	ret = cryp_get_device_data(ctx, &device_data);
840 	if (ret)
841 		return ret;
842 
843 	ret = cryp_setup_context(ctx, device_data);
844 	if (ret)
845 		goto out;
846 
847 	/* We have the device now, so store the nents in the dma struct. */
848 	ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
849 	ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);
850 
851 	/* Enable DMA in- and output. */
852 	cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS);
853 
854 	bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
855 	bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);
856 
857 	wait_for_completion(&ctx->device->dma.cryp_dma_complete);
858 	cryp_dma_done(ctx);
859 
860 	cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
861 	ctx->updated = 1;
862 
863 out:
864 	spin_lock(&device_data->ctx_lock);
865 	device_data->current_ctx = NULL;
866 	ctx->device = NULL;
867 	spin_unlock(&device_data->ctx_lock);
868 
869 	/*
870 	 * The down_interruptible part for this semaphore is called in
871 	 * cryp_get_device_data.
872 	 */
873 	up(&driver_data.device_allocation);
874 
875 	if (unlikely(bytes_written != bytes_read))
876 		return -EPERM;
877 
878 	return 0;
879 }
880 
881 static int ablk_crypt(struct ablkcipher_request *areq)
882 {
883 	struct ablkcipher_walk walk;
884 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
885 	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
886 	struct cryp_device_data *device_data;
887 	unsigned long src_paddr;
888 	unsigned long dst_paddr;
889 	int ret;
890 	int nbytes;
891 
892 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
893 
894 	ret = cryp_get_device_data(ctx, &device_data);
895 	if (ret)
896 		goto out;
897 
898 	ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
899 	ret = ablkcipher_walk_phys(areq, &walk);
900 
901 	if (ret) {
902 		pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
903 			__func__);
904 		goto out;
905 	}
906 
907 	while ((nbytes = walk.nbytes) > 0) {
908 		ctx->iv = walk.iv;
909 		src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
910 		ctx->indata = phys_to_virt(src_paddr);
911 
912 		dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
913 		ctx->outdata = phys_to_virt(dst_paddr);
914 
915 		ctx->datalen = nbytes - (nbytes % ctx->blocksize);
916 
917 		ret = hw_crypt_noxts(ctx, device_data);
918 		if (ret)
919 			goto out;
920 
921 		nbytes -= ctx->datalen;
922 		ret = ablkcipher_walk_done(areq, &walk, nbytes);
923 		if (ret)
924 			goto out;
925 	}
926 	ablkcipher_walk_complete(&walk);
927 
928 out:
929 	/* Release the device */
930 	spin_lock(&device_data->ctx_lock);
931 	device_data->current_ctx = NULL;
932 	ctx->device = NULL;
933 	spin_unlock(&device_data->ctx_lock);
934 
935 	/*
936 	 * The down_interruptible part for this semaphore is called in
937 	 * cryp_get_device_data.
938 	 */
939 	up(&driver_data.device_allocation);
940 
941 	return ret;
942 }
943 
944 static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
945 				 const u8 *key, unsigned int keylen)
946 {
947 	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
948 	u32 *flags = &cipher->base.crt_flags;
949 
950 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
951 
952 	switch (keylen) {
953 	case AES_KEYSIZE_128:
954 		ctx->config.keysize = CRYP_KEY_SIZE_128;
955 		break;
956 
957 	case AES_KEYSIZE_192:
958 		ctx->config.keysize = CRYP_KEY_SIZE_192;
959 		break;
960 
961 	case AES_KEYSIZE_256:
962 		ctx->config.keysize = CRYP_KEY_SIZE_256;
963 		break;
964 
965 	default:
966 		pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
967 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
968 		return -EINVAL;
969 	}
970 
971 	memcpy(ctx->key, key, keylen);
972 	ctx->keylen = keylen;
973 
974 	ctx->updated = 0;
975 
976 	return 0;
977 }
978 
979 static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
980 				 const u8 *key, unsigned int keylen)
981 {
982 	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
983 	u32 *flags = &cipher->base.crt_flags;
984 	u32 tmp[DES_EXPKEY_WORDS];
985 	int ret;
986 
987 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
988 	if (keylen != DES_KEY_SIZE) {
989 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
990 		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
991 				__func__);
992 		return -EINVAL;
993 	}
994 
995 	ret = des_ekey(tmp, key);
996 	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
997 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
998 		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
999 				__func__);
1000 		return -EINVAL;
1001 	}
1002 
1003 	memcpy(ctx->key, key, keylen);
1004 	ctx->keylen = keylen;
1005 
1006 	ctx->updated = 0;
1007 	return 0;
1008 }
1009 
1010 static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1011 				  const u8 *key, unsigned int keylen)
1012 {
1013 	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1014 	u32 *flags = &cipher->base.crt_flags;
1015 	const u32 *K = (const u32 *)key;
1016 	u32 tmp[DES3_EDE_EXPKEY_WORDS];
1017 	int i, ret;
1018 
1019 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
1020 	if (keylen != DES3_EDE_KEY_SIZE) {
1021 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
1022 		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
1023 				__func__);
1024 		return -EINVAL;
1025 	}
1026 
1027 	/* Checking key interdependency for weak key detection. */
1028 	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
1029 				!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
1030 			(*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
1031 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
1032 		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
1033 				__func__);
1034 		return -EINVAL;
1035 	}
1036 	for (i = 0; i < 3; i++) {
1037 		ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
1038 		if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
1039 			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
1040 			pr_debug(DEV_DBG_NAME " [%s]: "
1041 					"CRYPTO_TFM_REQ_WEAK_KEY", __func__);
1042 			return -EINVAL;
1043 		}
1044 	}
1045 
1046 	memcpy(ctx->key, key, keylen);
1047 	ctx->keylen = keylen;
1048 
1049 	ctx->updated = 0;
1050 	return 0;
1051 }
1052 
1053 static int cryp_blk_encrypt(struct ablkcipher_request *areq)
1054 {
1055 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1056 	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1057 
1058 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
1059 
1060 	ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
1061 
1062 	/* DMA does not work for DES due to a hw bug */
1064 	if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
1065 		return ablk_dma_crypt(areq);
1066 
1067 	/* For everything except DMA, we run the non DMA version. */
1068 	return ablk_crypt(areq);
1069 }
1070 
1071 static int cryp_blk_decrypt(struct ablkcipher_request *areq)
1072 {
1073 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1074 	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1075 
1076 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
1077 
1078 	ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
1079 
1080 	/* DMA does not work for DES due to a hw bug */
1081 	if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
1082 		return ablk_dma_crypt(areq);
1083 
1084 	/* For everything except DMA, we run the non DMA version. */
1085 	return ablk_crypt(areq);
1086 }
1087 
1088 struct cryp_algo_template {
1089 	enum cryp_algo_mode algomode;
1090 	struct crypto_alg crypto;
1091 };
1092 
1093 static int cryp_cra_init(struct crypto_tfm *tfm)
1094 {
1095 	struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
1096 	struct crypto_alg *alg = tfm->__crt_alg;
1097 	struct cryp_algo_template *cryp_alg = container_of(alg,
1098 			struct cryp_algo_template,
1099 			crypto);
1100 
1101 	ctx->config.algomode = cryp_alg->algomode;
1102 	ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
1103 
1104 	return 0;
1105 }
1106 
1107 static struct cryp_algo_template cryp_algs[] = {
1108 	{
1109 		.algomode = CRYP_ALGO_AES_ECB,
1110 		.crypto = {
1111 			.cra_name = "aes",
1112 			.cra_driver_name = "aes-ux500",
1113 			.cra_priority =	300,
1114 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1115 					CRYPTO_ALG_ASYNC,
1116 			.cra_blocksize = AES_BLOCK_SIZE,
1117 			.cra_ctxsize = sizeof(struct cryp_ctx),
1118 			.cra_alignmask = 3,
1119 			.cra_type = &crypto_ablkcipher_type,
1120 			.cra_init = cryp_cra_init,
1121 			.cra_module = THIS_MODULE,
1122 			.cra_u = {
1123 				.ablkcipher = {
1124 					.min_keysize = AES_MIN_KEY_SIZE,
1125 					.max_keysize = AES_MAX_KEY_SIZE,
1126 					.setkey = aes_ablkcipher_setkey,
1127 					.encrypt = cryp_blk_encrypt,
1128 					.decrypt = cryp_blk_decrypt
1129 				}
1130 			}
1131 		}
1132 	},
1133 	{
1134 		.algomode = CRYP_ALGO_AES_ECB,
1135 		.crypto = {
1136 			.cra_name = "ecb(aes)",
1137 			.cra_driver_name = "ecb-aes-ux500",
1138 			.cra_priority = 300,
1139 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1140 					CRYPTO_ALG_ASYNC,
1141 			.cra_blocksize = AES_BLOCK_SIZE,
1142 			.cra_ctxsize = sizeof(struct cryp_ctx),
1143 			.cra_alignmask = 3,
1144 			.cra_type = &crypto_ablkcipher_type,
1145 			.cra_init = cryp_cra_init,
1146 			.cra_module = THIS_MODULE,
1147 			.cra_u = {
1148 				.ablkcipher = {
1149 					.min_keysize = AES_MIN_KEY_SIZE,
1150 					.max_keysize = AES_MAX_KEY_SIZE,
1151 					.setkey = aes_ablkcipher_setkey,
1152 					.encrypt = cryp_blk_encrypt,
1153 					.decrypt = cryp_blk_decrypt,
1154 				}
1155 			}
1156 		}
1157 	},
1158 	{
1159 		.algomode = CRYP_ALGO_AES_CBC,
1160 		.crypto = {
1161 			.cra_name = "cbc(aes)",
1162 			.cra_driver_name = "cbc-aes-ux500",
1163 			.cra_priority = 300,
1164 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1165 					CRYPTO_ALG_ASYNC,
1166 			.cra_blocksize = AES_BLOCK_SIZE,
1167 			.cra_ctxsize = sizeof(struct cryp_ctx),
1168 			.cra_alignmask = 3,
1169 			.cra_type = &crypto_ablkcipher_type,
1170 			.cra_init = cryp_cra_init,
1171 			.cra_module = THIS_MODULE,
1172 			.cra_u = {
1173 				.ablkcipher = {
1174 					.min_keysize = AES_MIN_KEY_SIZE,
1175 					.max_keysize = AES_MAX_KEY_SIZE,
1176 					.setkey = aes_ablkcipher_setkey,
1177 					.encrypt = cryp_blk_encrypt,
1178 					.decrypt = cryp_blk_decrypt,
1179 					.ivsize = AES_BLOCK_SIZE,
1180 				}
1181 			}
1182 		}
1183 	},
1184 	{
1185 		.algomode = CRYP_ALGO_AES_CTR,
1186 		.crypto = {
1187 			.cra_name = "ctr(aes)",
1188 			.cra_driver_name = "ctr-aes-ux500",
1189 			.cra_priority = 300,
1190 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1191 						CRYPTO_ALG_ASYNC,
1192 			.cra_blocksize = AES_BLOCK_SIZE,
1193 			.cra_ctxsize = sizeof(struct cryp_ctx),
1194 			.cra_alignmask = 3,
1195 			.cra_type = &crypto_ablkcipher_type,
1196 			.cra_init = cryp_cra_init,
1197 			.cra_module = THIS_MODULE,
1198 			.cra_u = {
1199 				.ablkcipher = {
1200 					.min_keysize = AES_MIN_KEY_SIZE,
1201 					.max_keysize = AES_MAX_KEY_SIZE,
1202 					.setkey = aes_ablkcipher_setkey,
1203 					.encrypt = cryp_blk_encrypt,
1204 					.decrypt = cryp_blk_decrypt,
1205 					.ivsize = AES_BLOCK_SIZE,
1206 				}
1207 			}
1208 		}
1209 	},
1210 	{
1211 		.algomode = CRYP_ALGO_DES_ECB,
1212 		.crypto = {
1213 			.cra_name = "des",
1214 			.cra_driver_name = "des-ux500",
1215 			.cra_priority = 300,
1216 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1217 						CRYPTO_ALG_ASYNC,
1218 			.cra_blocksize = DES_BLOCK_SIZE,
1219 			.cra_ctxsize = sizeof(struct cryp_ctx),
1220 			.cra_alignmask = 3,
1221 			.cra_type = &crypto_ablkcipher_type,
1222 			.cra_init = cryp_cra_init,
1223 			.cra_module = THIS_MODULE,
1224 			.cra_u = {
1225 				.ablkcipher = {
1226 					.min_keysize = DES_KEY_SIZE,
1227 					.max_keysize = DES_KEY_SIZE,
1228 					.setkey = des_ablkcipher_setkey,
1229 					.encrypt = cryp_blk_encrypt,
1230 					.decrypt = cryp_blk_decrypt
1231 				}
1232 			}
1233 		}
1234 
1235 	},
1236 	{
1237 		.algomode = CRYP_ALGO_TDES_ECB,
1238 		.crypto = {
1239 			.cra_name = "des3_ede",
1240 			.cra_driver_name = "des3_ede-ux500",
1241 			.cra_priority = 300,
1242 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1243 						CRYPTO_ALG_ASYNC,
1244 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1245 			.cra_ctxsize = sizeof(struct cryp_ctx),
1246 			.cra_alignmask = 3,
1247 			.cra_type = &crypto_ablkcipher_type,
1248 			.cra_init = cryp_cra_init,
1249 			.cra_module = THIS_MODULE,
1250 			.cra_u = {
1251 				.ablkcipher = {
1252 					.min_keysize = DES3_EDE_KEY_SIZE,
1253 					.max_keysize = DES3_EDE_KEY_SIZE,
1254 					.setkey = des_ablkcipher_setkey,
1255 					.encrypt = cryp_blk_encrypt,
1256 					.decrypt = cryp_blk_decrypt
1257 				}
1258 			}
1259 		}
1260 	},
1261 	{
1262 		.algomode = CRYP_ALGO_DES_ECB,
1263 		.crypto = {
1264 			.cra_name = "ecb(des)",
1265 			.cra_driver_name = "ecb-des-ux500",
1266 			.cra_priority = 300,
1267 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1268 					CRYPTO_ALG_ASYNC,
1269 			.cra_blocksize = DES_BLOCK_SIZE,
1270 			.cra_ctxsize = sizeof(struct cryp_ctx),
1271 			.cra_alignmask = 3,
1272 			.cra_type = &crypto_ablkcipher_type,
1273 			.cra_init = cryp_cra_init,
1274 			.cra_module = THIS_MODULE,
1275 			.cra_u = {
1276 				.ablkcipher = {
1277 					.min_keysize = DES_KEY_SIZE,
1278 					.max_keysize = DES_KEY_SIZE,
1279 					.setkey = des_ablkcipher_setkey,
1280 					.encrypt = cryp_blk_encrypt,
1281 					.decrypt = cryp_blk_decrypt,
1282 				}
1283 			}
1284 		}
1285 	},
1286 	{
1287 		.algomode = CRYP_ALGO_TDES_ECB,
1288 		.crypto = {
1289 			.cra_name = "ecb(des3_ede)",
1290 			.cra_driver_name = "ecb-des3_ede-ux500",
1291 			.cra_priority = 300,
1292 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1293 					CRYPTO_ALG_ASYNC,
1294 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1295 			.cra_ctxsize = sizeof(struct cryp_ctx),
1296 			.cra_alignmask = 3,
1297 			.cra_type = &crypto_ablkcipher_type,
1298 			.cra_init = cryp_cra_init,
1299 			.cra_module = THIS_MODULE,
1300 			.cra_u = {
1301 				.ablkcipher = {
1302 					.min_keysize = DES3_EDE_KEY_SIZE,
1303 					.max_keysize = DES3_EDE_KEY_SIZE,
1304 					.setkey = des3_ablkcipher_setkey,
1305 					.encrypt = cryp_blk_encrypt,
1306 					.decrypt = cryp_blk_decrypt,
1307 				}
1308 			}
1309 		}
1310 	},
1311 	{
1312 		.algomode = CRYP_ALGO_DES_CBC,
1313 		.crypto = {
1314 			.cra_name = "cbc(des)",
1315 			.cra_driver_name = "cbc-des-ux500",
1316 			.cra_priority = 300,
1317 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1318 					CRYPTO_ALG_ASYNC,
1319 			.cra_blocksize = DES_BLOCK_SIZE,
1320 			.cra_ctxsize = sizeof(struct cryp_ctx),
1321 			.cra_alignmask = 3,
1322 			.cra_type = &crypto_ablkcipher_type,
1323 			.cra_init = cryp_cra_init,
1324 			.cra_module = THIS_MODULE,
1325 			.cra_u = {
1326 				.ablkcipher = {
1327 					.min_keysize = DES_KEY_SIZE,
1328 					.max_keysize = DES_KEY_SIZE,
1329 					.setkey = des_ablkcipher_setkey,
1330 					.encrypt = cryp_blk_encrypt,
1331 					.decrypt = cryp_blk_decrypt,
1332 				}
1333 			}
1334 		}
1335 	},
1336 	{
1337 		.algomode = CRYP_ALGO_TDES_CBC,
1338 		.crypto = {
1339 			.cra_name = "cbc(des3_ede)",
1340 			.cra_driver_name = "cbc-des3_ede-ux500",
1341 			.cra_priority = 300,
1342 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1343 					CRYPTO_ALG_ASYNC,
1344 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1345 			.cra_ctxsize = sizeof(struct cryp_ctx),
1346 			.cra_alignmask = 3,
1347 			.cra_type = &crypto_ablkcipher_type,
1348 			.cra_init = cryp_cra_init,
1349 			.cra_module = THIS_MODULE,
1350 			.cra_u = {
1351 				.ablkcipher = {
1352 					.min_keysize = DES3_EDE_KEY_SIZE,
1353 					.max_keysize = DES3_EDE_KEY_SIZE,
1354 					.setkey = des3_ablkcipher_setkey,
1355 					.encrypt = cryp_blk_encrypt,
1356 					.decrypt = cryp_blk_decrypt,
1357 					.ivsize = DES3_EDE_BLOCK_SIZE,
1358 				}
1359 			}
1360 		}
1361 	}
1362 };
1363 
1364 /**
1365  * cryp_algs_register_all - register all supported CRYP algorithms with the crypto API
1366  */
1367 static int cryp_algs_register_all(void)
1368 {
1369 	int ret;
1370 	int i;
1371 	int count;
1372 
1373 	pr_debug("[%s]", __func__);
1374 
1375 	for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
1376 		ret = crypto_register_alg(&cryp_algs[i].crypto);
1377 		if (ret) {
1378 			count = i;
1379 			pr_err("[%s] alg registration failed",
1380 					cryp_algs[i].crypto.cra_driver_name);
1381 			goto unreg;
1382 		}
1383 	}
1384 	return 0;
1385 unreg:
1386 	for (i = 0; i < count; i++)
1387 		crypto_unregister_alg(&cryp_algs[i].crypto);
1388 	return ret;
1389 }
1390 
1391 /**
1392  * cryp_algs_unregister_all - unregister all previously registered CRYP algorithms
1393  */
1394 static void cryp_algs_unregister_all(void)
1395 {
1396 	int i;
1397 
1398 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
1399 
1400 	for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
1401 		crypto_unregister_alg(&cryp_algs[i].crypto);
1402 }
1403 
1404 static int ux500_cryp_probe(struct platform_device *pdev)
1405 {
1406 	int ret;
1407 	int cryp_error = 0;
1408 	struct resource *res = NULL;
1409 	struct resource *res_irq = NULL;
1410 	struct cryp_device_data *device_data;
1411 	struct cryp_protection_config prot = {
1412 		.privilege_access = CRYP_STATE_ENABLE
1413 	};
1414 	struct device *dev = &pdev->dev;
1415 
1416 	dev_dbg(dev, "[%s]", __func__);
1417 	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
1418 	if (!device_data) {
1419 		dev_err(dev, "[%s]: kzalloc() failed!", __func__);
1420 		ret = -ENOMEM;
1421 		goto out;
1422 	}
1423 
1424 	device_data->dev = dev;
1425 	device_data->current_ctx = NULL;
1426 
1427 	/* Grab the DMA configuration from platform data. */
1428 	mem_to_engine = &((struct cryp_platform_data *)
1429 			 dev->platform_data)->mem_to_engine;
1430 	engine_to_mem = &((struct cryp_platform_data *)
1431 			 dev->platform_data)->engine_to_mem;
1432 
1433 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1434 	if (!res) {
1435 		dev_err(dev, "[%s]: platform_get_resource() failed",
1436 				__func__);
1437 		ret = -ENODEV;
1438 		goto out;
1439 	}
1440 
1441 	device_data->phybase = res->start;
1442 	device_data->base = devm_ioremap_resource(dev, res);
1443 	if (IS_ERR(device_data->base)) {
1444 		dev_err(dev, "[%s]: ioremap failed!", __func__);
1445 		ret = PTR_ERR(device_data->base);
1446 		goto out;
1447 	}
1448 
1449 	spin_lock_init(&device_data->ctx_lock);
1450 	spin_lock_init(&device_data->power_state_spinlock);
1451 
1452 	/* Enable power for CRYP hardware block */
1453 	device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape");
1454 	if (IS_ERR(device_data->pwr_regulator)) {
1455 		dev_err(dev, "[%s]: could not get cryp regulator", __func__);
1456 		ret = PTR_ERR(device_data->pwr_regulator);
1457 		device_data->pwr_regulator = NULL;
1458 		goto out;
1459 	}
1460 
1461 	/* Enable the clk for CRYP hardware block */
1462 	device_data->clk = devm_clk_get(&pdev->dev, NULL);
1463 	if (IS_ERR(device_data->clk)) {
1464 		dev_err(dev, "[%s]: clk_get() failed!", __func__);
1465 		ret = PTR_ERR(device_data->clk);
1466 		goto out_regulator;
1467 	}
1468 
1469 	ret = clk_prepare(device_data->clk);
1470 	if (ret) {
1471 		dev_err(dev, "[%s]: clk_prepare() failed!", __func__);
1472 		goto out_regulator;
1473 	}
1474 
1475 	/* Enable device power (and clock) */
1476 	ret = cryp_enable_power(device_data->dev, device_data, false);
1477 	if (ret) {
1478 		dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
1479 		goto out_clk_unprepare;
1480 	}
1481 
1482 	cryp_error = cryp_check(device_data);
1483 	if (cryp_error != 0) {
1484 		dev_err(dev, "[%s]: cryp_init() failed!", __func__);
1485 		ret = -EINVAL;
1486 		goto out_power;
1487 	}
1488 
1489 	cryp_error = cryp_configure_protection(device_data, &prot);
1490 	if (cryp_error != 0) {
1491 		dev_err(dev, "[%s]: cryp_configure_protection() failed!",
1492 			__func__);
1493 		ret = -EINVAL;
1494 		goto out_power;
1495 	}
1496 
1497 	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1498 	if (!res_irq) {
1499 		dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
1500 			__func__);
1501 		ret = -ENODEV;
1502 		goto out_power;
1503 	}
1504 
1505 	ret = devm_request_irq(&pdev->dev, res_irq->start,
1506 			       cryp_interrupt_handler, 0, "cryp1", device_data);
1507 	if (ret) {
1508 		dev_err(dev, "[%s]: Unable to request IRQ", __func__);
1509 		goto out_power;
1510 	}
1511 
1512 	if (cryp_mode == CRYP_MODE_DMA)
1513 		cryp_dma_setup_channel(device_data, dev);
1514 
1515 	platform_set_drvdata(pdev, device_data);
1516 
1517 	/* Put the new device into the device list... */
1518 	klist_add_tail(&device_data->list_node, &driver_data.device_list);
1519 
1520 	/* ... and signal that a new device is available. */
1521 	up(&driver_data.device_allocation);
1522 
1523 	atomic_set(&session_id, 1);
1524 
1525 	ret = cryp_algs_register_all();
1526 	if (ret) {
1527 		dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
1528 			__func__);
1529 		goto out_power;
1530 	}
1531 
1532 	dev_info(dev, "successfully registered\n");
1533 
1534 	return 0;
1535 
1536 out_power:
1537 	cryp_disable_power(device_data->dev, device_data, false);
1538 
1539 out_clk_unprepare:
1540 	clk_unprepare(device_data->clk);
1541 
1542 out_regulator:
1543 	regulator_put(device_data->pwr_regulator);
1544 
1545 out:
1546 	return ret;
1547 }
1548 
1549 static int ux500_cryp_remove(struct platform_device *pdev)
1550 {
1551 	struct cryp_device_data *device_data;
1552 
1553 	dev_dbg(&pdev->dev, "[%s]", __func__);
1554 	device_data = platform_get_drvdata(pdev);
1555 	if (!device_data) {
1556 		dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
1557 			__func__);
1558 		return -ENOMEM;
1559 	}
1560 
1561 	/* Try to decrease the number of available devices. */
1562 	if (down_trylock(&driver_data.device_allocation))
1563 		return -EBUSY;
1564 
1565 	/* Check that the device is free */
1566 	spin_lock(&device_data->ctx_lock);
1567 	/* current_ctx allocates a device, NULL = unallocated */
1568 	if (device_data->current_ctx) {
1569 		/* The device is busy */
1570 		spin_unlock(&device_data->ctx_lock);
1571 		/* Return the device to the pool. */
1572 		up(&driver_data.device_allocation);
1573 		return -EBUSY;
1574 	}
1575 
1576 	spin_unlock(&device_data->ctx_lock);
1577 
1578 	/* Remove the device from the list */
1579 	if (klist_node_attached(&device_data->list_node))
1580 		klist_remove(&device_data->list_node);
1581 
1582 	/* If this was the last device, remove the services */
1583 	if (list_empty(&driver_data.device_list.k_list))
1584 		cryp_algs_unregister_all();
1585 
1586 	if (cryp_disable_power(&pdev->dev, device_data, false))
1587 		dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1588 			__func__);
1589 
1590 	clk_unprepare(device_data->clk);
1591 	regulator_put(device_data->pwr_regulator);
1592 
1593 	return 0;
1594 }
1595 
1596 static void ux500_cryp_shutdown(struct platform_device *pdev)
1597 {
1598 	struct cryp_device_data *device_data;
1599 
1600 	dev_dbg(&pdev->dev, "[%s]", __func__);
1601 
1602 	device_data = platform_get_drvdata(pdev);
1603 	if (!device_data) {
1604 		dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
1605 			__func__);
1606 		return;
1607 	}
1608 
1609 	/* Check that the device is free */
1610 	spin_lock(&device_data->ctx_lock);
1611 	/* current_ctx allocates a device, NULL = unallocated */
1612 	if (!device_data->current_ctx) {
1613 		if (down_trylock(&driver_data.device_allocation))
1614 			dev_dbg(&pdev->dev, "[%s]: Cryp still in use! "
1615 				"Shutting down anyway...", __func__);
1616 		/*
1617 		 * Allocate the device: set current_ctx to a non-NULL (dummy)
1618 		 * value so the device cannot be handed out again if a
1619 		 * context switch occurs while shutting down.
1620 		 */
1621 		device_data->current_ctx++;
1622 	}
1623 	spin_unlock(&device_data->ctx_lock);
1624 
1625 	/* Remove the device from the list */
1626 	if (klist_node_attached(&device_data->list_node))
1627 		klist_remove(&device_data->list_node);
1628 
1629 	/* If this was the last device, remove the services */
1630 	if (list_empty(&driver_data.device_list.k_list))
1631 		cryp_algs_unregister_all();
1632 
1633 	if (cryp_disable_power(&pdev->dev, device_data, false))
1634 		dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1635 			__func__);
1636 
1637 }
1638 
1639 #ifdef CONFIG_PM_SLEEP
1640 static int ux500_cryp_suspend(struct device *dev)
1641 {
1642 	int ret;
1643 	struct platform_device *pdev = to_platform_device(dev);
1644 	struct cryp_device_data *device_data;
1645 	struct resource *res_irq;
1646 	struct cryp_ctx *temp_ctx = NULL;
1647 
1648 	dev_dbg(dev, "[%s]", __func__);
1649 
1650 	/* Handle state? */
1651 	device_data = platform_get_drvdata(pdev);
1652 	if (!device_data) {
1653 		dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
1654 		return -ENOMEM;
1655 	}
1656 
1657 	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1658 	if (!res_irq)
1659 		dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__);
1660 	else
1661 		disable_irq(res_irq->start);
1662 
1663 	spin_lock(&device_data->ctx_lock);
1664 	if (!device_data->current_ctx)
1665 		device_data->current_ctx++;
1666 	spin_unlock(&device_data->ctx_lock);
1667 
1668 	if (device_data->current_ctx == ++temp_ctx) {
1669 		if (down_interruptible(&driver_data.device_allocation))
1670 			dev_dbg(dev, "[%s]: down_interruptible() failed",
1671 				__func__);
1672 		ret = cryp_disable_power(dev, device_data, false);
1673 
1674 	} else
1675 		ret = cryp_disable_power(dev, device_data, true);
1676 
1677 	if (ret)
1678 		dev_err(dev, "[%s]: cryp_disable_power()", __func__);
1679 
1680 	return ret;
1681 }
1682 
1683 static int ux500_cryp_resume(struct device *dev)
1684 {
1685 	int ret = 0;
1686 	struct platform_device *pdev = to_platform_device(dev);
1687 	struct cryp_device_data *device_data;
1688 	struct resource *res_irq;
1689 	struct cryp_ctx *temp_ctx = NULL;
1690 
1691 	dev_dbg(dev, "[%s]", __func__);
1692 
1693 	device_data = platform_get_drvdata(pdev);
1694 	if (!device_data) {
1695 		dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
1696 		return -ENOMEM;
1697 	}
1698 
1699 	spin_lock(&device_data->ctx_lock);
1700 	if (device_data->current_ctx == ++temp_ctx)
1701 		device_data->current_ctx = NULL;
1702 	spin_unlock(&device_data->ctx_lock);
1703 
1704 
1705 	if (!device_data->current_ctx)
1706 		up(&driver_data.device_allocation);
1707 	else
1708 		ret = cryp_enable_power(dev, device_data, true);
1709 
1710 	if (ret)
1711 		dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
1712 	else {
1713 		res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1714 		if (res_irq)
1715 			enable_irq(res_irq->start);
1716 	}
1717 
1718 	return ret;
1719 }
1720 #endif
1721 
1722 static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
1723 
1724 static const struct of_device_id ux500_cryp_match[] = {
1725 	{ .compatible = "stericsson,ux500-cryp" },
1726 	{ },
1727 };
1728 MODULE_DEVICE_TABLE(of, ux500_cryp_match);
1729 
1730 static struct platform_driver cryp_driver = {
1731 	.probe  = ux500_cryp_probe,
1732 	.remove = ux500_cryp_remove,
1733 	.shutdown = ux500_cryp_shutdown,
1734 	.driver = {
1735 		.name  = "cryp1",
1736 		.of_match_table = ux500_cryp_match,
1737 		.pm    = &ux500_cryp_pm,
1738 	}
1739 };
1740 
1741 static int __init ux500_cryp_mod_init(void)
1742 {
1743 	pr_debug("[%s] is called!", __func__);
1744 	klist_init(&driver_data.device_list, NULL, NULL);
1745 	/* Initialize the semaphore to 0 devices (locked state) */
1746 	sema_init(&driver_data.device_allocation, 0);
1747 	return platform_driver_register(&cryp_driver);
1748 }
1749 
1750 static void __exit ux500_cryp_mod_fini(void)
1751 {
1752 	pr_debug("[%s] is called!", __func__);
1753 	platform_driver_unregister(&cryp_driver);
1754 	return;
1755 }
1756 
1757 module_init(ux500_cryp_mod_init);
1758 module_exit(ux500_cryp_mod_fini);
1759 
1760 module_param(cryp_mode, int, 0);
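/* Describe the parameter for modinfo; the three modes correspond to the
 * CRYP_MODE_POLLING/INTERRUPT/DMA values used throughout this file. */
MODULE_PARM_DESC(cryp_mode, "CRYP transfer mode: polling, interrupt or DMA");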
1761 
1762 MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
1763 MODULE_ALIAS_CRYPTO("aes-all");
1764 MODULE_ALIAS_CRYPTO("des-all");
1765 
1766 MODULE_LICENSE("GPL");
1767