/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hw behavior on imx6sl and ull. These are the
 * byte-reversed SHA-1/SHA-256 digests of the empty message, flipped for
 * consistency with the hw output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_ablkcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

/*
 * There can be only one instance of the MXS DCP due to the design of
 * the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

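/*
 * Push a single DMA descriptor through the channel associated with this
 * context and wait for the per-channel completion signalled from the IRQ
 * handler.
 */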
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc),
				 DMA_TO_DEVICE);
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc),
				 DMA_TO_DEVICE);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
		    MXS_DCP_CONTROL0_INTERRUPT |
		    MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}

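/*
 * Bounce the request through the coherent buffers: gather source data into
 * aes_in_buf in DCP_BUF_SZ chunks, run the hardware on each full chunk and
 * scatter the output from aes_out_buf back into the destination list.
 */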
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->nbytes;

		if (limit_hit)
			len = req->nbytes - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
				AES_BLOCK_SIZE);
		else
			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
				AES_BLOCK_SIZE);
	}

	return ret;
}

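/*
 * Per-channel worker for the crypto channel: dequeues requests, notifies
 * backlogged requests and completes each request with the result of the
 * block operation.
 */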
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

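/*
 * Hand the request over to the software fallback cipher, used whenever the
 * key size is not the 128-bit size supported by the DCP.
 */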
static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int ret;

	ablkcipher_request_set_tfm(req, ctx->fallback);

	if (enc)
		ret = crypto_ablkcipher_encrypt(req);
	else
		ret = crypto_ablkcipher_decrypt(req);

	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

	return ret;
}

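/*
 * Queue an AES request on the crypto channel and wake its worker thread.
 */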
static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	/* Propagate the queueing status (-EINPROGRESS, -EBUSY, ...). */
	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	unsigned int ret;

	/*
	 * AES 128 is supported by the hardware, store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/* Check if the key size is supported by kernel at all. */
	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	actx->fallback->base.crt_flags |=
		tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;

	ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |=
		actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;

	return ret;
}

static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *blk;

	blk = crypto_alloc_ablkcipher(name, 0, flags);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(actx->fallback);
	actx->fallback = NULL;
}

/*
 * Hashing (SHA1/SHA256)
 */
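/*
 * Run one hashing pass over the data staged in sha_in_buf; on the final
 * pass the digest is transferred into sha_out_buf.
 */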
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
		    MXS_DCP_CONTROL0_INTERRUPT |
		    MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

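/*
 * Copy request data into the SHA bounce buffer, submitting intermediate
 * passes whenever the buffer fills, and write out the (byte-reversed)
 * digest once the final request has been processed.
 */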
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware returns the digest in reverse byte order. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

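/*
 * Per-channel worker for the SHA channel; mirrors the AES worker above.
 */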
static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret, fini;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);
			fini = rctx->fini;
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

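/*
 * Common helper for update/final/finup: mark the request state and queue it
 * on the SHA channel for the worker thread.
 */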
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	/* Propagate the queueing status (-EINPROGRESS, -EBUSY, ...). */
	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u = {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

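/*
 * Interrupt handler shared by both DCP interrupt lines: acknowledge the
 * channel interrupts and complete the matching per-channel completions.
 */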
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

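/*
 * Probe: map registers, request both interrupts, reset and configure the
 * block, start the per-channel worker threads and register whichever
 * algorithms CAPABILITY1 advertises.
 */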
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret)
		return ret;

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting crypto thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");