/*
 * Cryptographic API.
 *
 * Support for Samsung S5PV210 HW acceleration.
 *
 * Copyright (C) 2011 NetUP Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>

#define _SBF(s, v) ((v) << (s))
#define _BIT(b) _SBF(b, 1)

/* Feed control registers */
#define SSS_REG_FCINTSTAT 0x0000
#define SSS_FCINTSTAT_BRDMAINT _BIT(3)
#define SSS_FCINTSTAT_BTDMAINT _BIT(2)
#define SSS_FCINTSTAT_HRDMAINT _BIT(1)
#define SSS_FCINTSTAT_PKDMAINT _BIT(0)

#define SSS_REG_FCINTENSET 0x0004
#define SSS_FCINTENSET_BRDMAINTENSET _BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET _BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET _BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET _BIT(0)

#define SSS_REG_FCINTENCLR 0x0008
#define SSS_FCINTENCLR_BRDMAINTENCLR _BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR _BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR _BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR _BIT(0)

#define SSS_REG_FCINTPEND 0x000C
#define SSS_FCINTPEND_BRDMAINTP _BIT(3)
#define SSS_FCINTPEND_BTDMAINTP _BIT(2)
#define SSS_FCINTPEND_HRDMAINTP _BIT(1)
#define SSS_FCINTPEND_PKDMAINTP _BIT(0)

#define SSS_REG_FCFIFOSTAT 0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL _BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP _BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL _BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP _BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL _BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP _BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL _BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP _BIT(0)

#define SSS_REG_FCFIFOCTRL 0x0014
#define SSS_FCFIFOCTRL_DESSEL _BIT(2)
#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)

#define SSS_REG_FCBRDMAS 0x0020
#define SSS_REG_FCBRDMAL 0x0024
#define SSS_REG_FCBRDMAC 0x0028
#define SSS_FCBRDMAC_BYTESWAP _BIT(1)
#define SSS_FCBRDMAC_FLUSH _BIT(0)

#define SSS_REG_FCBTDMAS 0x0030
#define SSS_REG_FCBTDMAL 0x0034
#define SSS_REG_FCBTDMAC 0x0038
#define SSS_FCBTDMAC_BYTESWAP _BIT(1)
#define SSS_FCBTDMAC_FLUSH _BIT(0)

#define SSS_REG_FCHRDMAS 0x0040
#define SSS_REG_FCHRDMAL 0x0044
#define SSS_REG_FCHRDMAC 0x0048
#define SSS_FCHRDMAC_BYTESWAP _BIT(1)
#define SSS_FCHRDMAC_FLUSH _BIT(0)

#define SSS_REG_FCPKDMAS 0x0050
#define SSS_REG_FCPKDMAL 0x0054
#define SSS_REG_FCPKDMAC 0x0058
#define SSS_FCPKDMAC_BYTESWAP _BIT(3)
#define SSS_FCPKDMAC_DESCEND _BIT(2)
#define SSS_FCPKDMAC_TRANSMIT _BIT(1)
#define SSS_FCPKDMAC_FLUSH _BIT(0)

#define SSS_REG_FCPKDMAO 0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL 0x00
#define SSS_AES_BYTESWAP_DI _BIT(11)
#define SSS_AES_BYTESWAP_DO _BIT(10)
#define SSS_AES_BYTESWAP_IV _BIT(9)
#define SSS_AES_BYTESWAP_CNT _BIT(8)
#define SSS_AES_BYTESWAP_KEY _BIT(7)
#define SSS_AES_KEY_CHANGE_MODE _BIT(6)
#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
#define SSS_AES_FIFO_MODE _BIT(3)
#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT _BIT(0)

#define SSS_REG_AES_STATUS 0x04
#define SSS_AES_BUSY _BIT(2)
#define SSS_AES_INPUT_READY _BIT(1)
#define SSS_AES_OUTPUT_READY _BIT(0)
#define SSS_REG_AES_IN_DATA(s) (0x10 + ((s) << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x20 + ((s) << 2))
#define SSS_REG_AES_IV_DATA(s) (0x30 + ((s) << 2))
#define SSS_REG_AES_CNT_DATA(s) (0x40 + ((s) << 2))
#define SSS_REG_AES_KEY_DATA(s) (0x80 + ((s) << 2))

#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))

#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
						SSS_AES_REG(dev, reg))

/* HW engine modes */
#define FLAGS_AES_DECRYPT _BIT(0)
#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
#define FLAGS_AES_CBC _SBF(1, 0x01)
#define FLAGS_AES_CTR _SBF(1, 0x02)

#define AES_KEY_LEN 16
#define CRYPTO_QUEUE_LEN 1

/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from the SSS module's base.
 *
 * Specifies the platform specific configuration of the SSS module.
 * Note: A dedicated structure is used so that the driver specific
 * platform data can be extended in the future.
 */
struct samsung_aes_variant {
	unsigned int aes_offset;
};

struct s5p_aes_reqctx {
	unsigned long mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev *dev;

	uint8_t aes_key[AES_MAX_KEY_SIZE];
	uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
	int keylen;
};

struct s5p_aes_dev {
	struct device *dev;
	struct clk *clk;
	void __iomem *ioaddr;
	void __iomem *aes_ioaddr;
	int irq_fc;

	struct ablkcipher_request *req;
	struct s5p_aes_ctx *ctx;
	struct scatterlist *sg_src;
	struct scatterlist *sg_dst;

	struct tasklet_struct tasklet;
	struct crypto_queue queue;
	bool busy;
	spinlock_t lock;

	struct samsung_aes_variant *variant;
};

static struct s5p_aes_dev *s5p_dev;

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset = 0x4000,
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset = 0x200,
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);

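/*
 * Select the variant data for this platform: prefer the matched device
 * tree node, fall back to the platform device ID table.
 */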
static inline struct samsung_aes_variant *find_s5p_sss_version
				   (struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
		const struct of_device_id *match;

		match = of_match_node(s5p_sss_dt_match,
					pdev->dev.of_node);
		return (struct samsung_aes_variant *)match->data;
	}
	return (struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}

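/*
 * Program the block-cipher RX/TX DMA address and length registers.
 * Writing the length register starts the transfer, so these helpers
 * must be called last when (re)arming a DMA block.
 */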
static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
{
	/* the caller holds dev->lock */
	dev->req->base.complete(&dev->req->base, err);
	dev->busy = false;
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}

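/*
 * Map a single scatterlist entry for DMA. The engine operates on whole
 * AES blocks, so entries that are empty or not a multiple of
 * AES_BLOCK_SIZE are rejected.
 */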
static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
		err = -EINVAL;
		goto exit;
	}
	if (!sg_dma_len(sg)) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_dst = sg;
	err = 0;

exit:
	return err;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
		err = -EINVAL;
		goto exit;
	}
	if (!sg_dma_len(sg)) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_src = sg;
	err = 0;

exit:
	return err;
}

/*
 * Returns true if new transmitting (output) data is ready and its
 * address+length have to be written to the device (by calling
 * s5p_set_dma_outdata()). False otherwise.
 */
static bool s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int err = 0;
	bool ret = false;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (err)
			s5p_aes_complete(dev, err);
		else
			ret = true;
	} else {
		s5p_aes_complete(dev, err);

		dev->busy = true;
		tasklet_schedule(&dev->tasklet);
	}

	return ret;
}

/*
 * Returns true if new receiving (input) data is ready and its
 * address+length have to be written to the device (by calling
 * s5p_set_dma_indata()). False otherwise.
 */
static bool s5p_aes_rx(struct s5p_aes_dev *dev)
{
	int err;
	bool ret = false;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		err = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (err)
			s5p_aes_complete(dev, err);
		else
			ret = true;
	}

	return ret;
}

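/*
 * Feed control interrupt handler: advance the source/destination
 * scatterlists on RX/TX completion and rearm the DMA engine while
 * data remains.
 */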
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	uint32_t status;
	unsigned long flags;
	bool set_dma_tx = false;
	bool set_dma_rx = false;

	spin_lock_irqsave(&dev->lock, flags);

	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		set_dma_rx = s5p_aes_rx(dev);
	if (status & SSS_FCINTSTAT_BTDMAINT)
		set_dma_tx = s5p_aes_tx(dev);

	SSS_WRITE(dev, FCINTPEND, status);

	/*
	 * Writing the length of a DMA block (either receiving or
	 * transmitting) will start the operation immediately, so this
	 * must be done last, after clearing the pending interrupts, so
	 * that the next interrupt is not missed.
	 */
	if (set_dma_tx)
		s5p_set_dma_outdata(dev, dev->sg_dst);
	if (set_dma_rx)
		s5p_set_dma_indata(dev, dev->sg_src);

	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_HANDLED;
}

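/*
 * Load the IV and key into the AES engine. Shorter keys are written at
 * a higher word offset, so the key always ends at the top of the
 * 256-bit key register window.
 */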
static void s5p_set_aes(struct s5p_aes_dev *dev,
			uint8_t *key, uint8_t *iv, unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy(keystart, key, keylen);
}

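/*
 * Configure the AES engine for the requested mode, map the first
 * source/destination scatterlist entries and kick off the transfer.
 * Called from the tasklet with the device marked busy.
 */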
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct ablkcipher_request *req = dev->req;

	uint32_t aes_control;
	int err;
	unsigned long flags;

	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
	else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
		aes_control |= SSS_AES_CHAIN_MODE_CTR;

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* as an alternative, byte swapping could be done on the DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    | SSS_AES_BYTESWAP_DO
		    | SSS_AES_BYTESWAP_IV
		    | SSS_AES_BYTESWAP_KEY
		    | SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata(dev, req->src);
	if (err)
		goto indata_error;

	err = s5p_set_outdata(dev, req->dst);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);

	s5p_set_dma_indata(dev, req->src);
	s5p_set_dma_outdata(dev, req->dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_aes_complete(dev, err);
	spin_unlock_irqrestore(&dev->lock, flags);
}

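/* Tasklet: dequeue the next queued request and start processing it. */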
static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

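/*
 * Enqueue the request and schedule the tasklet unless the engine is
 * already busy; a busy engine picks the request up from the queue when
 * the current one completes.
 */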
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto exit;
	}
	dev->busy = true;

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

exit:
	return err;
}

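/* Common entry point for all modes: validate the size, then queue. */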
static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		pr_err("request size is not a multiple of AES_BLOCK_SIZE\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

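/* Cache the key in the context; it is written to hardware per request. */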
static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
			  const uint8_t *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = s5p_dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

	return 0;
}

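/*
 * A minimal usage sketch, not part of this driver: a kernel-side user
 * would typically reach these implementations through the crypto API
 * by algorithm name (error handling omitted), e.g.
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	...
 *	crypto_free_ablkcipher(tfm);
 *
 * The "-s5p" driver names below are picked when this driver's priority
 * wins the algorithm lookup.
 */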
static struct crypto_alg algs[] = {
	{
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-s5p",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.cra_alignmask = 0x0f,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = s5p_aes_setkey,
			.encrypt = s5p_aes_ecb_encrypt,
			.decrypt = s5p_aes_ecb_decrypt,
		}
	},
	{
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-s5p",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.cra_alignmask = 0x0f,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = s5p_aes_setkey,
			.encrypt = s5p_aes_cbc_encrypt,
			.decrypt = s5p_aes_cbc_decrypt,
		}
	},
};

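/*
 * Probe: map the SSS registers, enable the "secss" clock, request the
 * feed control interrupt and register the AES algorithms.
 */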
static int s5p_aes_probe(struct platform_device *pdev)
{
	int i, j, err = -ENODEV;
	struct s5p_aes_dev *pdata;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct samsung_aes_variant *variant;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr))
		return PTR_ERR(pdata->ioaddr);

	variant = find_s5p_sss_version(pdev);

	pdata->clk = devm_clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
		return err;
	}

	spin_lock_init(&pdata->lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->variant = variant;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	pr_info("s5p-sss driver registered\n");

	return 0;

err_algs:
	dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);

	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return err;
}

static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&pdata->tasklet);

	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe = s5p_aes_probe,
	.remove = s5p_aes_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");