/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, const void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

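/*
 * Drive one hardware operation: program the source, destination and
 * length registers, start the engine, then poll the interrupt status
 * until the PENDING bit appears or the timeout counter expires.
 * Returns 0 on success, 1 on timeout.  Note that the engine is handed
 * physical addresses via virt_to_phys(), so src/dst are assumed to
 * live in the kernel's linear mapping.
 */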
static int
do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len,  _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
	return counter ? 0 : 1;
}

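/*
 * Perform one pass over a physically contiguous buffer.  The engine
 * has a single key/IV register set shared by all transforms, so the
 * key (and the IV, in CBC mode) is written and the operation run to
 * completion under the spinlock; the updated IV is read back so that
 * chained CBC calls continue from the right state.
 */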
static void
geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
		void *dst, u32 len, u8 *iv, int mode, int dir)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	/* If the source and destination are the same, then
	 * we need to turn on the coherent flags, otherwise
	 * we don't need to worry
	 */

	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */

	spin_lock_irqsave(&lock, iflags);

	if (mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, iv);
	}

	flags |= AES_CTRL_WRKEY;
	_writefield(AES_WRITEKEY0_REG, tctx->key);

	ret = do_crypt(src, dst, len, flags);
	BUG_ON(ret);

	if (mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, iv);

	spin_unlock_irqrestore(&lock, iflags);
}

/* CRYPTO-API Functions */

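/*
 * The hardware only supports 128-bit keys.  192- and 256-bit keys are
 * routed to a software fallback cipher, with the request/result flags
 * propagated in both directions so the crypto API sees a consistent
 * transform.
 */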
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	tctx->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(tctx->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	tctx->fallback.cip->base.crt_flags |=
		(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
				   CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	unsigned int ret;

	tctx->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(tctx->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	crypto_skcipher_clear_flags(tctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(tctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
	crypto_skcipher_set_flags(tfm,
				  crypto_skcipher_get_flags(tctx->fallback.skcipher) &
				  CRYPTO_TFM_RES_MASK);
	return ret;
}

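/*
 * Single-block ECB operations for the plain "aes" cipher; anything
 * but a 128-bit key goes through the software fallback.
 */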
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
		return;
	}

	geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
			AES_MODE_ECB, AES_DIR_ENCRYPT);
}


static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
		return;
	}

	geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
			AES_MODE_ECB, AES_DIR_DECRYPT);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	tctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(tctx->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(tctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->fallback.cip);
}

static struct crypto_alg geode_alg = {
	.cra_name			=	"aes",
	.cra_driver_name	=	"geode-aes",
	.cra_priority		=	300,
	.cra_alignmask		=	15,
	.cra_flags			=	CRYPTO_ALG_TYPE_CIPHER |
							CRYPTO_ALG_NEED_FALLBACK,
	.cra_init			=	fallback_init_cip,
	.cra_exit			=	fallback_exit_cip,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct geode_aes_tfm_ctx),
	.cra_module			=	THIS_MODULE,
	.cra_u				=	{
		.cipher	=	{
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey			=	geode_setkey_cip,
			.cia_encrypt		=	geode_encrypt,
			.cia_decrypt		=	geode_decrypt
		}
	}
};

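/*
 * skcipher init/exit: allocate a synchronous software fallback (the
 * CRYPTO_ALG_ASYNC bit in the mask excludes async implementations)
 * and reserve room in each request for a fallback subrequest.
 */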
static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	tctx->fallback.skcipher =
		crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(tctx->fallback.skcipher)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(tctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(tctx->fallback.skcipher));
	return 0;
}

static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(tctx->fallback.skcipher);
}

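/*
 * Common skcipher path.  Requests keyed with anything but a 128-bit
 * key are forwarded wholesale to the fallback via a subrequest kept
 * in the request context.  Otherwise the walk yields virtually
 * mapped chunks; each is processed in whole AES blocks and any
 * partial tail is returned to the walk for the next iteration.
 */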
static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
		if (dir == AES_DIR_DECRYPT)
			return crypto_skcipher_decrypt(subreq);
		else
			return crypto_skcipher_encrypt(subreq);
	}

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
				round_down(nbytes, AES_BLOCK_SIZE),
				walk.iv, mode, dir);
		err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
	}

	return err;
}

static int geode_cbc_encrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}

static int geode_cbc_decrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}

static int geode_ecb_encrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static int geode_ecb_decrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}

static struct skcipher_alg geode_skcipher_algs[] = {
	{
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-geode",
		.base.cra_priority	= 400,
		.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct geode_aes_tfm_ctx),
		.base.cra_alignmask	= 15,
		.base.cra_module	= THIS_MODULE,
		.init			= geode_init_skcipher,
		.exit			= geode_exit_skcipher,
		.setkey			= geode_setkey_skcipher,
		.encrypt		= geode_cbc_encrypt,
		.decrypt		= geode_cbc_decrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	}, {
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-geode",
		.base.cra_priority	= 400,
		.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct geode_aes_tfm_ctx),
		.base.cra_alignmask	= 15,
		.base.cra_module	= THIS_MODULE,
		.init			= geode_init_skcipher,
		.exit			= geode_exit_skcipher,
		.setkey			= geode_setkey_skcipher,
		.encrypt		= geode_ecb_encrypt,
		.decrypt		= geode_ecb_decrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
	},
};

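/*
 * PCI glue: map BAR 0, clear any pending engine interrupt state, and
 * register the cipher and skcipher algorithms.  The remove path
 * unwinds in the opposite order.
 */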
static void geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_skciphers(geode_skcipher_algs,
				    ARRAY_SIZE(geode_skcipher_algs));

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}


static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_skciphers(geode_skcipher_algs,
					ARRAY_SIZE(geode_skcipher_algs));
	if (ret)
		goto ealg;

	dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
	return 0;

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	dev_err(&dev->dev, "GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");