/* LRW: as defined by Cyril Guyot in
 *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

struct priv {
	struct crypto_cipher *child;
	struct lrw_table_ctx table;
};

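/*
 * Set bit 'bit' of the 128-bit block 'b' in the big-endian bit numbering
 * of the gf128mul "bbe" convention: bit 0 is the least significant bit
 * of the last byte of the block.  The XOR below remaps that index onto
 * __set_bit()'s native long-word layout, which differs between big- and
 * little-endian machines.
 */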
static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

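/*
 * Precompute the multiplication table for the tweak key (Key2) and the
 * mulinc[] optimization table.  mulinc[i] holds (x^0 + ... + x^i) * Key2;
 * since incrementing the block counter I flips exactly its trailing run
 * of one bits plus the following zero bit, the next tweak can be derived
 * as T(I+1) = T(I) xor mulinc[get_index128(I)] instead of doing a full
 * GF(2^128) multiplication per block.
 */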
int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
	be128 tmp = { 0 };
	int i;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);

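/*
 * lrw_init_table(), lrw_free_table() and lrw_crypt() below are exported
 * so that accelerated implementations (e.g. arch-specific glue code) can
 * reuse the table setup and walking logic while supplying their own bulk
 * block crypt function.
 */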
void lrw_free_table(struct lrw_table_ctx *ctx)
{
	if (ctx->table)
		gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);

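/*
 * The supplied key is the concatenation of the block cipher key (Key1)
 * and the 16-byte tweak key (Key2).  Key1 is handed to the child cipher
 * and Key2 is used to build the GF(2^128) multiplication tables.
 */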
static int setkey(struct crypto_tfm *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_tfm_ctx(parent);
	struct crypto_cipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;

	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen - bsize);
	if (err)
		return err;
	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
				     CRYPTO_TFM_RES_MASK);

	return lrw_init_table(&ctx->table, tweak);
}

struct sinfo {
	be128 t;
	struct crypto_tfm *tfm;
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
};

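/* 128-bit big-endian increment: bump the low 64 bits and carry into the
 * high 64 bits on wraparound */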
static inline void inc(be128 *iv)
{
	be64_add_cpu(&iv->b, 1);
	if (!iv->b)
		be64_add_cpu(&iv->a, 1);
}

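/* one LRW block: C = E(Key1, P xor T) xor T */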
static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
{
	be128_xor(dst, &s->t, src);		/* PP <- T xor P */
	s->fn(s->tfm, dst, dst);		/* CC <- E(Key1,PP) */
	be128_xor(dst, dst, &s->t);		/* C <- T xor CC */
}

/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
	int x;
	__be32 *p = (__be32 *) block;

	for (p += 3, x = 0; x < 128; p--, x += 32) {
		u32 val = be32_to_cpup(p);

		if (!~val)
			continue;

		return x + ffz(val);
	}

	/*
	 * If we get here, then x == 128 and we are incrementing the counter
	 * from all ones to all zeros. This means we must return index 127, i.e.
	 * the one corresponding to key2*{ 1,...,1 }.
	 */
	return 127;
}

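/*
 * Walk the request and encrypt/decrypt one 16-byte block at a time,
 * deriving each block's tweak T incrementally from the previous one
 * (see lrw_init_table() above).
 */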
static int crypt(struct blkcipher_desc *d,
		 struct blkcipher_walk *w, struct priv *ctx,
		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int err;
	unsigned int avail;
	const int bs = LRW_BLOCK_SIZE;
	struct sinfo s = {
		.tfm = crypto_cipher_tfm(ctx->child),
		.fn = fn
	};
	be128 *iv;
	u8 *wsrc;
	u8 *wdst;

	err = blkcipher_walk_virt(d, w);
	if (!(avail = w->nbytes))
		return err;

	wsrc = w->src.virt.addr;
	wdst = w->dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)w->iv;
	s.t = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&s.t, ctx->table.table);

	/* T for the first block was computed above, so enter the loop
	 * past the incremental tweak update */
	goto first;

	for (;;) {
		do {
			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&s.t, &s.t,
				  &ctx->table.mulinc[get_index128(iv)]);
			inc(iv);

first:
			lrw_round(&s, wdst, wsrc);

			wsrc += bs;
			wdst += bs;
		} while ((avail -= bs) >= bs);

		err = blkcipher_walk_done(d, w, avail);
		if (!(avail = w->nbytes))
			break;

		wsrc = w->src.virt.addr;
		wdst = w->dst.virt.addr;
	}

	return err;
}

static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx,
		     crypto_cipher_alg(ctx->child)->cia_encrypt);
}

static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx,
		     crypto_cipher_alg(ctx->child)->cia_decrypt);
}

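/*
 * Bulk helper for implementations that supply their own crypt_fn
 * operating on several blocks at once: tweaks for up to
 * req->tbuflen / 16 blocks are buffered in req->tbuf, the whole run is
 * pre-XORed and passed to crypt_fn in one call, then post-XORed.
 */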
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct lrw_crypt_req *req)
{
	const unsigned int bsize = LRW_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct lrw_table_ctx *ctx = req->table_ctx;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *iv, *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(walk.nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)walk.iv;
	t_buf[0] = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&t_buf[0], ctx->table);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				/* T <- I*Key2, using the optimization
				 * discussed in the specification */
				be128_xor(&t_buf[i], t,
						&ctx->mulinc[get_index128(iv)]);
				inc(iv);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);

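/* Instantiate the wrapped cipher and check that it really has a 16-byte
 * block size, since the GF(2^128) tweak math assumes 128-bit blocks. */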
static int init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cipher *cipher;
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct priv *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		crypto_free_cipher(cipher);
		return -EINVAL;
	}

	ctx->child = cipher;
	return 0;
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct priv *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->table);
	crypto_free_cipher(ctx->child);
}

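/*
 * Build an "lrw(<cipher>)" instance around a 16-byte block cipher.  The
 * advertised key size is the child's key size plus 16 bytes for the
 * tweak key, and the IV carries the 16-byte block index.  A user would
 * typically instantiate it by name, e.g. (a sketch, not from this file):
 *
 *	struct crypto_blkcipher *tfm =
 *		crypto_alloc_blkcipher("lrw(aes)", 0, 0);
 */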
static struct crypto_instance *alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("lrw", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;

	if (alg->cra_alignmask < 7)
		inst->alg.cra_alignmask = 7;
	else
		inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_blkcipher_type;

	if (!(alg->cra_blocksize % 4))
		inst->alg.cra_alignmask |= 3;
	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize =
		alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
	inst->alg.cra_blkcipher.max_keysize =
		alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;

	inst->alg.cra_ctxsize = sizeof(struct priv);

	inst->alg.cra_init = init_tfm;
	inst->alg.cra_exit = exit_tfm;

	inst->alg.cra_blkcipher.setkey = setkey;
	inst->alg.cra_blkcipher.encrypt = encrypt;
	inst->alg.cra_blkcipher.decrypt = decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void free_inst(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.alloc = alloc,
	.free = free_inst,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");