/*
 * CBC: Cipher Block Chaining mode
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct crypto_cbc_ctx {
	struct crypto_cipher *child;
};

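/*
 * Propagate the key to the underlying single-block cipher, mirroring the
 * request flags from the CBC wrapper onto the child and reflecting the
 * child's result flags (e.g. weak-key indications) back to the wrapper.
 */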
static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent);
	struct crypto_cipher *child = ctx->child;
	int err;

	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen);
	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
				     CRYPTO_TFM_RES_MASK);
	return err;
}

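/*
 * Encrypt when source and destination buffers are distinct:
 * C[i] = E(P[i] ^ C[i-1]), with the IV standing in for C[0].  The IV
 * buffer doubles as scratch space for the XOR and is updated to the last
 * ciphertext block so chaining continues across walk steps.
 */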
static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_encrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_xor(iv, src, bsize);
		fn(crypto_cipher_tfm(tfm), dst, iv);
		memcpy(iv, dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

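/*
 * Encrypt when source and destination are the same buffer: XOR the chaining
 * value into the plaintext block and encrypt it in place.  Each freshly
 * produced ciphertext block becomes the chaining value for the next one,
 * and the last of them is copied back into walk->iv.
 */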
static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_encrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_xor(src, iv, bsize);
		fn(crypto_cipher_tfm(tfm), src, src);
		iv = src;

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

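/*
 * Walk the scatterlists and hand each contiguous chunk to the in-place or
 * out-of-place encryption helper.  The helpers return the number of bytes
 * left over (less than one block), which blkcipher_walk_done() carries
 * forward to the next walk step.
 */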
static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
		else
			nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

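/*
 * Decrypt when source and destination buffers are distinct:
 * P[i] = D(C[i]) ^ C[i-1], with the IV standing in for C[0].  The previous
 * ciphertext block remains intact in the source buffer, so only the final
 * block needs to be saved into walk->iv for the next walk step.
 */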
static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_decrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		fn(crypto_cipher_tfm(tfm), dst, src);
		crypto_xor(dst, iv, bsize);
		iv = src;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

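/*
 * Decrypt when source and destination are the same buffer.  Since each
 * block's chaining value is the ciphertext of the block before it, and
 * decrypting in place destroys that ciphertext, the blocks are processed
 * from last to first.  The final ciphertext block is saved up front so it
 * can become the IV for the next walk step.
 */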
static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_decrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 last_iv[bsize];

	/* Start of the last block. */
	src += nbytes - (nbytes & (bsize - 1)) - bsize;
	memcpy(last_iv, src, bsize);

	for (;;) {
		fn(crypto_cipher_tfm(tfm), src, src);
		if ((nbytes -= bsize) < bsize)
			break;
		crypto_xor(src, src - bsize, bsize);
		src -= bsize;
	}

	crypto_xor(src, walk->iv, bsize);
	memcpy(walk->iv, last_iv, bsize);

	return nbytes;
}

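/*
 * Scatterlist walk for decryption; mirrors crypto_cbc_encrypt() but
 * dispatches to the decryption helpers.
 */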
static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
		else
			nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

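/*
 * Instantiate the underlying single-block cipher from the spawn recorded
 * in the instance when a CBC tfm is created, and release it again when the
 * tfm is torn down.
 */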
static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	return 0;
}

static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
	crypto_free_cipher(ctx->child);
}

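/*
 * Template instantiation: build a "cbc(<cipher>)" blkcipher instance
 * around the requested single-block cipher, inheriting its block size,
 * priority, alignmask and key size limits.  Only power-of-two block sizes
 * are accepted, since partial-block lengths are masked with bsize - 1.
 */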
static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = ERR_PTR(-EINVAL);
	if (!is_power_of_2(alg->cra_blocksize))
		goto out_put_alg;

	inst = crypto_alloc_instance("cbc", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_blkcipher_type;

	/* We access the data as u32s when xoring. */
	inst->alg.cra_alignmask |= __alignof__(u32) - 1;

	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx);

	inst->alg.cra_init = crypto_cbc_init_tfm;
	inst->alg.cra_exit = crypto_cbc_exit_tfm;

	inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt;
	inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void crypto_cbc_free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}

static struct crypto_template crypto_cbc_tmpl = {
	.name = "cbc",
	.alloc = crypto_cbc_alloc,
	.free = crypto_cbc_free,
	.module = THIS_MODULE,
};

static int __init crypto_cbc_module_init(void)
{
	return crypto_register_template(&crypto_cbc_tmpl);
}

static void __exit crypto_cbc_module_exit(void)
{
	crypto_unregister_template(&crypto_cbc_tmpl);
}

module_init(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher algorithm");