// SPDX-License-Identifier: GPL-2.0
/*
 * CFB: Cipher FeedBack mode
 *
 * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
 *
 * CFB is a stream cipher mode which is layered on to a block
 * encryption scheme.  It works very much like a one-time pad where
 * the pad is generated initially from the encrypted IV and then
 * subsequently from the encrypted previous block of ciphertext.  The
 * pad is XOR'd into the plaintext to get the final ciphertext.
 *
 * The scheme of CFB is best described by Wikipedia:
 *
 * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
 *
 * Note that since the pad for both encryption and decryption is
 * generated by an encryption operation, CFB never uses the block
 * decryption function.
 */

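/*
 * Concretely (a sketch of the recurrence implemented below, writing
 * E() for the underlying block encryption and C(-1) for the IV):
 *
 *	pad(i) = E(C(i - 1))
 *	C(i)   = P(i) ^ pad(i)		(encryption)
 *	P(i)   = C(i) ^ pad(i)		(decryption)
 */
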
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

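/*
 * Block size of the underlying single-block cipher.  The CFB wrapper
 * itself advertises a block size of 1 (it is a stream cipher); this is
 * the granularity at which the keystream pad is generated.
 */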
static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
{
	return crypto_cipher_blocksize(skcipher_cipher_simple(tfm));
}

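/*
 * Encrypt one block with the underlying cipher.  This is the only
 * primitive CFB needs: both encryption and decryption use it to
 * generate the pad (note the dst/src argument order of the underlying
 * crypto_cipher_encrypt_one() call).
 */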
static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
				   const u8 *src, u8 *dst)
{
	crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
}

/*
 * The final block may be partial; encryption and decryption handle it
 * identically: generate one pad block from the current IV and XOR in
 * only the remaining bytes.
 */
static void crypto_cfb_final(struct skcipher_walk *walk,
			     struct crypto_skcipher *tfm)
{
	const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;
	unsigned int nbytes = walk->nbytes;

	crypto_cfb_encrypt_one(tfm, iv, stream);
	crypto_xor_cpy(dst, stream, src, nbytes);
}

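/*
 * Encrypt full blocks when source and destination buffers are distinct.
 * Each ciphertext block becomes the "IV" that is encrypted to produce
 * the pad for the next block.  Returns the number of trailing bytes
 * (< bsize) left for crypto_cfb_final().
 */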
static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_cfb_encrypt_one(tfm, iv, dst);
		crypto_xor(dst, src, bsize);
		iv = dst;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

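/*
 * As crypto_cfb_encrypt_segment(), but for the in-place case: the pad
 * must be generated into a stack buffer because the source block is
 * overwritten by the XOR.
 */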
static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *iv = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE];

	do {
		crypto_cfb_encrypt_one(tfm, iv, tmp);
		crypto_xor(src, tmp, bsize);
		iv = src;

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

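/*
 * Top-level encrypt handler: walk the request, process all full blocks,
 * then let crypto_cfb_final() deal with a trailing partial block (the
 * walk is configured to only yield one at the very end).
 */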
static int crypto_cfb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	unsigned int bsize = crypto_cfb_bsize(tfm);
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			err = crypto_cfb_encrypt_inplace(&walk, tfm);
		else
			err = crypto_cfb_encrypt_segment(&walk, tfm);
		err = skcipher_walk_done(&walk, err);
	}

	if (walk.nbytes) {
		crypto_cfb_final(&walk, tfm);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}

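/*
 * Decrypt full blocks out-of-place.  The pad is generated by encrypting
 * the previous ciphertext block, so here the IV chains through src (the
 * ciphertext) rather than dst.
 */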
static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_cfb_encrypt_one(tfm, iv, dst);
		crypto_xor(dst, src, bsize);
		iv = src;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

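/*
 * In-place decryption must save each ciphertext block into the IV
 * buffer before the XOR overwrites it, since that ciphertext is needed
 * to generate the pad for the following block.
 */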
static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 * const iv = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE];

	do {
		crypto_cfb_encrypt_one(tfm, iv, tmp);
		memcpy(iv, src, bsize);
		crypto_xor(src, tmp, bsize);
		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

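/* Dispatch full-block decryption to the in-place or segment variant. */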
static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
				     struct crypto_skcipher *tfm)
{
	if (walk->src.virt.addr == walk->dst.virt.addr)
		return crypto_cfb_decrypt_inplace(walk, tfm);
	else
		return crypto_cfb_decrypt_segment(walk, tfm);
}

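/*
 * Top-level decrypt handler; mirrors crypto_cfb_encrypt(), including
 * the shared handling of the final partial block.
 */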
static int crypto_cfb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		err = crypto_cfb_decrypt_blocks(&walk, tfm);
		err = skcipher_walk_done(&walk, err);
	}

	if (walk.nbytes) {
		crypto_cfb_final(&walk, tfm);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}

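/*
 * Instantiate "cfb(cipher)" as an skcipher wrapped around a
 * single-block cipher algorithm.
 */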
static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	/* CFB mode is a stream cipher. */
	inst->alg.base.cra_blocksize = 1;

	/*
	 * To simplify the implementation, configure the skcipher walk to only
	 * give a partial block at the very end, never earlier.
	 */
	inst->alg.chunksize = alg->cra_blocksize;

	inst->alg.encrypt = crypto_cfb_encrypt;
	inst->alg.decrypt = crypto_cfb_decrypt;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		inst->free(inst);

	crypto_mod_put(alg);
	return err;
}

static struct crypto_template crypto_cfb_tmpl = {
	.name = "cfb",
	.create = crypto_cfb_create,
	.module = THIS_MODULE,
};

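/*
 * Illustrative sketch only, not part of the original file: how a
 * kernel-side user could instantiate this template as "cfb(aes)" via
 * the standard skcipher API.  The function name is hypothetical and it
 * is marked __maybe_unused since nothing here calls it.
 */
static int __maybe_unused crypto_cfb_usage_sketch(void)
{
	struct crypto_skcipher *tfm;

	/* Resolves the "cfb" template around the "aes" cipher. */
	tfm = crypto_alloc_skcipher("cfb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * A real user would now set a key with crypto_skcipher_setkey()
	 * and issue requests through the skcipher_request_*() helpers.
	 */
	crypto_free_skcipher(tfm);
	return 0;
}
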
static int __init crypto_cfb_module_init(void)
{
	return crypto_register_template(&crypto_cfb_tmpl);
}

static void __exit crypto_cfb_module_exit(void)
{
	crypto_unregister_template(&crypto_cfb_tmpl);
}

subsys_initcall(crypto_cfb_module_init);
module_exit(crypto_cfb_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cfb");