// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block crypto operations until tests complete
 *
 * Copyright 2021 Google LLC
 *
 * This file defines the fips140_crypto_register_*() functions, to which all
 * calls to crypto_register_*() in the module are redirected.  These functions
 * override the tfm initialization function of each algorithm to insert a wait
 * for the module to complete its self-tests and integrity check.
 *
 * The exact field that we override depends on the algorithm type.  For
 * algorithm types that have a strongly-typed initialization function pointer
 * (e.g. skcipher), we must override that, since cra_init isn't guaranteed to be
 * called for those despite the field being present in the base struct.  For the
 * other algorithm types (e.g. "cipher") we must override cra_init.
 *
 * All of this applies to both normal algorithms and template instances.
 *
 * The purpose of all of this is to meet a FIPS requirement where the module
 * must not produce any output from cryptographic algorithms until it completes
 * its tests.  Technically this is impossible, but this solution meets the
 * intent of the requirement, assuming the user makes a supported sequence of
 * API calls.  Note that we can't simply run the tests before registering the
 * algorithms, as the algorithms must be registered in order to run the tests.
 *
 * It would be much easier to handle this in the kernel's crypto API framework.
 * Unfortunately, that was deemed insufficient because the module itself is
 * required to do the enforcement.  What is *actually* required is still very
 * vague, but the approach implemented here should meet the requirement.
 */
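
/*
 * For illustration, a sketch of the caller-visible behavior (hypothetical
 * user code, not part of this module; "cbc(aes)" is just an assumed example
 * name).  Allocating a tfm for any algorithm registered through the wrappers
 * below simply sleeps inside tfm initialization until the tests finish:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	// Returns only once the tests complete (or on an unrelated error)
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */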

/*
 * This file is the one place in fips140.ko that needs to call the kernel's real
 * algorithm registration functions, so #undef all the macros from
 * fips140-defs.h so that the "fips140_" prefix doesn't automatically get added.
 */
#undef aead_register_instance
#undef ahash_register_instance
#undef crypto_register_aead
#undef crypto_register_aeads
#undef crypto_register_ahash
#undef crypto_register_ahashes
#undef crypto_register_alg
#undef crypto_register_algs
#undef crypto_register_rng
#undef crypto_register_rngs
#undef crypto_register_shash
#undef crypto_register_shashes
#undef crypto_register_skcipher
#undef crypto_register_skciphers
#undef shash_register_instance
#undef skcipher_register_instance
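
/*
 * (For context: fips140-defs.h is assumed to define redirection macros of the
 * form "#define crypto_register_alg fips140_crypto_register_alg", one for
 * each function #undef'ed above, so that the rest of the module transparently
 * calls the wrappers defined in this file.)
 */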

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include <linux/xarray.h>

#include "fips140-module.h"

/* Indicates whether the self-tests and integrity check have completed */
DECLARE_COMPLETION(fips140_tests_done);

/* The thread running the self-tests and integrity check */
struct task_struct *fips140_init_thread;

/*
 * Map from crypto_alg to original initialization function (possibly NULL)
 *
 * Note: unregistering an algorithm will leak its map entry, as we don't bother
 * to remove it.  This should be fine since fips140.ko can't be unloaded.  The
 * proper solution would be to store the original function pointer in a new
 * field in 'struct crypto_alg', but that would require kernel support.
 */
static DEFINE_XARRAY(fips140_init_func_map);
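
/*
 * (A sketch of that proper solution, assuming kernel support existed: add a
 * field such as "int (*fips140_orig_init)(struct crypto_tfm *tfm);" to
 * 'struct crypto_alg' and save the original pointer there instead of in the
 * XArray above.  The field name is hypothetical.)
 */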
79 
fips140_ready(void)80 static bool fips140_ready(void)
81 {
82 	return completion_done(&fips140_tests_done);
83 }
84 
85 /*
86  * Wait until crypto operations are allowed to proceed.  Return true if the
87  * tests are done, or false if the caller is the thread running the tests so it
88  * is allowed to proceed anyway.
89  */
fips140_wait_until_ready(struct crypto_alg * alg)90 static bool fips140_wait_until_ready(struct crypto_alg *alg)
91 {
92 	if (fips140_ready())
93 		return true;
94 	/*
95 	 * The thread running the tests must not wait.  Since tfms can only be
96 	 * allocated in task context, we can reliably determine whether the
97 	 * invocation is from that thread or not by checking 'current'.
98 	 */
99 	if (current == fips140_init_thread)
100 		return false;
101 
102 	pr_info("blocking user of %s until tests complete\n",
103 		alg->cra_driver_name);
104 	wait_for_completion(&fips140_tests_done);
105 	pr_info("tests done, allowing %s to proceed\n", alg->cra_driver_name);
106 	return true;
107 }
108 
fips140_store_init_function(struct crypto_alg * alg,void * func)109 static int fips140_store_init_function(struct crypto_alg *alg, void *func)
110 {
111 	void *ret;
112 
113 	/*
114 	 * The XArray API requires 4-byte aligned values.  Although function
115 	 * pointers in general aren't guaranteed to be 4-byte aligned, it should
116 	 * be the case for the platforms this module is used on.
117 	 */
118 	if (WARN_ON((unsigned long)func & 3))
119 		return -EINVAL;
120 
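	/*
	 * xa_store() returns the entry previously stored at the index, or an
	 * xa_err()-encoded pointer on failure; only the error matters here.
	 */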
	ret = xa_store(&fips140_init_func_map, (unsigned long)alg, func,
		       GFP_KERNEL);
	return xa_err(ret);
}

/* Get the algorithm's original initialization function (possibly NULL) */
static void *fips140_load_init_function(struct crypto_alg *alg)
{
	return xa_load(&fips140_init_func_map, (unsigned long)alg);
}

/* tfm initialization function overrides */
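/*
 * Each override below follows the same pattern: wait for the tests to
 * complete (unless the caller is the test thread itself, which must not
 * wait), and once they have completed, write the original initialization
 * function back so that future tfm allocations skip all of this; then call
 * the original function if there was one.  WRITE_ONCE() is used since other
 * tasks may be reading the pointer concurrently while allocating tfms.
 */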

static int fips140_alg_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	int (*cra_init)(struct crypto_tfm *tfm) =
		fips140_load_init_function(alg);

	if (fips140_wait_until_ready(alg))
		WRITE_ONCE(alg->cra_init, cra_init);
	return cra_init ? cra_init(tfm) : 0;
}

static int fips140_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	int (*init)(struct crypto_aead *tfm) =
		fips140_load_init_function(&alg->base);

	if (fips140_wait_until_ready(&alg->base))
		WRITE_ONCE(alg->init, init);
	return init ? init(tfm) : 0;
}

static int fips140_ahash_init_tfm(struct crypto_ahash *tfm)
{
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	struct ahash_alg *alg = container_of(halg, struct ahash_alg, halg);
	int (*init_tfm)(struct crypto_ahash *tfm) =
		fips140_load_init_function(&halg->base);

	if (fips140_wait_until_ready(&halg->base))
		WRITE_ONCE(alg->init_tfm, init_tfm);
	return init_tfm ? init_tfm(tfm) : 0;
}

static int fips140_shash_init_tfm(struct crypto_shash *tfm)
{
	struct shash_alg *alg = crypto_shash_alg(tfm);
	int (*init_tfm)(struct crypto_shash *tfm) =
		fips140_load_init_function(&alg->base);

	if (fips140_wait_until_ready(&alg->base))
		WRITE_ONCE(alg->init_tfm, init_tfm);
	return init_tfm ? init_tfm(tfm) : 0;
}

static int fips140_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int (*init)(struct crypto_skcipher *tfm) =
		fips140_load_init_function(&alg->base);

	if (fips140_wait_until_ready(&alg->base))
		WRITE_ONCE(alg->init, init);
	return init ? init(tfm) : 0;
}

/* Single algorithm registration */

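/*
 * Hook the tfm initialization function of an algorithm being registered: save
 * the original value of alg->field, keyed by the base 'struct crypto_alg',
 * then replace it with the given wrapper function.  This is a no-op if the
 * tests have already completed or if the wrapper is already installed.
 */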
#define prepare_alg(alg, base_alg, field, wrapper_func)			\
({									\
	int err = 0;							\
									\
	if (!fips140_ready() && alg->field != wrapper_func) {		\
		err = fips140_store_init_function(base_alg, alg->field);\
		if (err == 0)						\
			alg->field = wrapper_func;			\
	}								\
	err;								\
})

static int fips140_prepare_alg(struct crypto_alg *alg)
{
	/*
	 * Override cra_init.  This is only for algorithm types like cipher and
	 * rng that don't have a strongly-typed initialization function.
	 */
	return prepare_alg(alg, alg, cra_init, fips140_alg_init_tfm);
}

static int fips140_prepare_aead_alg(struct aead_alg *alg)
{
	return prepare_alg(alg, &alg->base, init, fips140_aead_init_tfm);
}

static int fips140_prepare_ahash_alg(struct ahash_alg *alg)
{
	return prepare_alg(alg, &alg->halg.base, init_tfm,
			   fips140_ahash_init_tfm);
}

static int fips140_prepare_rng_alg(struct rng_alg *alg)
{
	/*
	 * rng doesn't have a strongly-typed initialization function, so we must
	 * treat rng algorithms as "generic" algorithms.
	 */
	return fips140_prepare_alg(&alg->base);
}

static int fips140_prepare_shash_alg(struct shash_alg *alg)
{
	return prepare_alg(alg, &alg->base, init_tfm, fips140_shash_init_tfm);
}

static int fips140_prepare_skcipher_alg(struct skcipher_alg *alg)
{
	return prepare_alg(alg, &alg->base, init, fips140_skcipher_init_tfm);
}

int fips140_crypto_register_alg(struct crypto_alg *alg)
{
	return fips140_prepare_alg(alg) ?: crypto_register_alg(alg);
}

int fips140_crypto_register_aead(struct aead_alg *alg)
{
	return fips140_prepare_aead_alg(alg) ?: crypto_register_aead(alg);
}

int fips140_crypto_register_ahash(struct ahash_alg *alg)
{
	return fips140_prepare_ahash_alg(alg) ?: crypto_register_ahash(alg);
}

int fips140_crypto_register_rng(struct rng_alg *alg)
{
	return fips140_prepare_rng_alg(alg) ?: crypto_register_rng(alg);
}

int fips140_crypto_register_shash(struct shash_alg *alg)
{
	return fips140_prepare_shash_alg(alg) ?: crypto_register_shash(alg);
}

int fips140_crypto_register_skcipher(struct skcipher_alg *alg)
{
	return fips140_prepare_skcipher_alg(alg) ?:
		crypto_register_skcipher(alg);
}

/* Instance registration */
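/*
 * Template instances embed an algorithm of the corresponding type, so they are
 * hooked with the same fips140_prepare_*_alg() helpers as standalone
 * algorithms before being handed to the kernel's real registration functions.
 */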

int fips140_aead_register_instance(struct crypto_template *tmpl,
				   struct aead_instance *inst)
{
	return fips140_prepare_aead_alg(&inst->alg) ?:
		aead_register_instance(tmpl, inst);
}

int fips140_ahash_register_instance(struct crypto_template *tmpl,
				    struct ahash_instance *inst)
{
	return fips140_prepare_ahash_alg(&inst->alg) ?:
		ahash_register_instance(tmpl, inst);
}

int fips140_shash_register_instance(struct crypto_template *tmpl,
				    struct shash_instance *inst)
{
	return fips140_prepare_shash_alg(&inst->alg) ?:
		shash_register_instance(tmpl, inst);
}

int fips140_skcipher_register_instance(struct crypto_template *tmpl,
				       struct skcipher_instance *inst)
{
	return fips140_prepare_skcipher_alg(&inst->alg) ?:
		skcipher_register_instance(tmpl, inst);
}

/* Bulk algorithm registration */
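/*
 * These mirror the crypto_register_*s() array variants: every algorithm in
 * the array is prepared first, and only if all preparations succeed is the
 * whole array passed on to the kernel.
 */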

int fips140_crypto_register_algs(struct crypto_alg *algs, int count)
{
	int i;
	int err;

	for (i = 0; i < count; i++) {
		err = fips140_prepare_alg(&algs[i]);
		if (err)
			return err;
	}

	return crypto_register_algs(algs, count);
}

int fips140_crypto_register_aeads(struct aead_alg *algs, int count)
{
	int i;
	int err;

	for (i = 0; i < count; i++) {
		err = fips140_prepare_aead_alg(&algs[i]);
		if (err)
			return err;
	}

	return crypto_register_aeads(algs, count);
}

int fips140_crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i;
	int err;

	for (i = 0; i < count; i++) {
		err = fips140_prepare_ahash_alg(&algs[i]);
		if (err)
			return err;
	}

	return crypto_register_ahashes(algs, count);
}

int fips140_crypto_register_rngs(struct rng_alg *algs, int count)
{
	int i;
	int err;

	for (i = 0; i < count; i++) {
		err = fips140_prepare_rng_alg(&algs[i]);
		if (err)
			return err;
	}

	return crypto_register_rngs(algs, count);
}

int fips140_crypto_register_shashes(struct shash_alg *algs, int count)
{
	int i;
	int err;

	for (i = 0; i < count; i++) {
		err = fips140_prepare_shash_alg(&algs[i]);
		if (err)
			return err;
	}

	return crypto_register_shashes(algs, count);
}

int fips140_crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i;
	int err;

	for (i = 0; i < count; i++) {
		err = fips140_prepare_skcipher_alg(&algs[i]);
		if (err)
			return err;
	}

	return crypto_register_skciphers(algs, count);
}