1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright 2021 Google LLC
4 * Author: Ard Biesheuvel <ardb@google.com>
5 *
6 * This file is the core of fips140.ko, which contains various crypto algorithms
7 * that are also built into vmlinux. At load time, this module overrides the
8 * built-in implementations of these algorithms with its implementations. It
9 * also runs self-tests on these algorithms and verifies the integrity of its
10 * code and data. If either of these steps fails, the kernel will panic.
11 *
12 * This module is intended to be loaded at early boot time in order to meet
13 * FIPS 140 and NIAP FPT_TST_EXT.1 requirements. It shouldn't be used if you
14 * don't need to meet these requirements.
15 */
16
17 #undef __DISABLE_EXPORTS
18
19 #include <linux/ctype.h>
20 #include <linux/module.h>
21 #include <crypto/aead.h>
22 #include <crypto/aes.h>
23 #include <crypto/hash.h>
24 #include <crypto/sha.h>
25 #include <crypto/skcipher.h>
26 #include <crypto/rng.h>
27 #include <trace/hooks/fips140.h>
28
29 #include "fips140-module.h"
30 #include "internal.h"
31
/*
 * FIPS 140-2 prefers the use of HMAC with a public key over a plain hash.
 * The key is public/fixed; the secret here is nothing — integrity, not
 * confidentiality, is the goal.
 */
u8 __initdata fips140_integ_hmac_key[] = "The quick brown fox jumps over the lazy dog";

/* this is populated by the build tool */
u8 __initdata fips140_integ_hmac_digest[SHA256_DIGEST_SIZE];

/*
 * Zero-length-ish markers emitted into dedicated sections so the module can
 * locate the boundaries of its initcall array and of its .text/.rodata
 * regions at runtime (used by fips140_init() and the integrity check below).
 */
const u32 __initcall_start_marker __section(".initcalls._start");
const u32 __initcall_end_marker __section(".initcalls._end");

const u8 __fips140_text_start __section(".text.._start");
const u8 __fips140_text_end __section(".text.._end");

const u8 __fips140_rodata_start __section(".rodata.._start");
const u8 __fips140_rodata_end __section(".rodata.._end");

/*
 * We need this little detour to prevent Clang from detecting out of bounds
 * accesses to __fips140_text_start and __fips140_rodata_start, which only exist
 * to delineate the section, and so their sizes are not relevant to us.
 */
const u32 *__initcall_start = &__initcall_start_marker;

const u8 *__text_start = &__fips140_text_start;
const u8 *__rodata_start = &__fips140_rodata_start;
58
59 /*
60 * The list of the crypto API algorithms (by cra_name) that will be unregistered
61 * by this module, in preparation for the module registering its own
62 * implementation(s) of them.
63 *
64 * All algorithms that will be declared as FIPS-approved in the module
65 * certification must be listed here, to ensure that the non-FIPS-approved
66 * implementations of these algorithms in the kernel image aren't used.
67 *
68 * For every algorithm in this list, the module should contain all the "same"
69 * implementations that the kernel image does, including the C implementation as
70 * well as any architecture-specific implementations. This is needed to avoid
71 * performance regressions as well as the possibility of an algorithm being
72 * unavailable on some CPUs. E.g., "xcbc(aes)" isn't in this list, as the
73 * module doesn't have a C implementation of it (and it won't be FIPS-approved).
74 *
75 * Due to a quirk in the FIPS requirements, "gcm(aes)" isn't actually able to be
76 * FIPS-approved. However, we otherwise treat it the same as the algorithms
77 * that will be FIPS-approved, and therefore it's included in this list.
78 *
79 * When adding a new algorithm here, make sure to consider whether it needs a
80 * self-test added to fips140_selftests[] as well.
81 */
static const struct {
	const char *name;	/* crypto API cra_name to match */
	bool approved;		/* reported by fips140_is_approved_service() */
} fips140_algs_to_replace[] = {
	{"aes", true},

	{"cmac(aes)", true},
	{"ecb(aes)", true},

	{"cbc(aes)", true},
	{"cts(cbc(aes))", true},
	{"ctr(aes)", true},
	{"xts(aes)", true},
	/* replaced but not FIPS-approvable; see the comment above this table */
	{"gcm(aes)", false},

	{"hmac(sha1)", true},
	{"hmac(sha224)", true},
	{"hmac(sha256)", true},
	{"hmac(sha384)", true},
	{"hmac(sha512)", true},
	{"sha1", true},
	{"sha224", true},
	{"sha256", true},
	{"sha384", true},
	{"sha512", true},

	{"stdrng", true},
	{"jitterentropy_rng", false},
};
111
fips140_should_unregister_alg(struct crypto_alg * alg)112 static bool __init fips140_should_unregister_alg(struct crypto_alg *alg)
113 {
114 int i;
115
116 /*
117 * All software algorithms are synchronous, hardware algorithms must
118 * be covered by their own FIPS 140 certification.
119 */
120 if (alg->cra_flags & CRYPTO_ALG_ASYNC)
121 return false;
122
123 for (i = 0; i < ARRAY_SIZE(fips140_algs_to_replace); i++) {
124 if (!strcmp(alg->cra_name, fips140_algs_to_replace[i].name))
125 return true;
126 }
127 return false;
128 }
129
130 /*
131 * FIPS 140-3 service indicators. FIPS 140-3 requires that all services
132 * "provide an indicator when the service utilises an approved cryptographic
133 * algorithm, security function or process in an approved manner". What this
134 * means is very debatable, even with the help of the FIPS 140-3 Implementation
135 * Guidance document. However, it was decided that a function that takes in an
136 * algorithm name and returns whether that algorithm is approved or not will
137 * meet this requirement. Note, this relies on some properties of the module:
138 *
139 * - The module doesn't distinguish between "services" and "algorithms"; its
140 * services are simply its algorithms.
141 *
142 * - The status of an approved algorithm is never non-approved, since (a) the
143 * module doesn't support operating in a non-approved mode, such as a mode
144 * where the self-tests are skipped; (b) there are no cases where the module
145 * supports non-approved settings for approved algorithms, e.g.
146 * non-approved key sizes; and (c) this function isn't available to be
147 * called until the module_init function has completed, so it's guaranteed
148 * that the self-tests and integrity check have already passed.
149 *
150 * - The module does support some non-approved algorithms, so a single static
151 * indicator ("return true;") would not be acceptable.
152 */
fips140_is_approved_service(const char * name)153 bool fips140_is_approved_service(const char *name)
154 {
155 size_t i;
156
157 for (i = 0; i < ARRAY_SIZE(fips140_algs_to_replace); i++) {
158 if (!strcmp(name, fips140_algs_to_replace[i].name))
159 return fips140_algs_to_replace[i].approved;
160 }
161 return false;
162 }
163 EXPORT_SYMBOL_GPL(fips140_is_approved_service);
164
165 /*
166 * FIPS 140-3 requires that modules provide a "service" that outputs "the name
167 * or module identifier and the versioning information that can be correlated
168 * with a validation record". This function meets that requirement.
169 *
170 * Note: the module also prints this same information to the kernel log when it
171 * is loaded. That might meet the requirement by itself. However, given the
172 * vagueness of what counts as a "service", we provide this function too, just
173 * in case the certification lab or CMVP is happier with an explicit function.
174 *
175 * Note: /sys/modules/fips140/scmversion also provides versioning information
176 * about the module. However that file just shows the bare git commit ID, so it
177 * probably isn't sufficient to meet the FIPS requirement, which seems to want
178 * the "official" module name and version number used in the FIPS certificate.
179 */
fips140_module_version(void)180 const char *fips140_module_version(void)
181 {
182 return FIPS140_MODULE_NAME " " FIPS140_MODULE_VERSION;
183 }
184 EXPORT_SYMBOL_GPL(fips140_module_version);
185
/*
 * Algorithms that were already in active use ("live") when this module
 * loaded; they are moved here off crypto_alg_list (rather than unregistered)
 * by unregister_existing_fips140_algos().
 */
static LIST_HEAD(existing_live_algos);
187
/*
 * Release a list of algorithms which have been removed from crypto_alg_list.
 *
 * Note that even though the list is a private list, we have to hold
 * crypto_alg_sem while iterating through it because crypto_unregister_alg() may
 * run concurrently (as we haven't taken a reference to the algorithms on the
 * list), and crypto_unregister_alg() will remove the algorithm from whichever
 * list it happens to be on, while holding crypto_alg_sem. That's okay, since
 * in that case crypto_unregister_alg() will handle the crypto_alg_put().
 */
static void fips140_remove_final(struct list_head *list)
{
	struct crypto_alg *alg;
	struct crypto_alg *n;

	/*
	 * We need to take crypto_alg_sem to safely traverse the list (see
	 * comment above), but we have to drop it when doing each
	 * crypto_alg_put() as that may take crypto_alg_sem again.
	 */
	down_write(&crypto_alg_sem);
	list_for_each_entry_safe(alg, n, list, cra_list) {
		/*
		 * list_del_init() (not plain list_del()) — presumably so a
		 * concurrent crypto_unregister_alg() sees an empty entry and
		 * doesn't unlink it a second time; see comment above.
		 */
		list_del_init(&alg->cra_list);
		up_write(&crypto_alg_sem);

		/* Drop our reference; may free the algorithm. */
		crypto_alg_put(alg);

		down_write(&crypto_alg_sem);
	}
	up_write(&crypto_alg_sem);
}
219
/*
 * Take the built-in implementations of the algorithms in
 * fips140_algs_to_replace[] out of service, in preparation for this module
 * registering its own implementations: idle algorithms are fully torn down,
 * while "live" ones (with TFMs allocated) are hidden and renamed so that no
 * new users can find them.  Called once from fips140_init().
 */
static void __init unregister_existing_fips140_algos(void)
{
	struct crypto_alg *alg, *tmp;
	LIST_HEAD(remove_list);
	LIST_HEAD(spawns);

	down_write(&crypto_alg_sem);

	/*
	 * Find all registered algorithms that we care about, and move them to a
	 * private list so that they are no longer exposed via the algo lookup
	 * API. Subsequently, we will unregister them if they are not in active
	 * use. If they are, we can't fully unregister them but we can ensure
	 * that new users won't use them.
	 */
	list_for_each_entry_safe(alg, tmp, &crypto_alg_list, cra_list) {
		if (!fips140_should_unregister_alg(alg))
			continue;
		if (refcount_read(&alg->cra_refcnt) == 1) {
			/*
			 * This algorithm is not currently in use, but there may
			 * be template instances holding references to it via
			 * spawns. So let's tear it down like
			 * crypto_unregister_alg() would, but without releasing
			 * the lock, to prevent races with concurrent TFM
			 * allocations.
			 */
			alg->cra_flags |= CRYPTO_ALG_DEAD;
			list_move(&alg->cra_list, &remove_list);
			crypto_remove_spawns(alg, &spawns, NULL);
		} else {
			/*
			 * This algorithm is live, i.e. it has TFMs allocated,
			 * so we can't fully unregister it. It's not necessary
			 * to dynamically redirect existing users to the FIPS
			 * code, given that they can't be relying on FIPS
			 * certified crypto in the first place. However, we do
			 * need to ensure that new users will get the FIPS code.
			 *
			 * In most cases, setting alg->cra_priority to 0
			 * achieves this. However, that isn't enough for
			 * algorithms like "hmac(sha256)" that need to be
			 * instantiated from a template, since existing
			 * algorithms always take priority over a template being
			 * instantiated. Therefore, we move the algorithm to
			 * a private list so that algorithm lookups won't find
			 * it anymore. To further distinguish it from the FIPS
			 * algorithms, we also append "+orig" to its name.
			 */
			pr_info("found already-live algorithm '%s' ('%s')\n",
				alg->cra_name, alg->cra_driver_name);
			alg->cra_priority = 0;
			strlcat(alg->cra_name, "+orig", CRYPTO_MAX_ALG_NAME);
			strlcat(alg->cra_driver_name, "+orig",
				CRYPTO_MAX_ALG_NAME);
			list_move(&alg->cra_list, &existing_live_algos);
		}
	}
	up_write(&crypto_alg_sem);

	/* Release the torn-down algorithms outside the main loop's lock hold. */
	fips140_remove_final(&remove_list);
	fips140_remove_final(&spawns);
}
283
/*
 * Zero out the instruction fields that the module loader patched when it
 * applied the RELA relocations to .text, so that the copy matches the
 * pre-relocation bytes that the build-time HMAC was computed over.
 *
 * @section: start of the (writable) copy of .text
 * @section_size: size of the copy, used only to sanity-check r_offset
 * @rela: the Elf64_Rela entries that were applied to .text
 * @numrels: number of entries in @rela
 */
static void __init unapply_text_relocations(void *section, int section_size,
					    const Elf64_Rela *rela, int numrels)
{
	while (numrels--) {
		u32 *place = (u32 *)(section + rela->r_offset);

		/* A relocation pointing outside the section means corruption. */
		BUG_ON(rela->r_offset >= section_size);

		switch (ELF64_R_TYPE(rela->r_info)) {
#ifdef CONFIG_ARM64
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			/* clear the imm26 branch-offset field */
			*place &= ~GENMASK(25, 0);
			break;

		case R_AARCH64_ADR_PREL_LO21:
		case R_AARCH64_ADR_PREL_PG_HI21:
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			/* clear the ADR/ADRP immlo (30:29) and immhi (23:5) fields */
			*place &= ~(GENMASK(30, 29) | GENMASK(23, 5));
			break;

		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
		case R_AARCH64_LDST16_ABS_LO12_NC:
		case R_AARCH64_LDST32_ABS_LO12_NC:
		case R_AARCH64_LDST64_ABS_LO12_NC:
		case R_AARCH64_LDST128_ABS_LO12_NC:
			/* clear the imm12 field of ADD/load/store immediates */
			*place &= ~GENMASK(21, 10);
			break;
		default:
			pr_err("unhandled relocation type %llu\n",
			       ELF64_R_TYPE(rela->r_info));
			BUG();
#else
#error
#endif
		}
		rela++;
	}
}
324
/*
 * Same idea as unapply_text_relocations(), but for the copy of .rodata:
 * zero each relocated 64-bit slot so the copy matches the pre-relocation
 * contents that the build-time HMAC covered.
 */
static void __init unapply_rodata_relocations(void *section, int section_size,
					      const Elf64_Rela *rela, int numrels)
{
	while (numrels--) {
		void *place = section + rela->r_offset;

		/* A relocation pointing outside the section means corruption. */
		BUG_ON(rela->r_offset >= section_size);

		switch (ELF64_R_TYPE(rela->r_info)) {
#ifdef CONFIG_ARM64
		case R_AARCH64_ABS64:
			*(u64 *)place = 0;
			break;
		default:
			pr_err("unhandled relocation type %llu\n",
			       ELF64_R_TYPE(rela->r_info));
			BUG();
#else
#error
#endif
		}
		rela++;
	}
}
349
/*
 * Descriptors of the RELA relocations applied to this module's .text and
 * .rodata — presumably emitted by the same build tooling that fills in
 * fips140_integ_hmac_digest.  'offset' is a relative offset to the
 * Elf64_Rela array (resolved with offset_to_ptr() below) and 'count' is
 * the number of entries.
 */
extern struct {
	u32 offset;
	u32 count;
} fips140_rela_text, fips140_rela_rodata;
354
/*
 * Verify the integrity of this module: copy .text and .rodata, undo the
 * load-time relocations on the copies, compute HMAC-SHA256 over them with
 * the fixed public key, and compare against the digest that the build tool
 * embedded in fips140_integ_hmac_digest.
 *
 * Returns true if the computed digest matches the embedded one.
 */
static bool __init check_fips140_module_hmac(void)
{
	struct crypto_shash *tfm = NULL;
	SHASH_DESC_ON_STACK(desc, dontcare);
	u8 digest[SHA256_DIGEST_SIZE];
	void *textcopy, *rodatacopy;
	int textsize, rodatasize;
	bool ok = false;
	int err;

	/* Section sizes, derived from the linker-placed boundary markers. */
	textsize = &__fips140_text_end - &__fips140_text_start;
	rodatasize = &__fips140_rodata_end - &__fips140_rodata_start;

	pr_info("text size  : 0x%x\n", textsize);
	pr_info("rodata size: 0x%x\n", rodatasize);

	/* One buffer holds both copies: [.text][.rodata]. */
	textcopy = kmalloc(textsize + rodatasize, GFP_KERNEL);
	if (!textcopy) {
		pr_err("Failed to allocate memory for copy of .text\n");
		goto out;
	}

	rodatacopy = textcopy + textsize;

	memcpy(textcopy, __text_start, textsize);
	memcpy(rodatacopy, __rodata_start, rodatasize);

	// apply the relocations in reverse on the copies of .text and .rodata
	unapply_text_relocations(textcopy, textsize,
				 offset_to_ptr(&fips140_rela_text.offset),
				 fips140_rela_text.count);

	unapply_rodata_relocations(rodatacopy, rodatasize,
				   offset_to_ptr(&fips140_rela_rodata.offset),
				   fips140_rela_rodata.count);

	/* Evaluation-testing hook: optionally corrupt the copy on purpose. */
	fips140_inject_integrity_failure(textcopy);

	/*
	 * Must resolve to this module's own hmac(sha256); see the comment in
	 * fips140_init() about why the integrity check runs last.
	 */
	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate hmac tfm (%ld)\n", PTR_ERR(tfm));
		tfm = NULL;
		goto out;
	}
	desc->tfm = tfm;

	pr_info("using '%s' for integrity check\n",
		crypto_shash_driver_name(tfm));

	err = crypto_shash_setkey(tfm, fips140_integ_hmac_key,
				  strlen(fips140_integ_hmac_key)) ?:
	      crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, textcopy, textsize) ?:
	      crypto_shash_finup(desc, rodatacopy, rodatasize, digest);

	/* Zeroizing this is important; see the comment below. */
	shash_desc_zero(desc);

	if (err) {
		pr_err("failed to calculate hmac shash (%d)\n", err);
		goto out;
	}

	if (memcmp(digest, fips140_integ_hmac_digest, sizeof(digest))) {
		pr_err("provided_digest : %*phN\n", (int)sizeof(digest),
		       fips140_integ_hmac_digest);

		pr_err("calculated digest: %*phN\n", (int)sizeof(digest),
		       digest);
		goto out;
	}
	ok = true;
out:
	/*
	 * FIPS 140-3 requires that all "temporary value(s) generated during the
	 * integrity test" be zeroized (ref: FIPS 140-3 IG 9.7.B). There is no
	 * technical reason to do this given that these values are public
	 * information, but this is the requirement so we follow it.
	 */
	crypto_free_shash(tfm);
	memzero_explicit(digest, sizeof(digest));
	kfree_sensitive(textcopy);
	return ok;
}
439
/* Vendor-hook handler: serve sha256() library calls from this module. */
static void fips140_sha256(void *unused, const u8 *data, unsigned int len,
			   u8 *out, int *hook_inuse)
{
	*hook_inuse = 1;
	sha256(data, len, out);
}
446
/* Vendor-hook handler: serve aes_expandkey() library calls from this module. */
static void fips140_aes_expandkey(void *unused, struct crypto_aes_ctx *ctx,
				  const u8 *in_key, unsigned int key_len,
				  int *err)
{
	/* Report the expansion result through *err (0 or -errno). */
	*err = aes_expandkey(ctx, in_key, key_len);
}
453
/* Vendor-hook handler: serve aes_encrypt() library calls from this module. */
static void fips140_aes_encrypt(void *unused, const struct crypto_aes_ctx *ctx,
				u8 *out, const u8 *in, int *hook_inuse)
{
	*hook_inuse = 1;
	aes_encrypt(ctx, out, in);
}
460
/* Vendor-hook handler: serve aes_decrypt() library calls from this module. */
static void fips140_aes_decrypt(void *unused, const struct crypto_aes_ctx *ctx,
				u8 *out, const u8 *in, int *hook_inuse)
{
	*hook_inuse = 1;
	aes_decrypt(ctx, out, in);
}
467
update_fips140_library_routines(void)468 static bool update_fips140_library_routines(void)
469 {
470 int ret;
471
472 ret = register_trace_android_vh_sha256(fips140_sha256, NULL) ?:
473 register_trace_android_vh_aes_expandkey(fips140_aes_expandkey, NULL) ?:
474 register_trace_android_vh_aes_encrypt(fips140_aes_encrypt, NULL) ?:
475 register_trace_android_vh_aes_decrypt(fips140_aes_decrypt, NULL);
476
477 return ret == 0;
478 }
479
/*
 * Initialize the FIPS 140 module.
 *
 * On any failure — an initcall error, a failed self-test, a failed integrity
 * check, or a failed hook registration — the kernel panics, per the policy
 * described at the top of this file.
 *
 * Note: this routine iterates over the contents of the initcall section, which
 * consists of an array of function pointers that was emitted by the linker
 * rather than the compiler. This means that these function pointers lack the
 * usual CFI stubs that the compiler emits when CFI codegen is enabled. So
 * let's disable CFI locally when handling the initcall array, to avoid
 * surpises.
 */
static int __init __attribute__((__no_sanitize__("cfi")))
fips140_init(void)
{
	const u32 *initcall;

	pr_info("loading " FIPS140_MODULE_NAME " " FIPS140_MODULE_VERSION "\n");
	fips140_init_thread = current;

	unregister_existing_fips140_algos();

	/*
	 * Iterate over all init routines present in this module and call them.
	 * Each section entry is a 32-bit offset to the function (resolved via
	 * offset_to_ptr()); the "+ 1" skips the _start marker itself.
	 */
	for (initcall = __initcall_start + 1;
	     initcall < &__initcall_end_marker;
	     initcall++) {
		int (*init)(void) = offset_to_ptr(initcall);
		int err = init();

		/*
		 * ENODEV is expected from initcalls that only register
		 * algorithms that depend on non-present CPU features. Besides
		 * that, errors aren't expected here.
		 */
		if (err && err != -ENODEV) {
			pr_err("initcall %ps() failed: %d\n", init, err);
			goto panic;
		}
	}

	if (!fips140_run_selftests())
		goto panic;

	/*
	 * It may seem backward to perform the integrity check last, but this
	 * is intentional: the check itself uses hmac(sha256) which is one of
	 * the algorithms that are replaced with versions from this module, and
	 * the integrity check must use the replacement version. Also, to be
	 * ready for FIPS 140-3, the integrity check algorithm must have already
	 * been self-tested.
	 */

	if (!check_fips140_module_hmac()) {
		pr_crit("integrity check failed -- giving up!\n");
		goto panic;
	}
	pr_info("integrity check passed\n");

	/* Unblock anyone waiting for the self-tests/integrity check. */
	complete_all(&fips140_tests_done);

	if (!update_fips140_library_routines())
		goto panic;

	if (!fips140_eval_testing_init())
		goto panic;

	pr_info("module successfully loaded\n");
	return 0;

panic:
	panic("FIPS 140 module load failure");
}

module_init(fips140_init);

MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_LICENSE("GPL v2");
555
556 /*
557 * Crypto-related helper functions, reproduced here so that they will be
558 * covered by the FIPS 140 integrity check.
559 *
560 * Non-cryptographic helper functions such as memcpy() can be excluded from the
561 * FIPS module, but there is ambiguity about other helper functions like
562 * __crypto_xor() and crypto_inc() which aren't cryptographic by themselves,
563 * but are more closely associated with cryptography than e.g. memcpy(). To
564 * err on the side of caution, we include copies of these in the FIPS module.
565 */
__crypto_xor(u8 * dst,const u8 * src1,const u8 * src2,unsigned int len)566 void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
567 {
568 while (len >= 8) {
569 *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
570 dst += 8;
571 src1 += 8;
572 src2 += 8;
573 len -= 8;
574 }
575
576 while (len >= 4) {
577 *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
578 dst += 4;
579 src1 += 4;
580 src2 += 4;
581 len -= 4;
582 }
583
584 while (len >= 2) {
585 *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
586 dst += 2;
587 src1 += 2;
588 src2 += 2;
589 len -= 2;
590 }
591
592 while (len--)
593 *dst++ = *src1++ ^ *src2++;
594 }
595
/* Increment a big-endian counter of @size bytes, propagating the carry
 * from the last byte toward the first; stops at the first byte that does
 * not wrap to zero. */
void crypto_inc(u8 *a, unsigned int size)
{
	unsigned int i = size;

	while (i) {
		i--;
		if (++a[i] != 0)
			break;
	}
}
604