// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
 *
 * This driver produces cryptographically secure pseudorandom data. It is divided
 * into roughly six sections, each with a section header:
 *
 *   - Initialization and readiness waiting.
 *   - Fast key erasure RNG, the "crng".
 *   - Entropy accumulation and extraction routines.
 *   - Entropy collection routines.
 *   - Userspace reader/writer interfaces.
 *   - Sysctl interface.
 *
 * The high level overview is that there is one input pool, into which
 * various pieces of data are hashed. Prior to initialization, some of that
 * data is then "credited" as having a certain number of bits of entropy.
 * When enough bits of entropy are available, the hash is finalized and
 * handed as a key to a stream cipher that expands it indefinitely for
 * various consumers. This key is periodically refreshed as the various
 * entropy collectors, described below, add data to the input pool.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/siphash.h>
#include <linux/uio.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

// GKI: Keep this header to retain the original CRC that previously used the
// random.h tracepoints.
#include <linux/writeback.h>

/*********************************************************************
 *
 * Initialization and readiness waiting.
 *
 * Much of the RNG infrastructure is devoted to various dependencies
 * being able to wait until the RNG has collected enough entropy and
 * is ready for safe consumption.
 *
 *********************************************************************/

/*
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from empty->early->ready).
 */
static enum {
	CRNG_EMPTY = 0, /* Little to no entropy collected */
	CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
	CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
#define crng_ready() (likely(crng_init >= CRNG_READY))
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_chain_lock);
static RAW_NOTIFIER_HEAD(random_ready_chain);

/* Control how we warn userspace. */
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
static int ratelimit_disable __read_mostly =
	IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
 * u64,int,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);

/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	while (!crng_ready()) {
		int ret;

		try_to_generate_entropy();
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;
	}
	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
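
/*
 * Example (illustrative sketch, not used by this file): a driver that must
 * not generate key material before the RNG is seeded would typically pair
 * the two interfaces above. The function and buffer names are hypothetical.
 *
 *	static int make_session_key(u8 key[32])
 *	{
 *		int ret = wait_for_random_bytes();
 *		if (ret)
 *			return ret;	// interrupted by a signal
 *		get_random_bytes(key, 32);
 *		return 0;
 *	}
 */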

/*
 * Add a callback function that will be invoked when the input
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 */
int __cold register_random_ready_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret = -EALREADY;

	if (crng_ready())
		return ret;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	if (!crng_ready())
		ret = raw_notifier_chain_register(&random_ready_chain, nb);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
	return ret;
}
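
/*
 * Example (illustrative sketch): a caller registers a notifier_block whose
 * callback fires once the pool is ready, falling back to direct invocation
 * when registration returns -EALREADY. All names here are hypothetical.
 *
 *	static int my_rng_ready(struct notifier_block *nb, unsigned long a, void *b)
 *	{
 *		pr_info("RNG is now seeded\n");
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_rng_ready };
 *
 *	if (register_random_ready_notifier(&my_nb) == -EALREADY)
 *		my_rng_ready(&my_nb, 0, NULL);	// pool was already initialised
 */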

/*
 * Delete a previously registered readiness callback function.
 */
int __cold unregister_random_ready_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
	return ret;
}

static void process_oldschool_random_ready_list(void);
static void __cold process_random_ready_list(void)
{
	unsigned long flags;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	raw_notifier_call_chain(&random_ready_chain, 0, NULL);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);

	process_oldschool_random_ready_list();
}

#define warn_unseeded_randomness() \
	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
				__func__, (void *)_RET_IP_, crng_init)


/*********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into
 * long streams for external consumption using the "fast key erasure"
 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
 *
 * There are a few exported interfaces for use by other drivers:
 *
 *	void get_random_bytes(void *buf, size_t len)
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to
 * a read from /dev/urandom. The u32, u64, int, and long family of
 * functions may be higher performance for one-off random integers,
 * because they do a bit of buffering and do not invoke reseeding
 * until the buffer is emptied.
 *
 *********************************************************************/

enum {
	CRNG_RESEED_START_INTERVAL = HZ,
	CRNG_RESEED_INTERVAL = 60 * HZ
};

static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};

/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t len);

/* This extracts a new crng key from the input pool. */
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];

	extract_entropy(key, sizeof(key));

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	if (!crng_ready())
		crng_init = CRNG_READY;
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));
}

/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 *
 * The returned ChaCha state contains within it a copy of the old
 * key value, at index 4, so the state should always be zeroed out
 * immediately after using in order to maintain forward secrecy.
 * If the state cannot be erased in a timely manner, then it is
 * safer to set the random_data parameter to &chacha_state[4] so
 * that this function overwrites it before returning.
 */
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}
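
/*
 * Layout sketch of the function above (not code): one 64-byte ChaCha block
 * is split so that the key used to produce it never outlives the call:
 *
 *	first_block[ 0..31] -> new value of *key  (erases the old key)
 *	first_block[32..63] -> random_data        (up to 32 bytes for caller)
 */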

/*
 * Return whether the crng seed is considered to be sufficiently old
 * that a reseeding is needed. This happens if the last reseeding
 * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
 * proportional to the uptime.
 */
static bool crng_has_old_seed(void)
{
	static bool early_boot = true;
	unsigned long interval = CRNG_RESEED_INTERVAL;

	if (unlikely(READ_ONCE(early_boot))) {
		time64_t uptime = ktime_get_seconds();
		if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
			WRITE_ONCE(early_boot, false);
		else
			interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
					 (unsigned int)uptime / 2 * HZ);
	}
	return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
}
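
/*
 * Worked example of the early-boot schedule above: with an uptime of 30
 * seconds, interval = max(1 * HZ, 30 / 2 * HZ) = 15 * HZ, so the seed is
 * considered old once it is more than 15 seconds old. After 120 seconds
 * of uptime (CRNG_RESEED_INTERVAL / HZ * 2), the fixed 60-second interval
 * applies for good.
 */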

/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, extracting
	 * when crng_init is CRNG_EMPTY.
	 */
	if (!crng_ready()) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready) {
			if (crng_init == CRNG_EMPTY)
				extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is old enough, we reseed, which in turn bumps the
	 * generation counter that we check below.
	 */
	if (unlikely(crng_has_old_seed()))
		crng_reseed();

	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}

static void _get_random_bytes(void *buf, size_t len)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t first_block_len;

	if (!len)
		return;

	first_block_len = min_t(size_t, 32, len);
	crng_make_state(chacha_state, buf, first_block_len);
	len -= first_block_len;
	buf += first_block_len;

	while (len) {
		if (len < CHACHA_BLOCK_SIZE) {
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, len);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		len -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
}

/*
 * This function is the exported kernel interface. It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc. It does not rely on the hardware random
 * number generator. For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, int len)
{
	warn_unseeded_randomness();
	_get_random_bytes(buf, len);
}
EXPORT_SYMBOL(get_random_bytes);

static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 block[CHACHA_BLOCK_SIZE];
	size_t ret = 0, copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	/*
	 * Immediately overwrite the ChaCha key at index 4 with random
	 * bytes, in case userspace causes copy_to_iter() below to sleep
	 * forever, so that we still retain forward secrecy in that case.
	 */
	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
	/*
	 * However, if we're doing a read of len <= 32, we don't need to
	 * use chacha_state after, so we can simply return those bytes to
	 * the user directly.
	 */
	if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
		ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
		goto out_zero_chacha;
	}

	for (;;) {
		chacha20_block(chacha_state, block);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		copied = copy_to_iter(block, sizeof(block), iter);
		ret += copied;
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
out_zero_chacha:
	memzero_explicit(chacha_state, sizeof(chacha_state));
	return ret ? ret : -EFAULT;
}

/*
 * Batched entropy returns random integers. The quality of the random
 * numbers is as good as /dev/urandom. In order to ensure that the randomness
 * provided by this function is okay, the function wait_for_random_bytes()
 * should be called and return 0 at least once at any point prior.
 */

#define DEFINE_BATCHED_ENTROPY(type) \
struct batch_ ##type { \
	/* \
	 * We make this 1.5x a ChaCha block, so that we get the \
	 * remaining 32 bytes from fast key erasure, plus one full \
	 * block from the detached ChaCha state. We can increase \
	 * the size of this later if needed so long as we keep the \
	 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
	 */ \
	type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
	local_lock_t lock; \
	unsigned long generation; \
	unsigned int position; \
}; \
\
static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
	.lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
	.position = UINT_MAX \
}; \
\
type get_random_ ##type(void) \
{ \
	type ret; \
	unsigned long flags; \
	struct batch_ ##type *batch; \
	unsigned long next_gen; \
\
	warn_unseeded_randomness(); \
\
	if (!crng_ready()) { \
		_get_random_bytes(&ret, sizeof(ret)); \
		return ret; \
	} \
\
	local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
	batch = raw_cpu_ptr(&batched_entropy_##type); \
\
	next_gen = READ_ONCE(base_crng.generation); \
	if (batch->position >= ARRAY_SIZE(batch->entropy) || \
	    next_gen != batch->generation) { \
		_get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
		batch->position = 0; \
		batch->generation = next_gen; \
	} \
\
	ret = batch->entropy[batch->position]; \
	batch->entropy[batch->position] = 0; \
	++batch->position; \
	local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
	return ret; \
} \
EXPORT_SYMBOL(get_random_ ##type);

DEFINE_BATCHED_ENTROPY(u64)
DEFINE_BATCHED_ENTROPY(u32)
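
/*
 * Usage sketch (illustrative, hypothetical names): for a single random
 * integer, the batched helpers generated above are the cheap path, since
 * they amortize one _get_random_bytes() call over many requests;
 * get_random_bytes() is for arbitrary-length buffers.
 *
 *	u32 tag = get_random_u32();		// one word from the per-cpu batch
 *	u8 nonce[12];
 *	get_random_bytes(nonce, sizeof(nonce));	// arbitrary-length buffer
 */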

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int __cold random_prepare_cpu(unsigned int cpu)
{
	/*
	 * When the cpu comes back online, immediately invalidate both
	 * the per-cpu crng and all batches, so that we serve fresh
	 * randomness.
	 */
	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
	return 0;
}
#endif

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */
int __must_check get_random_bytes_arch(void *buf, int len)
{
	size_t left = len;
	u8 *p = buf;

	while (left) {
		unsigned long v;
		size_t block_len = min_t(size_t, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, block_len);
		p += block_len;
		left -= block_len;
	}

	return len - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);


/**********************************************************************
 *
 * Entropy accumulation and extraction routines.
 *
 * Callers may add entropy via:
 *
 *	static void mix_pool_bytes(const void *buf, size_t len)
 *
 * After which, if added entropy should be credited:
 *
 *	static void credit_init_bits(size_t bits)
 *
 * Finally, extract entropy via:
 *
 *	static void extract_entropy(void *buf, size_t len)
 *
 **********************************************************************/

enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
	POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
};

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	unsigned int init_bits;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void _mix_pool_bytes(const void *buf, size_t len)
{
	blake2s_update(&input_pool.hash, buf, len);
}

/*
 * This function adds bytes into the input pool. It does not
 * update the initialization bit counter; the caller should call
 * credit_init_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *buf, size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}

/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t len)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (len) {
		i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		len -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}
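
/*
 * Worked sketch of the chain above across one extraction, in the notation
 * of the inline comments (HASHPRF(key, input) is keyed BLAKE2s):
 *
 *	seed     = HASHPRF(last_key, pool input so far)
 *	next_key = HASHPRF(seed, RDSEED || 0)		(re-keys the pool)
 *	output.n = HASHPRF(seed, RDSEED || n)		(n = 1, 2, ...)
 *
 * A later call repeats the same steps with next_key, and seed/next_key
 * are wiped before returning, so capturing the pool state afterwards
 * does not reveal earlier outputs.
 */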

#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)

static void __cold _credit_init_bits(size_t bits)
{
	unsigned int new, orig, add;
	unsigned long flags;

	if (!bits)
		return;

	add = min_t(size_t, bits, POOL_BITS);

	do {
		orig = READ_ONCE(input_pool.init_bits);
		new = min_t(unsigned int, POOL_BITS, orig + add);
	} while (cmpxchg(&input_pool.init_bits, orig, new) != orig);

	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (urandom_warning.missed)
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
	} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
		spin_lock_irqsave(&base_crng.lock, flags);
		/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
		if (crng_init == CRNG_EMPTY) {
			extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_init = CRNG_EARLY;
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
	}
}
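
/*
 * Concretely, with BLAKE2S_HASH_SIZE = 32 bytes, POOL_BITS = 256: crossing
 * 128 credited bits (POOL_EARLY_BITS) moves crng_init to CRNG_EARLY, and
 * crossing 256 bits (POOL_READY_BITS) reseeds the crng and wakes anything
 * sleeping in wait_for_random_bytes() or polling /dev/random.
 */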


/**********************************************************************
 *
 * Entropy collection routines.
 *
 * The following exported functions are used for pushing entropy into
 * the above entropy accumulation routines:
 *
 *	void add_device_randomness(const void *buf, size_t len);
 *	void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
 *	void add_bootloader_randomness(const void *buf, size_t len);
 *	void add_interrupt_randomness(int irq);
 *	void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * add_device_randomness() adds data to the input pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* credit any actual entropy to
 * the pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is called by bootloader drivers, such as EFI
 * and device tree, and credits its input depending on whether or not the
 * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the input pool roughly once a second or after 64
 * interrupts, crediting 1 bit of entropy for whichever comes first.
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * The last two routines try to estimate how many bits of entropy
 * to credit. They do this by keeping track of the first and second
 * order deltas of the event timings.
 *
 **********************************************************************/

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
static int __init parse_trust_bootloader(char *arg)
{
	return kstrtobool(arg, &trust_bootloader);
}
early_param("random.trust_cpu", parse_trust_cpu);
early_param("random.trust_bootloader", parse_trust_bootloader);

/*
 * The first collection of entropy occurs at system boot while interrupts
 * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
 * utsname(), and the command line. Depending on the above configuration knob,
 * RDSEED may be considered sufficient for initialization. Note that much
 * earlier setup may already have pushed entropy into the input pool by the
 * time we get here.
 */
int __init random_init(const char *command_line)
{
	ktime_t now = ktime_get_real();
	unsigned int i, arch_bytes;
	unsigned long entropy;

#if defined(LATENT_ENTROPY_PLUGIN)
	static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif

	for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
	     i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
		if (!arch_get_random_seed_long_early(&entropy) &&
		    !arch_get_random_long_early(&entropy)) {
			entropy = random_get_entropy();
			arch_bytes -= sizeof(entropy);
		}
		_mix_pool_bytes(&entropy, sizeof(entropy));
	}
	_mix_pool_bytes(&now, sizeof(now));
	_mix_pool_bytes(utsname(), sizeof(*(utsname())));
	_mix_pool_bytes(command_line, strlen(command_line));
	add_latent_entropy();

	if (crng_ready())
		crng_reseed();
	else if (trust_cpu)
		credit_init_bits(arch_bytes * 8);

	return 0;
}

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, unsigned int len)
{
	unsigned long entropy = random_get_entropy();
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&entropy, sizeof(entropy));
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
{
	mix_pool_bytes(buf, len);
	credit_init_bits(entropy);

	/*
	 * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
	 * we're not yet initialized.
	 */
	if (!kthread_should_stop() && crng_ready())
		schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);

/*
 * Handle random seed passed by bootloader, and credit it if
 * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 */
void __cold add_bootloader_randomness(const void *buf, size_t len)
{
	mix_pool_bytes(buf, len);
	if (trust_bootloader)
		credit_init_bits(len * 8);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);

struct fast_pool {
	unsigned long pool[4];
	unsigned long last;
	unsigned int count;
	struct timer_list mix;
};

static void mix_interrupt_randomness(struct timer_list *work);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
	.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
	.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
	.mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
};

/*
 * This is [Half]SipHash-1-x, starting from an empty key. Because
 * the key is fixed, it assumes that its inputs are non-malicious,
 * and therefore this has no security on its own. s represents the
 * four-word SipHash state, while v represents a two-word input.
 */
static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
{
	s[3] ^= v1;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU has just come online, with
 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
 */
int __cold random_online_cpu(unsigned int cpu)
{
	/*
	 * During CPU shutdown and before CPU onlining, add_interrupt_
	 * randomness() may schedule mix_interrupt_randomness(), and
	 * set the MIX_INFLIGHT flag. However, because the worker can
	 * be scheduled on a different CPU during this period, that
	 * flag will never be cleared. For that reason, we zero out
	 * the flag here, which runs just after workqueues are onlined
	 * for the CPU again. This also has the effect of setting the
	 * irq randomness count to zero so that new accumulated irqs
	 * are fresh.
	 */
	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
	return 0;
}
#endif

static void mix_interrupt_randomness(struct timer_list *work)
{
	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
	/*
	 * The size of the copied stack pool is explicitly 2 longs so that we
	 * only ever ingest half of the siphash output each time, retaining
	 * the other half as the next "key" that carries over. The entropy is
	 * supposed to be sufficiently dispersed between bits so on average
	 * we don't wind up "losing" some.
	 */
	unsigned long pool[2];
	unsigned int count;

	/* Check to see if we're running on the wrong CPU due to hotplug. */
	local_irq_disable();
	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
		local_irq_enable();
		return;
	}

	/*
	 * Copy the pool to the stack so that the mixer always has a
	 * consistent view, before we reenable irqs again.
	 */
	memcpy(pool, fast_pool->pool, sizeof(pool));
	count = fast_pool->count;
	fast_pool->count = 0;
	fast_pool->last = jiffies;
	local_irq_enable();

	mix_pool_bytes(pool, sizeof(pool));
	credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));

	memzero_explicit(pool, sizeof(pool));
}
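
/*
 * Crediting example for the clamp above: a batch of 64 interrupts yields
 * (64 & U16_MAX) / 64 = 1 bit; 1024 interrupts yield 16 bits; anything
 * beyond 8192 is capped at sizeof(pool) * 8 = 128 bits (on 64-bit); and a
 * tiny batch still credits the minimum of 1 bit.
 */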

void add_interrupt_randomness(int irq)
{
	enum { MIX_INFLIGHT = 1U << 31 };
	unsigned long entropy = random_get_entropy();
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned int new_count;

	fast_mix(fast_pool->pool, entropy,
		 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
	new_count = ++fast_pool->count;

	if (new_count & MIX_INFLIGHT)
		return;

	if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
		return;

	fast_pool->count |= MIX_INFLIGHT;
	if (!timer_pending(&fast_pool->mix)) {
		fast_pool->mix.expires = jiffies;
		add_timer_on(&fast_pool->mix, raw_smp_processor_id());
	}
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

/* There is one of these per entropy source */
struct timer_rand_state {
	unsigned long last_time;
	long last_delta, last_delta2;
};

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool. The
 * value "num" is also added to the pool; it should somehow describe
 * the type of event that just happened.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	unsigned long entropy = random_get_entropy(), now = jiffies, flags;
	long delta, delta2, delta3;
	unsigned int bits;

	/*
	 * If we're in a hard IRQ, add_interrupt_randomness() will be called
	 * sometime after, so mix into the fast pool.
	 */
	if (in_irq()) {
		fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
	} else {
		spin_lock_irqsave(&input_pool.lock, flags);
		_mix_pool_bytes(&entropy, sizeof(entropy));
		_mix_pool_bytes(&num, sizeof(num));
		spin_unlock_irqrestore(&input_pool.lock, flags);
	}

	if (crng_ready())
		return;

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = now - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, now);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta. Round down by 1 bit
	 * on general principles, and limit entropy estimate to 11 bits.
	 */
	bits = min(fls(delta >> 1), 11);

	/*
	 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
	 * will run after this, which uses a different crediting scheme of 1 bit
	 * per every 64 interrupts. In order to let that function do accounting
	 * close to the one in this function, we credit a full 64/64 bit per bit,
	 * and then subtract one to account for the extra one added.
	 */
	if (in_irq())
		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
	else
		_credit_init_bits(bits);
}
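
/*
 * Worked example of the estimate above: with deltas |delta| = 100,
 * |delta2| = 12, |delta3| = 50 (in jiffies), the minimum is 12, so
 * bits = min(fls(12 >> 1), 11) = min(fls(6), 11) = 3 credited bits.
 */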

void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
{
	static unsigned char last_value;
	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };

	/* Ignore autorepeat and the like. */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* First major is 1, so we get >= 0x200 here. */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);

void __cold rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void __cold entropy_timer(struct timer_list *t)
{
	credit_init_bits(1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise
 */
static void __cold try_to_generate_entropy(void)
{
	struct {
		unsigned long entropy;
		struct timer_list timer;
	} stack;

	stack.entropy = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.entropy == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready() && !signal_pending(current)) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies + 1);
		mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
		schedule();
		stack.entropy = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
}


/**********************************************************************
 *
 * Userspace reader/writer interfaces.
 *
 * getrandom(2) is the primary modern interface into the RNG and should
 * be used in preference to anything else.
 *
 * Reading from /dev/random has the same functionality as calling
 * getrandom(2) with flags=0. In earlier versions, however, it had
 * vastly different semantics and should therefore be avoided, to
 * prevent backwards compatibility issues.
 *
 * Reading from /dev/urandom has the same functionality as calling
 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
 * waiting for the RNG to be ready, it should not be used.
 *
 * Writing to either /dev/random or /dev/urandom adds entropy to
 * the input pool but does not credit it.
 *
 * Polling on /dev/random indicates when the RNG is initialized, on
 * the read side, and when it wants new entropy, on the write side.
 *
 * Both /dev/random and /dev/urandom have the same set of ioctls for
 * adding entropy, getting the entropy count, zeroing the count, and
 * reseeding the crng.
 *
 **********************************************************************/

SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
	struct iov_iter iter;
	struct iovec iov;
	int ret;

	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	if (!crng_ready() && !(flags & GRND_INSECURE)) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}

	ret = import_single_range(READ, ubuf, len, &iov, &iter);
	if (unlikely(ret))
		return ret;
	return get_random_bytes_user(&iter);
}
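
/*
 * Userspace sketch (not kernel code): the common calling pattern for the
 * syscall above, blocking until the pool is seeded:
 *
 *	unsigned char buf[32];
 *	ssize_t n = getrandom(buf, sizeof(buf), 0);
 *	if (n < 0)
 *		perror("getrandom");	// e.g. EINTR while blocking
 */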

static __poll_t random_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &crng_init_wait, wait);
	return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}

static ssize_t write_pool_user(struct iov_iter *iter)
{
	u8 block[BLAKE2S_BLOCK_SIZE];
	ssize_t ret = 0;
	size_t copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	for (;;) {
		copied = copy_from_iter(block, sizeof(block), iter);
		ret += copied;
		mix_pool_bytes(block, copied);
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
	return ret ? ret : -EFAULT;
}

static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	return write_pool_user(iter);
}

static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	static int maxwarn = 10;

	if (!crng_ready()) {
		if (!ratelimit_disable && maxwarn <= 0)
			++urandom_warning.missed;
		else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
			--maxwarn;
			pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
				  current->comm, iov_iter_count(iter));
		}
	}

	return get_random_bytes_user(iter);
}

static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	int ret;

	if (!crng_ready() &&
	    ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
	     (kiocb->ki_filp->f_flags & O_NONBLOCK)))
		return -EAGAIN;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return get_random_bytes_user(iter);
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;
	int ent_count;

	switch (cmd) {
	case RNDGETENTCNT:
		/* Inherently racy, no point locking. */
		if (put_user(input_pool.init_bits, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_init_bits(ent_count);
		return 0;
	case RNDADDENTROPY: {
		struct iov_iter iter;
		struct iovec iov;
		ssize_t ret;
		int len;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(len, p++))
			return -EFAULT;
		ret = import_single_range(WRITE, p, len, &iov, &iter);
		if (unlikely(ret))
			return ret;
		ret = write_pool_user(&iter);
		if (unlikely(ret < 0))
			return ret;
		/* Since we're crediting, enforce that it was all written into the pool. */
		if (unlikely(ret != len))
			return -EFAULT;
		credit_init_bits(ent_count);
		return 0;
	}
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/* No longer has any effect. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (!crng_ready())
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read_iter = random_read_iter,
	.write_iter = random_write_iter,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};

const struct file_operations urandom_fops = {
	.read_iter = urandom_read_iter,
	.write_iter = random_write_iter,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};


/********************************************************************
 *
 * Sysctl interface.
 *
 * These are partly unused legacy knobs with dummy values to not break
 * userspace and partly still useful things. They are usually accessible
 * in /proc/sys/kernel/random/ and are as follows:
 *
 * - boot_id - a UUID representing the current boot.
 *
 * - uuid - a random UUID, different each time the file is read.
 *
 * - poolsize - the number of bits of entropy that the input pool can
 *   hold, tied to the POOL_BITS constant.
 *
 * - entropy_avail - the number of bits of entropy currently in the
 *   input pool. Always <= poolsize.
 *
 * - write_wakeup_threshold - the amount of entropy in the input pool
 *   below which write polls to /dev/random will unblock, requesting
 *   more entropy, tied to the POOL_READY_BITS constant. It is writable
 *   to avoid breaking old userspaces, but writing to it does not
 *   change any behavior of the RNG.
 *
 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
 *   It is writable to avoid breaking old userspaces, but writing
 *   to it does not change any behavior of the RNG.
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
static int sysctl_poolsize = POOL_BITS;
static u8 sysctl_bootid[UUID_SIZE];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 */
static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
			size_t *lenp, loff_t *ppos)
{
	u8 tmp_uuid[UUID_SIZE], *uuid;
	char uuid_string[UUID_STRING_LEN + 1];
	struct ctl_table fake_table = {
		.data = uuid_string,
		.maxlen = UUID_STRING_LEN
	};

	if (write)
		return -EPERM;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
	return proc_dostring(&fake_table, 0, buf, lenp, ppos);
}

/* The same as proc_dointvec, but writes don't change anything. */
static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
			    size_t *lenp, loff_t *ppos)
{
	return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}

extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname = "poolsize",
		.data = &sysctl_poolsize,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "entropy_avail",
		.data = &input_pool.init_bits,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "write_wakeup_threshold",
		.data = &sysctl_random_write_wakeup_bits,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_rointvec,
	},
	{
		.procname = "urandom_min_reseed_secs",
		.data = &sysctl_random_min_urandom_seed,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_rointvec,
	},
	{
		.procname = "boot_id",
		.data = &sysctl_bootid,
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
	{
		.procname = "uuid",
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

/*
 * Android KABI fixups
 *
 * Add back two functions that were being used by out-of-tree drivers.
 *
 * Yes, horrible hack, the things we do for FIPS "compliance"...
 */
static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);

void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);

static void process_oldschool_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}