/*
 * This is a maximally equidistributed combined Tausworthe generator
 * based on code from GNU Scientific Library 1.5 (30 Jun 2004)
 *
 * lfsr113 version:
 *
 * x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
 *
 * s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n <<  6) ^ s1_n) >> 13))
 * s2_{n+1} = (((s2_n & 4294967288) <<  2) ^ (((s2_n <<  2) ^ s2_n) >> 27))
 * s3_{n+1} = (((s3_n & 4294967280) <<  7) ^ (((s3_n << 13) ^ s3_n) >> 21))
 * s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n <<  3) ^ s4_n) >> 12))
 *
 * The period of this generator is about 2^113 (see erratum paper).
 *
 * From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
 * Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
 * http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
 * ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
 *
 * There is an erratum in the paper "Tables of Maximally Equidistributed
 * Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
 * 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
 *
 *      ... the k_j most significant bits of z_j must be non-zero,
 *      for each j. (Note: this restriction also applies to the
 *      computer code given in [4], but was mistakenly not mentioned
 *      in that paper.)
 *
 * This affects the seeding procedure by imposing the requirement
 * s1 > 1, s2 > 7, s3 > 15, s4 > 127.
 */

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <asm/unaligned.h>
41 
42 /**
43  *	prandom_u32_state - seeded pseudo-random number generator.
44  *	@state: pointer to state structure holding seeded state.
45  *
46  *	This is used for pseudo-randomness with no outside seeding.
47  *	For more random results, use prandom_u32().
48  */
prandom_u32_state(struct rnd_state * state)49 u32 prandom_u32_state(struct rnd_state *state)
50 {
51 #define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
52 	state->s1 = TAUSWORTHE(state->s1,  6U, 13U, 4294967294U, 18U);
53 	state->s2 = TAUSWORTHE(state->s2,  2U, 27U, 4294967288U,  2U);
54 	state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U,  7U);
55 	state->s4 = TAUSWORTHE(state->s4,  3U, 12U, 4294967168U, 13U);
56 
57 	return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
58 }
59 EXPORT_SYMBOL(prandom_u32_state);
60 
61 /**
62  *	prandom_bytes_state - get the requested number of pseudo-random bytes
63  *
64  *	@state: pointer to state structure holding seeded state.
65  *	@buf: where to copy the pseudo-random bytes to
66  *	@bytes: the requested number of bytes
67  *
68  *	This is used for pseudo-randomness with no outside seeding.
69  *	For more random results, use prandom_bytes().
70  */
prandom_bytes_state(struct rnd_state * state,void * buf,size_t bytes)71 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
72 {
73 	u8 *ptr = buf;
74 
75 	while (bytes >= sizeof(u32)) {
76 		put_unaligned(prandom_u32_state(state), (u32 *) ptr);
77 		ptr += sizeof(u32);
78 		bytes -= sizeof(u32);
79 	}
80 
81 	if (bytes > 0) {
82 		u32 rem = prandom_u32_state(state);
83 		do {
84 			*ptr++ = (u8) rem;
85 			bytes--;
86 			rem >>= BITS_PER_BYTE;
87 		} while (bytes > 0);
88 	}
89 }
90 EXPORT_SYMBOL(prandom_bytes_state);

/* Draw and discard ten outputs to satisfy the recurrence condition. */
static void prandom_warmup(struct rnd_state *state)
{
	int i;

	for (i = 0; i < 10; i++)
		prandom_u32_state(state);
}
106 
prandom_seed_full_state(struct rnd_state __percpu * pcpu_state)107 void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
108 {
109 	int i;
110 
111 	for_each_possible_cpu(i) {
112 		struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
113 		u32 seeds[4];
114 
115 		get_random_bytes(&seeds, sizeof(seeds));
116 		state->s1 = __seed(seeds[0],   2U);
117 		state->s2 = __seed(seeds[1],   8U);
118 		state->s3 = __seed(seeds[2],  16U);
119 		state->s4 = __seed(seeds[3], 128U);
120 
121 		prandom_warmup(state);
122 	}
123 }

#ifdef CONFIG_RANDOM32_SELFTEST
/* Seed-boundary known-answer vectors: expected first draw for small seeds. */
static struct prandom_test1 {
	u32 seed;	/* seed fed to prandom_seed_early() */
	u32 result;	/* expected first prandom_u32_state() output after warmup */
} test1[] = {
	{ 1U, 3484351685U },
	{ 2U, 2623130059U },
	{ 3U, 3125133893U },
	{ 4U,  984847254U },
};

/* Nth-draw known-answer vectors; the selftest compares the iteration-th draw. */
static struct prandom_test2 {
	u32 seed;	/* seed fed to prandom_seed_early() */
	u32 iteration;	/* which draw (1-based, after warmup) to compare */
	u32 result;	/* expected value of that draw */
} test2[] = {
	/* Test cases against taus113 from GSL library. */
	{  931557656U, 959U, 2975593782U },
	{ 1339693295U, 876U, 3887776532U },
	{ 1545556285U, 961U, 1615538833U },
	{  601730776U, 723U, 1776162651U },
	{ 1027516047U, 687U,  511983079U },
	{  416526298U, 700U,  916156552U },
	{ 1395522032U, 652U, 2222063676U },
	{  366221443U, 617U, 2992857763U },
	{ 1539836965U, 714U, 3783265725U },
	{  556206671U, 994U,  799626459U },
	{  684907218U, 799U,  367789491U },
	{ 2121230701U, 931U, 2115467001U },
	{ 1668516451U, 644U, 3620590685U },
	{  768046066U, 883U, 2034077390U },
	{ 1989159136U, 833U, 1195767305U },
	{  536585145U, 996U, 3577259204U },
	{ 1008129373U, 642U, 1478080776U },
	{ 1740775604U, 939U, 1264980372U },
	{ 1967883163U, 508U,   10734624U },
	{ 1923019697U, 730U, 3821419629U },
	{  442079932U, 560U, 3440032343U },
	{ 1961302714U, 845U,  841962572U },
	{ 2030205964U, 962U, 1325144227U },
	{ 1160407529U, 507U,  240940858U },
	{  635482502U, 779U, 4200489746U },
	{ 1252788931U, 699U,  867195434U },
	{ 1961817131U, 719U,  668237657U },
	{ 1071468216U, 983U,  917876630U },
	{ 1281848367U, 932U, 1003100039U },
	{  582537119U, 780U, 1127273778U },
	{ 1973672777U, 853U, 1071368872U },
	{ 1896756996U, 762U, 1127851055U },
	{  847917054U, 500U, 1717499075U },
	{ 1240520510U, 951U, 2849576657U },
	{ 1685071682U, 567U, 1961810396U },
	{ 1516232129U, 557U,    3173877U },
	{ 1208118903U, 612U, 1613145022U },
	{ 1817269927U, 693U, 4279122573U },
	{ 1510091701U, 717U,  638191229U },
	{  365916850U, 807U,  600424314U },
	{  399324359U, 702U, 1803598116U },
	{ 1318480274U, 779U, 2074237022U },
	{  697758115U, 840U, 1483639402U },
	{ 1696507773U, 840U,  577415447U },
	{ 2081979121U, 981U, 3041486449U },
	{  955646687U, 742U, 3846494357U },
	{ 1250683506U, 749U,  836419859U },
	{  595003102U, 534U,  366794109U },
	{   47485338U, 558U, 3521120834U },
	{  619433479U, 610U, 3991783875U },
	{  704096520U, 518U, 4139493852U },
	{ 1712224984U, 606U, 2393312003U },
	{ 1318233152U, 922U, 3880361134U },
	{  855572992U, 761U, 1472974787U },
	{   64721421U, 703U,  683860550U },
	{  678931758U, 840U,  380616043U },
	{  692711973U, 778U, 1382361947U },
	{  677703619U, 530U, 2826914161U },
	{   92393223U, 586U, 1522128471U },
	{ 1222592920U, 743U, 3466726667U },
	{  358288986U, 695U, 1091956998U },
	{ 1935056945U, 958U,  514864477U },
	{  735675993U, 990U, 1294239989U },
	{ 1560089402U, 897U, 2238551287U },
	{   70616361U, 829U,   22483098U },
	{  368234700U, 731U, 2913875084U },
	{   20221190U, 879U, 1564152970U },
	{  539444654U, 682U, 1835141259U },
	{ 1314987297U, 840U, 1801114136U },
	{ 2019295544U, 645U, 3286438930U },
	{  469023838U, 716U, 1637918202U },
	{ 1843754496U, 653U, 2562092152U },
	{  400672036U, 809U, 4264212785U },
	{  404722249U, 965U, 2704116999U },
	{  600702209U, 758U,  584979986U },
	{  519953954U, 667U, 2574436237U },
	{ 1658071126U, 694U, 2214569490U },
	{  420480037U, 749U, 3430010866U },
	{  690103647U, 969U, 3700758083U },
	{ 1029424799U, 937U, 3787746841U },
	{ 2012608669U, 506U, 3362628973U },
	{ 1535432887U, 998U,   42610943U },
	{ 1330635533U, 857U, 3040806504U },
	{ 1223800550U, 539U, 3954229517U },
	{ 1322411537U, 680U, 3223250324U },
	{ 1877847898U, 945U, 2915147143U },
	{ 1646356099U, 874U,  965988280U },
	{  805687536U, 744U, 4032277920U },
	{ 1948093210U, 633U, 1346597684U },
	{  392609744U, 783U, 1636083295U },
	{  690241304U, 770U, 1201031298U },
	{ 1360302965U, 696U, 1665394461U },
	{ 1220090946U, 780U, 1316922812U },
	{  447092251U, 500U, 3438743375U },
	{ 1613868791U, 592U,  828546883U },
	{  523430951U, 548U, 2552392304U },
	{  726692899U, 810U, 1656872867U },
	{ 1364340021U, 836U, 3710513486U },
	{ 1986257729U, 931U,  935013962U },
	{  407983964U, 921U,  728767059U },
};
243 
__extract_hwseed(void)244 static u32 __extract_hwseed(void)
245 {
246 	unsigned int val = 0;
247 
248 	(void)(arch_get_random_seed_int(&val) ||
249 	       arch_get_random_int(&val));
250 
251 	return val;
252 }
253 
prandom_seed_early(struct rnd_state * state,u32 seed,bool mix_with_hwseed)254 static void prandom_seed_early(struct rnd_state *state, u32 seed,
255 			       bool mix_with_hwseed)
256 {
257 #define LCG(x)	 ((x) * 69069U)	/* super-duper LCG */
258 #define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
259 	state->s1 = __seed(HWSEED() ^ LCG(seed),        2U);
260 	state->s2 = __seed(HWSEED() ^ LCG(state->s1),   8U);
261 	state->s3 = __seed(HWSEED() ^ LCG(state->s2),  16U);
262 	state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
263 }
264 
prandom_state_selftest(void)265 static int __init prandom_state_selftest(void)
266 {
267 	int i, j, errors = 0, runs = 0;
268 	bool error = false;
269 
270 	for (i = 0; i < ARRAY_SIZE(test1); i++) {
271 		struct rnd_state state;
272 
273 		prandom_seed_early(&state, test1[i].seed, false);
274 		prandom_warmup(&state);
275 
276 		if (test1[i].result != prandom_u32_state(&state))
277 			error = true;
278 	}
279 
280 	if (error)
281 		pr_warn("prandom: seed boundary self test failed\n");
282 	else
283 		pr_info("prandom: seed boundary self test passed\n");
284 
285 	for (i = 0; i < ARRAY_SIZE(test2); i++) {
286 		struct rnd_state state;
287 
288 		prandom_seed_early(&state, test2[i].seed, false);
289 		prandom_warmup(&state);
290 
291 		for (j = 0; j < test2[i].iteration - 1; j++)
292 			prandom_u32_state(&state);
293 
294 		if (test2[i].result != prandom_u32_state(&state))
295 			errors++;
296 
297 		runs++;
298 		cond_resched();
299 	}
300 
301 	if (errors)
302 		pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
303 	else
304 		pr_info("prandom: %d self tests passed\n", runs);
305 	return 0;
306 }
307 core_initcall(prandom_state_selftest);
308 #endif

/*
 * The prandom_u32() implementation is now completely separate from the
 * prandom_state() functions, which are retained (for now) for compatibility.
 *
 * Because of (ab)use in the networking code for choosing random TCP/UDP port
 * numbers, which open DoS possibilities if guessable, we want something
 * stronger than a standard PRNG.  But the performance requirements of
 * the network code do not allow robust crypto for this application.
 *
 * So this is a homebrew Junior Spaceman implementation, based on the
 * lowest-latency trustworthy crypto primitive available, SipHash.
 * (The authors of SipHash have not been consulted about this abuse of
 * their work.)
 *
 * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
 * one word of output.  This abbreviated version uses 2 rounds per word
 * of output.
 */

/* Internal SipHash state: the four chaining words v0..v3. */
struct siprand_state {
	unsigned long v0;
	unsigned long v1;
	unsigned long v2;
	unsigned long v3;
};

/* One independent generator state per CPU; accessed via get_cpu_ptr(). */
static DEFINE_PER_CPU(struct siprand_state, net_rand_state);

/*
 * This is the core CPRNG function.  As "pseudorandom", this is not used
 * for truly valuable things, just intended to be a PITA to guess.
 * For maximum speed, we do just two SipHash rounds per word.  This is
 * the same rate as 4 rounds per 64 bits that SipHash normally uses,
 * so hopefully it's reasonably secure.
 *
 * There are two changes from the official SipHash finalization:
 * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
 *   they are there only to make the output rounds distinct from the input
 *   rounds, and this application has no input rounds.
 * - Rather than returning v0^v1^v2^v3, return v1+v3.
 *   If you look at the SipHash round, the last operation on v3 is
 *   "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
 *   Likewise "v1 ^= v2".  (The rotate of v2 makes a difference, but
 *   it still cancels out half of the bits in v2 for no benefit.)
 *   Second, since the last combining operation was xor, continue the
 *   pattern of alternating xor/add for a tiny bit of extra non-linearity.
 */
siprand_u32(struct siprand_state * s)357 static inline u32 siprand_u32(struct siprand_state *s)
358 {
359 	unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
360 
361 	PRND_SIPROUND(v0, v1, v2, v3);
362 	PRND_SIPROUND(v0, v1, v2, v3);
363 	s->v0 = v0;  s->v1 = v1;  s->v2 = v2;  s->v3 = v3;
364 	return v1 + v3;
365 }


/**
 *	prandom_u32 - pseudo random number generator
 *
 *	A 32 bit pseudo-random number is generated using a fast
 *	algorithm suitable for simulation. This algorithm is NOT
 *	considered safe for cryptographic use.
 */
prandom_u32(void)375 u32 prandom_u32(void)
376 {
377 	struct siprand_state *state = get_cpu_ptr(&net_rand_state);
378 	u32 res = siprand_u32(state);
379 
380 	put_cpu_ptr(&net_rand_state);
381 	return res;
382 }
383 EXPORT_SYMBOL(prandom_u32);

/**
 *	prandom_bytes - get the requested number of pseudo-random bytes
 *	@buf: where to copy the pseudo-random bytes to
 *	@bytes: the requested number of bytes
 */
void prandom_bytes(void *buf, size_t bytes)
{
	struct siprand_state *state = get_cpu_ptr(&net_rand_state);
	u8 *dst = buf;

	/* Bulk phase: one generator draw per full 32-bit word. */
	for (; bytes >= sizeof(u32); bytes -= sizeof(u32)) {
		put_unaligned(siprand_u32(state), (u32 *)dst);
		dst += sizeof(u32);
	}

	/* Tail phase: spread one last draw over the remaining 1-3 bytes. */
	if (bytes) {
		u32 word = siprand_u32(state);

		while (bytes--) {
			*dst++ = (u8)word;
			word >>= BITS_PER_BYTE;
		}
	}
	put_cpu_ptr(&net_rand_state);
}
EXPORT_SYMBOL(prandom_bytes);

/**
 *	prandom_seed - add entropy to pseudo random number generator
 *	@entropy: entropy value
 *
 *	Add some additional seed material to the prandom pool.
 *	The "entropy" is actually our IP address (the only caller is
 *	the network code), not for unpredictability, but to ensure that
 *	different machines are initialized differently.
 */
void prandom_seed(u32 entropy)
{
	int i;

	/* Also credit the value to the core random pool. */
	add_device_randomness(&entropy, sizeof(entropy));

	/*
	 * Mix the value into every CPU's SipHash state using the standard
	 * SipHash input pattern: XOR into v3, two rounds, XOR into v0.
	 */
	for_each_possible_cpu(i) {
		struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
		unsigned long v0 = state->v0, v1 = state->v1;
		unsigned long v2 = state->v2, v3 = state->v3;

		/* Repeat until no word is zero, avoiding the all-zero fixed point. */
		do {
			v3 ^= entropy;
			PRND_SIPROUND(v0, v1, v2, v3);
			PRND_SIPROUND(v0, v1, v2, v3);
			v0 ^= entropy;
		} while (unlikely(!v0 || !v1 || !v2 || !v3));

		/* WRITE_ONCE: the owning CPU may be reading these words concurrently. */
		WRITE_ONCE(state->v0, v0);
		WRITE_ONCE(state->v1, v1);
		WRITE_ONCE(state->v2, v2);
		WRITE_ONCE(state->v3, v3);
	}
}
EXPORT_SYMBOL(prandom_seed);

/*
 *	Generate some initially weak seeding values to allow
 *	the prandom_u32() engine to be started.
 */
prandom_init_early(void)452 static int __init prandom_init_early(void)
453 {
454 	int i;
455 	unsigned long v0, v1, v2, v3;
456 
457 	if (!arch_get_random_long(&v0))
458 		v0 = jiffies;
459 	if (!arch_get_random_long(&v1))
460 		v1 = random_get_entropy();
461 	v2 = v0 ^ PRND_K0;
462 	v3 = v1 ^ PRND_K1;
463 
464 	for_each_possible_cpu(i) {
465 		struct siprand_state *state;
466 
467 		v3 ^= i;
468 		PRND_SIPROUND(v0, v1, v2, v3);
469 		PRND_SIPROUND(v0, v1, v2, v3);
470 		v0 ^= i;
471 
472 		state = per_cpu_ptr(&net_rand_state, i);
473 		state->v0 = v0;  state->v1 = v1;
474 		state->v2 = v2;  state->v3 = v3;
475 	}
476 
477 	return 0;
478 }
479 core_initcall(prandom_init_early);


/* Stronger reseeding when available, and periodically thereafter. */
static void prandom_reseed(unsigned long dontcare);

/* Timer driving the reseeds; rearmed from within prandom_reseed(). */
static DEFINE_TIMER(seed_timer, prandom_reseed, 0, 0);

/* Timer callback: rekey every CPU's generator from the core RNG, then rearm. */
static void prandom_reseed(unsigned long dontcare)
{
	unsigned long expires;
	int i;

	/*
	 * Reinitialize each CPU's PRNG with 128 bits of key.
	 * No locking on the CPUs, but then somewhat random results are,
	 * well, expected.
	 */
	for_each_possible_cpu(i) {
		struct siprand_state *state;
		unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
		unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
#if BITS_PER_LONG == 32
		int j;

		/*
		 * On 32-bit machines, hash in two extra words to
		 * approximate 128-bit key length.  Not that the hash
		 * has that much security, but this prevents a trivial
		 * 64-bit brute force.
		 */
		for (j = 0; j < 2; j++) {
			unsigned long m = get_random_long();

			v3 ^= m;
			PRND_SIPROUND(v0, v1, v2, v3);
			PRND_SIPROUND(v0, v1, v2, v3);
			v0 ^= m;
		}
#endif
		/*
		 * Probably impossible in practice, but there is a
		 * theoretical risk that a race between this reseeding
		 * and the target CPU writing its state back could
		 * create the all-zero SipHash fixed point.
		 *
		 * To ensure that never happens, ensure the state
		 * we write contains no zero words.
		 */
		state = per_cpu_ptr(&net_rand_state, i);
		WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
		WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
		WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
		WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
	}

	/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
	expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
	mod_timer(&seed_timer, expires);
}

/*
 * The random ready callback can be called from almost any interrupt.
 * To avoid worrying about whether it's safe to delay that interrupt
 * long enough to seed all CPUs, just schedule an immediate timer event.
 */
static void prandom_timer_start(struct random_ready_callback *unused)
{
	/* Defer the actual reseeding work to timer context: fire now. */
	mod_timer(&seed_timer, jiffies);
}

/*
 * Start periodic full reseeding as soon as strong
 * random numbers are available.
 */
prandom_init_late(void)554 static int __init prandom_init_late(void)
555 {
556 	static struct random_ready_callback random_ready = {
557 		.func = prandom_timer_start
558 	};
559 	int ret = add_random_ready_callback(&random_ready);
560 
561 	if (ret == -EALREADY) {
562 		prandom_timer_start(&random_ready);
563 		ret = 0;
564 	}
565 	return ret;
566 }
567 late_initcall(prandom_init_late);