/*-
 * Copyright (c) 2000-2015 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#ifdef _KERNEL
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha256.h>

#include <dev/random/hash.h>
#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>
#include <dev/random/uint128.h>
#include <dev/random/yarrow.h>
#else /* !_KERNEL */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/mutex.h>

#include "los_init.h"

#include "unit_test.h"

#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha256.h>

#include <dev/random/hash.h>
#include <dev/random/randomdev.h>
#include <dev/random/uint128.h>
#include <dev/random/yarrow.h>
#endif /* _KERNEL */

#define RANDOM_ITERATE_NUMBER 16

#define RANDOM_YARROW_TIMEBIN 16 /* max value for Pt/t */

#define RANDOM_YARROW_FAST 0
#define RANDOM_YARROW_SLOW 1
#define RANDOM_YARROW_NPOOLS 2

/* This algorithm (and code) presumes that RANDOM_KEYSIZE is twice as large as RANDOM_BLOCKSIZE */
CTASSERT(RANDOM_BLOCKSIZE == sizeof(uint128_t));
CTASSERT(RANDOM_KEYSIZE == 2*RANDOM_BLOCKSIZE);
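/*
 * With the stock FreeBSD definitions, RANDOM_BLOCKSIZE is the 128-bit block
 * of the Rijndael/AES cipher and RANDOM_KEYSIZE is the 256-bit output of the
 * SHA-256 based randomdev hash, so one digest keys the cipher and spans
 * exactly two counter blocks; the assertions above pin this down.
 */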

#ifndef __LITEOS__
/* Probes for dtrace(1) */
SDT_PROVIDER_DECLARE(random);
SDT_PROVIDER_DEFINE(random);
SDT_PROBE_DEFINE3(random, yarrow, event_processor, debug, "boolean", "u_int", "struct ys_pool *");
#endif

/*
 * This is the beastie that needs protecting. It contains all of the
 * state that we are excited about. Exactly one is instantiated.
 */
static struct yarrow_state {
    uint128_t ys_counter;             /* C */
    struct randomdev_key ys_key;      /* K */
    u_int ys_gengateinterval;         /* Pg */
    u_int ys_bins;                    /* Pt/t */
    u_int ys_outputblocks;            /* count output blocks for gates */
    u_int ys_slowoverthresh;          /* slow pool overthreshhold reseed count */
    struct ys_pool {
        u_int ysp_source_bits[ENTROPYSOURCE];  /* estimated bits of entropy per source */
        u_int ysp_thresh;             /* pool reseed threshold */
        struct randomdev_hash ysp_hash;  /* accumulated entropy */
    } ys_pool[RANDOM_YARROW_NPOOLS];  /* pool[0] is fast, pool[1] is slow */
    bool ys_seeded;
    /* Reseed lock */
    mtx_t ys_mtx;
} yarrow_state;

#ifdef _KERNEL
static struct sysctl_ctx_list random_clist;
RANDOM_CHECK_UINT(gengateinterval, 4, 64);
RANDOM_CHECK_UINT(bins, RANDOM_YARROW_NPOOLS, 16);
RANDOM_CHECK_UINT(fastthresh, (RANDOM_BLOCKSIZE*8)/4, (RANDOM_BLOCKSIZE*8)); /* Bit counts */
RANDOM_CHECK_UINT(slowthresh, (RANDOM_BLOCKSIZE*8)/4, (RANDOM_BLOCKSIZE*8)); /* Bit counts */
RANDOM_CHECK_UINT(slowoverthresh, 1, 5);
#endif /* _KERNEL */

static void random_yarrow_pre_read(void);
static void random_yarrow_read(uint8_t *, u_int);
static bool random_yarrow_seeded(void);
static void random_yarrow_process_event(struct harvest_event *);
static void random_yarrow_init_alg(void *);
static void random_yarrow_deinit_alg(void *);

static void random_yarrow_reseed_internal(u_int);

struct random_algorithm random_alg_context = {
    .ra_ident = "Yarrow",
    .ra_init_alg = random_yarrow_init_alg,
    .ra_deinit_alg = random_yarrow_deinit_alg,
    .ra_pre_read = random_yarrow_pre_read,
    .ra_read = random_yarrow_read,
    .ra_seeded = random_yarrow_seeded,
    .ra_event_processor = random_yarrow_process_event,
    .ra_poolcount = RANDOM_YARROW_NPOOLS,
};
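/*
 * Rough sketch of how a consumer is expected to drive this context
 * (illustrative only; randomdev.h defines the authoritative contract):
 *
 *     random_alg_context.ra_init_alg(NULL);
 *     ... feed struct harvest_event items via ra_event_processor() ...
 *     if (random_alg_context.ra_seeded()) {
 *         random_alg_context.ra_pre_read();
 *         random_alg_context.ra_read(buf, 4 * RANDOM_BLOCKSIZE);
 *     }
 */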

/* ARGSUSED */
static void
random_yarrow_init_alg(void *unused __unused)
{
    int i, j;
#ifdef _KERNEL
    struct sysctl_oid *random_yarrow_o;
#endif

    RANDOM_RESEED_INIT_LOCK();
    /* Start unseeded, therefore blocked. */
    yarrow_state.ys_seeded = false;
#ifdef _KERNEL
    /*
     * Yarrow parameters. Do not adjust these unless you have
     * a very good clue about what they do!
     */
    random_yarrow_o = SYSCTL_ADD_NODE(&random_clist,
        SYSCTL_STATIC_CHILDREN(_kern_random),
        OID_AUTO, "yarrow", CTLFLAG_RW, 0,
        "Yarrow Parameters");
    SYSCTL_ADD_PROC(&random_clist,
        SYSCTL_CHILDREN(random_yarrow_o), OID_AUTO,
        "gengateinterval", CTLTYPE_UINT | CTLFLAG_RWTUN,
        &yarrow_state.ys_gengateinterval, 0,
        random_check_uint_gengateinterval, "UI",
        "Generation gate interval");
    SYSCTL_ADD_PROC(&random_clist,
        SYSCTL_CHILDREN(random_yarrow_o), OID_AUTO,
        "bins", CTLTYPE_UINT | CTLFLAG_RWTUN,
        &yarrow_state.ys_bins, 0,
        random_check_uint_bins, "UI",
        "Execution time tuner");
    SYSCTL_ADD_PROC(&random_clist,
        SYSCTL_CHILDREN(random_yarrow_o), OID_AUTO,
        "fastthresh", CTLTYPE_UINT | CTLFLAG_RWTUN,
        &yarrow_state.ys_pool[0].ysp_thresh, 0,
        random_check_uint_fastthresh, "UI",
        "Fast reseed threshold");
    SYSCTL_ADD_PROC(&random_clist,
        SYSCTL_CHILDREN(random_yarrow_o), OID_AUTO,
        "slowthresh", CTLTYPE_UINT | CTLFLAG_RWTUN,
        &yarrow_state.ys_pool[1].ysp_thresh, 0,
        random_check_uint_slowthresh, "UI",
        "Slow reseed threshold");
    SYSCTL_ADD_PROC(&random_clist,
        SYSCTL_CHILDREN(random_yarrow_o), OID_AUTO,
        "slowoverthresh", CTLTYPE_UINT | CTLFLAG_RWTUN,
        &yarrow_state.ys_slowoverthresh, 0,
        random_check_uint_slowoverthresh, "UI",
        "Slow over-threshold reseed");
#endif /* _KERNEL */
    yarrow_state.ys_gengateinterval = 10;
    yarrow_state.ys_bins = 10;
    yarrow_state.ys_pool[RANDOM_YARROW_FAST].ysp_thresh = (3*(RANDOM_BLOCKSIZE*8))/4;
    yarrow_state.ys_pool[RANDOM_YARROW_SLOW].ysp_thresh = (RANDOM_BLOCKSIZE*8);
    yarrow_state.ys_slowoverthresh = 2;
    /* Ensure that the first time we read, we are gated. */
    yarrow_state.ys_outputblocks = yarrow_state.ys_gengateinterval;
    /* Initialise the fast and slow entropy pools */
    for (i = RANDOM_YARROW_FAST; i <= RANDOM_YARROW_SLOW; i++) {
        randomdev_hash_init(&yarrow_state.ys_pool[i].ysp_hash);
        for (j = RANDOM_START; j < ENTROPYSOURCE; j++)
            yarrow_state.ys_pool[i].ysp_source_bits[j] = 0;
    }
    /* Clear the counter */
    yarrow_state.ys_counter = UINT128_ZERO;
}

/* ARGSUSED */
static void
random_yarrow_deinit_alg(void *unused __unused)
{

    RANDOM_RESEED_DEINIT_LOCK();
    explicit_bzero(&yarrow_state, sizeof(yarrow_state));
#ifdef _KERNEL
    sysctl_ctx_free(&random_clist);
#endif
}

/* Process a single stochastic event off the harvest queue */
static void
random_yarrow_process_event(struct harvest_event *event)
{
    u_int pl, overthreshhold[RANDOM_YARROW_NPOOLS];
    enum random_entropy_source src;

    RANDOM_RESEED_LOCK();
    /*
     * Accumulate the event into the appropriate pool
     * where each event carries the destination information.
     * We lock against pool state modification which can happen
     * during accumulation/reseeding and reading/regating
     */
    pl = event->he_destination % RANDOM_YARROW_NPOOLS;
    randomdev_hash_iterate(&yarrow_state.ys_pool[pl].ysp_hash, event, sizeof(*event));
    if ((event->he_source < ENTROPYSOURCE) && (pl < RANDOM_YARROW_NPOOLS)) {
        yarrow_state.ys_pool[pl].ysp_source_bits[event->he_source] += event->he_bits;
    }
    /* Count the over-threshold sources in each pool */
    for (pl = RANDOM_YARROW_FAST; (pl <= RANDOM_YARROW_SLOW) && (pl < RANDOM_YARROW_NPOOLS); pl++) {
        overthreshhold[pl] = 0;
        for (src = RANDOM_START; src < ENTROPYSOURCE; src++) {
            if (yarrow_state.ys_pool[pl].ysp_source_bits[src] > yarrow_state.ys_pool[pl].ysp_thresh)
                overthreshhold[pl]++;
        }
    }
    /*
     * If enough slow sources are over threshold, then slow reseed
     * else if any fast source over threshold, then fast reseed.
     */
    if (overthreshhold[RANDOM_YARROW_SLOW] >= yarrow_state.ys_slowoverthresh)
        random_yarrow_reseed_internal(RANDOM_YARROW_SLOW);
    else if (overthreshhold[RANDOM_YARROW_FAST] > 0 && yarrow_state.ys_seeded)
        random_yarrow_reseed_internal(RANDOM_YARROW_FAST);
    explicit_bzero(event, sizeof(*event));
    RANDOM_RESEED_UNLOCK();
}

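/*
 * Reseed the generator. With fastslow == RANDOM_YARROW_FAST only the fast
 * pool is drained; with RANDOM_YARROW_SLOW both pools are. The numbered
 * comments below follow the reseed steps of the Yarrow design paper.
 */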
static void
random_yarrow_reseed_internal(u_int fastslow)
{
    /*
     * Interrupt-context stack is a limited resource; make large
     * structures static.
     */
    static uint8_t v[RANDOM_YARROW_TIMEBIN][RANDOM_KEYSIZE];  /* v[i] */
    static uint128_t temp;
    static struct randomdev_hash context;
    u_int i;
    enum random_entropy_source j;

    KASSERT(yarrow_state.ys_pool[RANDOM_YARROW_FAST].ysp_thresh > 0, ("random: Yarrow fast threshold = 0"));
    KASSERT(yarrow_state.ys_pool[RANDOM_YARROW_SLOW].ysp_thresh > 0, ("random: Yarrow slow threshold = 0"));
    RANDOM_RESEED_ASSERT_LOCK_OWNED();
#ifndef __LITEOS__
    SDT_PROBE3(random, yarrow, event_processor, debug, yarrow_state.ys_seeded, yarrow_state.ys_slowoverthresh, yarrow_state.ys_pool);
#endif
    /* 1. Hash the accumulated entropy into v[0] */
    randomdev_hash_init(&context);
    /* Feed the slow pool hash in if slow */
    if (fastslow == RANDOM_YARROW_SLOW) {
        randomdev_hash_finish(&yarrow_state.ys_pool[RANDOM_YARROW_SLOW].ysp_hash, &temp);
        randomdev_hash_iterate(&context, &temp, sizeof(temp));
    }
    randomdev_hash_finish(&yarrow_state.ys_pool[RANDOM_YARROW_FAST].ysp_hash, &temp);
    randomdev_hash_iterate(&context, &temp, sizeof(temp));
    randomdev_hash_finish(&context, v[0]);
    /*-
     * 2. Compute hash values for all v. _Supposed_ to be computationally
     *    intensive.
     */
    if (yarrow_state.ys_bins > RANDOM_YARROW_TIMEBIN)
        yarrow_state.ys_bins = RANDOM_YARROW_TIMEBIN;
    for (i = 1; i < yarrow_state.ys_bins; i++) {
        randomdev_hash_init(&context);
        /* v[i] #= h(v[i - 1]) */
        randomdev_hash_iterate(&context, v[i - 1], RANDOM_KEYSIZE);
        /* v[i] #= h(v[0]) */
        randomdev_hash_iterate(&context, v[0], RANDOM_KEYSIZE);
        /* v[i] #= h(i) */
        randomdev_hash_iterate(&context, &i, sizeof(i));
        /* Return the hashval */
        randomdev_hash_finish(&context, v[i]);
    }
    /*-
     * 3. Compute a new key; h' is the identity function here;
     *    it is not being ignored!
     */
    randomdev_hash_init(&context);
    randomdev_hash_iterate(&context, &yarrow_state.ys_key, RANDOM_KEYSIZE);
    for (i = 1; i < yarrow_state.ys_bins; i++)
        randomdev_hash_iterate(&context, v[i], RANDOM_KEYSIZE);
    randomdev_hash_finish(&context, &temp);
    randomdev_encrypt_init(&yarrow_state.ys_key, &temp);
    /* 4. Recompute the counter */
    yarrow_state.ys_counter = UINT128_ZERO;
    randomdev_encrypt(&yarrow_state.ys_key, &yarrow_state.ys_counter, &temp, RANDOM_BLOCKSIZE);
    yarrow_state.ys_counter = temp;
    /* 5. Reset entropy estimate accumulators to zero */
    for (i = 0; i <= fastslow; i++)
        for (j = RANDOM_START; j < ENTROPYSOURCE; j++)
            yarrow_state.ys_pool[i].ysp_source_bits[j] = 0;
    /* 6. Wipe memory of intermediate values */
    explicit_bzero(v, sizeof(v));
    explicit_bzero(&temp, sizeof(temp));
    explicit_bzero(&context, sizeof(context));
    /* Not defined so writes ain't gonna happen. Kept for documenting. */
#ifdef RANDOM_RWFILE_WRITE_IS_OK
    /*-
     * 7. Dump to seed file.
     * This pseudo-code is documentation. Please leave it alone.
     */
    seed_file = "<some file>";
    error = randomdev_write_file(seed_file, <generated entropy>, PAGE_SIZE);
    if (error == 0)
        printf("random: entropy seed file '%s' successfully written\n", seed_file);
#endif
    /* Unblock the device if it was blocked due to being unseeded */
    if (!yarrow_state.ys_seeded) {
        yarrow_state.ys_seeded = true;
#ifndef __LITEOS__
        randomdev_unblock();
#endif
    }
}

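/*
 * Generator gate: after ys_gengateinterval output blocks, encrypt the
 * counter into a scratch buffer and re-key the cipher from that output so
 * a later key compromise cannot be rolled back to recover earlier output.
 */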
static __inline void
random_yarrow_generator_gate(void)
{
    u_int i;
    uint8_t temp[RANDOM_KEYSIZE];

    RANDOM_RESEED_ASSERT_LOCK_OWNED();
    uint128_increment(&yarrow_state.ys_counter);
    for (i = 0; i < RANDOM_KEYSIZE; i += RANDOM_BLOCKSIZE)
        randomdev_encrypt(&yarrow_state.ys_key, &yarrow_state.ys_counter, temp + i, RANDOM_BLOCKSIZE);
    randomdev_encrypt_init(&yarrow_state.ys_key, temp);
    explicit_bzero(temp, sizeof(temp));
}

/*-
 * Used to return processed entropy from the PRNG. There is a pre_read
 * required to be present (but it can be a stub) in order to allow
 * specific actions at the beginning of the read.
 * Yarrow does its reseeding in its own thread; _pre_read() is not used
 * by Yarrow but must be kept for completeness.
 */
void
random_yarrow_pre_read(void)
{
}

/*-
 * Main read from Yarrow.
 * The supplied buf MUST be a multiple (>=0) of RANDOM_BLOCKSIZE in size.
 * Lots of code presumes this for efficiency, both here and in other
 * routines. You are NOT allowed to break this!
 */
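/*
 * For example, a caller wanting 64 bytes passes bytecount == 64
 * (4 * RANDOM_BLOCKSIZE with a 128-bit block); a request for 60 bytes
 * would trip the KASSERT below.
 */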
void
random_yarrow_read(uint8_t *buf, u_int bytecount)
{
    u_int blockcount, i;

    KASSERT((bytecount % RANDOM_BLOCKSIZE) == 0, ("%s(): bytecount (= %d) must be a multiple of %d", __func__, bytecount, RANDOM_BLOCKSIZE));
    RANDOM_RESEED_LOCK();
    blockcount = howmany(bytecount, RANDOM_BLOCKSIZE);
    for (i = 0; i < blockcount; i++) {
        if (yarrow_state.ys_outputblocks++ >= yarrow_state.ys_gengateinterval) {
            random_yarrow_generator_gate();
            yarrow_state.ys_outputblocks = 0;
        }
        uint128_increment(&yarrow_state.ys_counter);
        randomdev_encrypt(&yarrow_state.ys_key, &yarrow_state.ys_counter, buf, RANDOM_BLOCKSIZE);
        buf += RANDOM_BLOCKSIZE;
    }
    RANDOM_RESEED_UNLOCK();
}

bool
random_yarrow_seeded(void)
{

    return (yarrow_state.ys_seeded);
}

#if defined(LOSCFG_HW_RANDOM_ENABLE)
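/*
 * Fill pbuf with len bytes from the platform hardware RNG via the Hi*
 * hooks declared below; on failure the hardware is deinitialised and
 * errno is set to EIO.
 */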
void
random_hw_getnumber(char *pbuf, size_t len)
{
    extern void HiRandomHwInit(void);
    extern void HiRandomHwDeinit(void);
    extern int HiRandomHwGetNumber(char *buffer, size_t buflen);

    HiRandomHwInit();

    int ret = HiRandomHwGetNumber(pbuf, len);
    if (ret != 0) {
        HiRandomHwDeinit();
        errno = EIO;
        return;
    }

    HiRandomHwDeinit();
}
#endif

#if defined(LOSCFG_HW_RANDOM_ENABLE) || defined(LOSCFG_DRIVERS_RANDOM)
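/*
 * Wrap one chunk of raw entropy into a struct harvest_event and hand it
 * to the Yarrow event processor. num is reused as the event counter, the
 * pool destination and (modulo 3) the source index; the entropy-bit
 * estimate derived below is a heuristic, not a measured value.
 */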
void
run_harvester_once(const char *pentropy, size_t num)
{
    struct harvest_event e;
    uint64_t data;
    errno_t err = EOK;

    if (pentropy == NULL) {
        return;
    }
    e.he_somecounter = num;
    e.he_size = sizeof(e.he_entropy);
    err = memcpy_s(e.he_entropy, e.he_size, pentropy, e.he_size);
    if (err != EOK) {
        return;
    }
    err = memcpy_s(&data, sizeof(uint64_t), e.he_entropy, sizeof(uint64_t));
    if (err != EOK) {
        return;
    }
    e.he_bits = data % 64 + 60; /* heuristic entropy estimate: 60..123 bits */
    e.he_destination = num;
    e.he_source = (num + 3) % 3; /* spread events across the first three sources */
    e.he_next = NULL;
    random_alg_context.ra_event_processor(&e);
}
#endif

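/*
 * Prime the Yarrow pools at start-up by harvesting RANDOM_ITERATE_NUMBER
 * 8-byte samples, either from the hardware RNG or, failing that, from
 * rand() seeded with the CPU cycle counter; the latter carries little
 * unpredictability beyond that seed.
 */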
void
run_harvester_iterate(void *arg __unused)
{
#if defined(LOSCFG_HW_RANDOM_ENABLE)
    int i;
    char buf[8]; /* store random numbers */
    for (i = 0; i < RANDOM_ITERATE_NUMBER; i++) {
        random_hw_getnumber(buf, sizeof(buf));
        run_harvester_once(buf, i);
        (void)memset_s(buf, sizeof(buf), 0, sizeof(buf));
    }
#elif defined(LOSCFG_DRIVERS_RANDOM)
    extern VOID LOS_GetCpuCycle(UINT32 *puwCntHi, UINT32 *puwCntLo);
    UINT32 rdata1, rdata2;
    char buf[8]; /* store random numbers */
    int i;
    for (i = 0; i < RANDOM_ITERATE_NUMBER; i++) {
        LOS_GetCpuCycle(&rdata1, &rdata2);
        srand((unsigned int)rdata2);
        rdata1 = (UINT32)rand();
        rdata2 = (UINT32)rand();
        (void)memcpy_s(buf, sizeof(UINT32), &rdata1, sizeof(UINT32));
        (void)memcpy_s(buf + sizeof(UINT32), sizeof(UINT32), &rdata2, sizeof(UINT32));
        run_harvester_once(buf, i);
    }
    (void)memset_s(buf, sizeof(buf), 0, sizeof(buf));
#endif
}

#if defined(LOSCFG_HW_RANDOM_ENABLE) || defined(LOSCFG_DRIVERS_RANDOM)
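/*
 * Module entry point: initialise the Yarrow state and run one round of
 * harvesting so the generator has something to seed itself from.
 */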
UINT32 OsDriverRandomInit(VOID)
{
    random_alg_context.ra_init_alg(NULL);
    run_harvester_iterate(NULL);
    return LOS_OK;
}
#endif
