1 /*
2 * hw_random/core.c: HWRNG core API
3 *
4 * Copyright 2006 Michael Buesch <m@bues.ch>
5 * Copyright 2005 (c) MontaVista Software, Inc.
6 *
7 * Please read Documentation/admin-guide/hw_random.rst for details on use.
8 *
9 * This software may be used and distributed according to the terms
10 * of the GNU General Public License, incorporated herein by reference.
11 */
12
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
16 #include <linux/fs.h>
17 #include <linux/hw_random.h>
18 #include <linux/random.h>
19 #include <linux/kernel.h>
20 #include <linux/kthread.h>
21 #include <linux/sched/signal.h>
22 #include <linux/miscdevice.h>
23 #include <linux/module.h>
24 #include <linux/random.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/string.h>
28 #include <linux/uaccess.h>
29
30 #define RNG_MODULE_NAME "hw_random"
31
32 #define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
33
/* Currently active device; protected by rng_mutex, NULL when none. */
static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
/* khwrngd feeder thread; NULL while it is not running. */
static struct task_struct *hwrng_fill;
/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
/* Bytes still buffered in rng_buffer (consumed from the tail). */
static int data_avail;
/* Shared staging buffers: rng_buffer feeds /dev/hwrng, rng_fillbuf khwrngd. */
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per 1024 bits of input");
55
/* Forward declarations: these helpers reference each other. */
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);
62
/* Size of the kernel-side staging buffers (at least 32 bytes). */
static size_t rng_buffer_size(void)
{
	return RNG_BUFFER_SIZE;
}
67
add_early_randomness(struct hwrng * rng)68 static void add_early_randomness(struct hwrng *rng)
69 {
70 int bytes_read;
71 size_t size = min_t(size_t, 16, rng_buffer_size());
72
73 mutex_lock(&reading_mutex);
74 bytes_read = rng_get_data(rng, rng_buffer, size, 0);
75 mutex_unlock(&reading_mutex);
76 if (bytes_read > 0)
77 add_device_randomness(rng_buffer, bytes_read);
78 }
79
/*
 * kref release callback: run the driver's cleanup hook and wake any
 * waiter in hwrng_unregister() via cleanup_done.
 */
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}
89
set_current_rng(struct hwrng * rng)90 static int set_current_rng(struct hwrng *rng)
91 {
92 int err;
93
94 BUG_ON(!mutex_is_locked(&rng_mutex));
95
96 err = hwrng_init(rng);
97 if (err)
98 return err;
99
100 drop_current_rng();
101 current_rng = rng;
102
103 return 0;
104 }
105
drop_current_rng(void)106 static void drop_current_rng(void)
107 {
108 BUG_ON(!mutex_is_locked(&rng_mutex));
109 if (!current_rng)
110 return;
111
112 /* decrease last reference for triggering the cleanup */
113 kref_put(¤t_rng->ref, cleanup_rng);
114 current_rng = NULL;
115 }
116
117 /* Returns ERR_PTR(), NULL or refcounted hwrng */
get_current_rng_nolock(void)118 static struct hwrng *get_current_rng_nolock(void)
119 {
120 if (current_rng)
121 kref_get(¤t_rng->ref);
122
123 return current_rng;
124 }
125
get_current_rng(void)126 static struct hwrng *get_current_rng(void)
127 {
128 struct hwrng *rng;
129
130 if (mutex_lock_interruptible(&rng_mutex))
131 return ERR_PTR(-ERESTARTSYS);
132
133 rng = get_current_rng_nolock();
134
135 mutex_unlock(&rng_mutex);
136 return rng;
137 }
138
put_rng(struct hwrng * rng)139 static void put_rng(struct hwrng *rng)
140 {
141 /*
142 * Hold rng_mutex here so we serialize in case they set_current_rng
143 * on rng again immediately.
144 */
145 mutex_lock(&rng_mutex);
146 if (rng)
147 kref_put(&rng->ref, cleanup_rng);
148 mutex_unlock(&rng_mutex);
149 }
150
/*
 * Initialize @rng for use as the current device; called with rng_mutex
 * held (from set_current_rng()).
 *
 * If the rng is already live (refcount > 0) just take another reference
 * and skip the driver init.  Otherwise run ->init() (if any), start the
 * refcount at one and re-arm cleanup_done.  Then recompute
 * current_quality (capped at 1024 per 1024 bits) and start or stop the
 * khwrngd feeder thread accordingly.
 * Returns 0 or a negative errno from ->init().
 */
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	/* a per-device quality estimate wins over the module default */
	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}
179
rng_dev_open(struct inode * inode,struct file * filp)180 static int rng_dev_open(struct inode *inode, struct file *filp)
181 {
182 /* enforce read-only access to this chrdev */
183 if ((filp->f_mode & FMODE_READ) == 0)
184 return -EINVAL;
185 if (filp->f_mode & FMODE_WRITE)
186 return -EINVAL;
187 return 0;
188 }
189
/*
 * Fetch random bytes from @rng into kernel memory, with reading_mutex
 * held by the caller.  Uses ->read() when the driver provides it,
 * otherwise the legacy ->data_present()/->data_read() pair.  Returns
 * the number of bytes read, 0 if no data was available (and @wait was
 * zero), or a negative errno from the driver.
 */
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait) {
	BUG_ON(!mutex_is_locked(&reading_mutex));

	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	/* Legacy interface: poll for availability, then read one word. */
	if (rng->data_present && !rng->data_present(rng, wait))
		return 0;

	return rng->data_read(rng, (u32 *)buffer);
}
208
/*
 * read() handler for the hwrng chrdev.
 *
 * Refills the shared rng_buffer under reading_mutex, then copies data
 * to userspace from a stack bounce buffer so that no user copy happens
 * while the lock is held (copy_to_user() may fault and sleep).  Honors
 * O_NONBLOCK and pending signals.  Returns the number of bytes copied,
 * or a negative errno if nothing was copied yet.
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	u8 buffer[RNG_BUFFER_SIZE];
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			/* refill the shared buffer; block unless O_NONBLOCK */
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			} else if (bytes_read == 0 &&
				   (filp->f_flags & O_NONBLOCK)) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}

			data_avail = bytes_read;
		}

		len = data_avail;
		if (len) {
			if (len > size)
				len = size;

			/* consume from the tail of rng_buffer */
			data_avail -= len;

			memcpy(buffer, rng_buffer + data_avail, len);
		}
		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (len) {
			/* user copy happens outside reading_mutex */
			if (copy_to_user(buf + ret, buffer, len)) {
				err = -EFAULT;
				goto out;
			}

			size -= len;
			ret += len;
		}


		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	/* don't leave random bytes lying on the kernel stack */
	memzero_explicit(buffer, sizeof(buffer));
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
290
/* File operations for the hwrng character device. */
static const struct file_operations rng_chrdev_ops = {
	.owner = THIS_MODULE,
	.open = rng_dev_open,
	.read = rng_dev_read,
	.llseek = noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

/* Misc device node "/dev/hwrng" with the sysfs attribute groups below. */
static struct miscdevice rng_miscdev = {
	.minor = HWRNG_MINOR,
	.name = RNG_MODULE_NAME,
	.nodename = "hwrng",
	.fops = &rng_chrdev_ops,
	.groups = rng_dev_groups,
};
307
enable_best_rng(void)308 static int enable_best_rng(void)
309 {
310 int ret = -ENODEV;
311
312 BUG_ON(!mutex_is_locked(&rng_mutex));
313
314 /* rng_list is sorted by quality, use the best (=first) one */
315 if (!list_empty(&rng_list)) {
316 struct hwrng *new_rng;
317
318 new_rng = list_entry(rng_list.next, struct hwrng, list);
319 ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
320 if (!ret)
321 cur_rng_set_by_user = 0;
322 } else {
323 drop_current_rng();
324 cur_rng_set_by_user = 0;
325 ret = 0;
326 }
327
328 return ret;
329 }
330
rng_current_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)331 static ssize_t rng_current_store(struct device *dev,
332 struct device_attribute *attr,
333 const char *buf, size_t len)
334 {
335 int err;
336 struct hwrng *rng, *old_rng, *new_rng;
337
338 err = mutex_lock_interruptible(&rng_mutex);
339 if (err)
340 return -ERESTARTSYS;
341
342 old_rng = current_rng;
343 if (sysfs_streq(buf, "")) {
344 err = enable_best_rng();
345 } else {
346 list_for_each_entry(rng, &rng_list, list) {
347 if (sysfs_streq(rng->name, buf)) {
348 cur_rng_set_by_user = 1;
349 err = set_current_rng(rng);
350 break;
351 }
352 }
353 }
354 new_rng = get_current_rng_nolock();
355 mutex_unlock(&rng_mutex);
356
357 if (new_rng) {
358 if (new_rng != old_rng)
359 add_early_randomness(new_rng);
360 put_rng(new_rng);
361 }
362
363 return err ? : len;
364 }
365
rng_current_show(struct device * dev,struct device_attribute * attr,char * buf)366 static ssize_t rng_current_show(struct device *dev,
367 struct device_attribute *attr,
368 char *buf)
369 {
370 ssize_t ret;
371 struct hwrng *rng;
372
373 rng = get_current_rng();
374 if (IS_ERR(rng))
375 return PTR_ERR(rng);
376
377 ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
378 put_rng(rng);
379
380 return ret;
381 }
382
/* sysfs show for rng_available: space-separated names of all rngs. */
static ssize_t rng_available_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}
403
/* sysfs show: 1 if the current rng was user-chosen, 0 if auto-selected. */
static ssize_t rng_selected_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
410
/* Device attributes: rng_current (rw), rng_available/rng_selected (ro). */
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);
423
/* Tear down the hwrng misc device on module exit. */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

/* Create the hwrng misc device (with its sysfs attribute groups). */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}
433
/*
 * Body of the khwrngd kernel thread: continuously pull data from the
 * current rng and feed it to the entropy pool, crediting entropy
 * according to current_quality (per 1024 bits of input).  Exits when no
 * rng is available or kthread_stop() is called.
 */
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			/* back off so a dead device doesn't busy-loop us */
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	/* clear the handle so hwrng_init() may restart us later */
	hwrng_fill = NULL;
	return 0;
}
461
/* Start the khwrngd feeder thread; log and reset the handle on failure. */
static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed\n");
		hwrng_fill = NULL;
	}
}
470
/**
 * hwrng_register - register a hardware RNG with the core
 * @rng: device to register; must have a name and a ->read or
 *       ->data_read hook
 *
 * Inserts @rng into rng_list (kept sorted by decreasing quality) and
 * promotes it to current_rng when there is none yet, or when it beats
 * the current one's quality and the user has not pinned a device via
 * sysfs.  Also seeds the entropy pool from the new device when that is
 * safe (device became current, or it needs no init callback).
 *
 * Returns 0 on success, -EINVAL for a malformed @rng, -EEXIST for a
 * duplicate name, or an init error from set_current_rng().
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *tmp;
	struct list_head *rng_list_ptr;
	bool is_new_current = false;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	/* start cleanup_done "completed" so unregister never blocks early */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	/* rng_list is sorted by decreasing quality */
	list_for_each(rng_list_ptr, &rng_list) {
		tmp = list_entry(rng_list_ptr, struct hwrng, list);
		if (tmp->quality < rng->quality)
			break;
	}
	list_add_tail(&rng->list, rng_list_ptr);

	if (!current_rng ||
	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
		/* to use current_rng in add_early_randomness() we need
		 * to take a ref
		 */
		is_new_current = true;
		kref_get(&rng->ref);
	}
	mutex_unlock(&rng_mutex);
	if (is_new_current || !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet by set_current_rng(); so only use the
		 * randomness from devices that don't need an init callback
		 */
		add_early_randomness(rng);
	}
	if (is_new_current)
		put_rng(rng);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
537
/**
 * hwrng_unregister - remove a hardware RNG from the core
 * @rng: device previously registered with hwrng_register()
 *
 * Removes @rng from the list; if it was current, switches to the best
 * remaining device (or none).  Stops khwrngd when the list becomes
 * empty, and waits for the device's cleanup to finish before returning,
 * so the driver may free @rng afterwards.
 */
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *old_rng, *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	old_rng = current_rng;
	list_del(&rng->list);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			/* fallback failed: run without a current rng */
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		/* no rngs left to feed from; stop the feeder thread */
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (old_rng != new_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	/* block until cleanup_rng() has run for @rng */
	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
572
/* devres destructor: unregister the rng stored in the devres block. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	struct hwrng **rng = res;

	hwrng_unregister(*rng);
}
577
/* devres match callback: true when the devres block holds @data. */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **rng = res;

	if (WARN_ON(!rng || !*rng))
		return 0;

	return *rng == data;
}
587
devm_hwrng_register(struct device * dev,struct hwrng * rng)588 int devm_hwrng_register(struct device *dev, struct hwrng *rng)
589 {
590 struct hwrng **ptr;
591 int error;
592
593 ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
594 if (!ptr)
595 return -ENOMEM;
596
597 error = hwrng_register(rng);
598 if (error) {
599 devres_free(ptr);
600 return error;
601 }
602
603 *ptr = rng;
604 devres_add(dev, ptr);
605 return 0;
606 }
607 EXPORT_SYMBOL_GPL(devm_hwrng_register);
608
/* Release the devres entry for @rng now, unregistering it immediately. */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
614
hwrng_modinit(void)615 static int __init hwrng_modinit(void)
616 {
617 int ret;
618
619 /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
620 rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
621 if (!rng_buffer)
622 return -ENOMEM;
623
624 rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
625 if (!rng_fillbuf) {
626 kfree(rng_buffer);
627 return -ENOMEM;
628 }
629
630 ret = register_miscdev();
631 if (ret) {
632 kfree(rng_fillbuf);
633 kfree(rng_buffer);
634 }
635
636 return ret;
637 }
638
/* Module teardown: every rng must already be unregistered by now. */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}
649
/* Module entry points and metadata. */
module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
655