// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Helper functions for implementing `RngCore` functions.
//!
//! For cross-platform reproducibility, these functions all use Little Endian:
//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
//!
//! Byte-swapping (like the std `to_le` functions) is only needed to convert
//! to/from byte sequences, and since its purpose is reproducibility,
//! non-reproducible sources (e.g. `OsRng`) need not bother with it.

use crate::RngCore;
use core::cmp::min;

/// Implement `next_u64` via `next_u32`, little-endian order.
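///
/// # Example
///
/// A sketch of the composition, assuming a hypothetical RNG whose `next_u32`
/// counts upward from 1 (so the two calls yield `x = 1`, then `y = 2`):
///
/// ```ignore
/// // Low half first, then high half: (2 << 32) | 1.
/// assert_eq!(next_u64_via_u32(&mut counting_rng), 0x0000_0002_0000_0001);
/// ```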
pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    // Use LE; we explicitly generate one value before the next.
    let x = u64::from(rng.next_u32());
    let y = u64::from(rng.next_u32());
    (y << 32) | x
}

/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
///
/// The fastest way to fill a slice is usually to work as long as possible with
/// integers. That is why this method mostly uses `next_u64`, and uses
/// `next_u32` only once, when four or fewer bytes remain at the end of the
/// slice.
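///
/// # Example
///
/// A sketch of the resulting layout, assuming a hypothetical RNG whose
/// `next_u32` counts upward from 1 and whose `next_u64` combines pairs of
/// those values in little-endian order:
///
/// ```ignore
/// let mut buf = [0u8; 13];
/// fill_bytes_via_next(&mut counting_rng, &mut buf);
/// // One whole u64 fills bytes 0..8; a second u64 fills the 5-byte tail.
/// assert_eq!(buf, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4]);
/// ```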
pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
    let mut left = dest;
    while left.len() >= 8 {
        let (l, r) = { left }.split_at_mut(8);
        left = r;
        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
        l.copy_from_slice(&chunk);
    }
    let n = left.len();
    if n > 4 {
        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
        left.copy_from_slice(&chunk[..n]);
    } else if n > 0 {
        let chunk: [u8; 4] = rng.next_u32().to_le_bytes();
        left.copy_from_slice(&chunk[..n]);
    }
}

trait Observable: Copy {
    type Bytes: AsRef<[u8]>;
    fn to_le_bytes(self) -> Self::Bytes;

    // Contract: observing self is memory-safe (implies no uninitialised padding)
    fn as_byte_slice(x: &[Self]) -> &[u8];
}
impl Observable for u32 {
    type Bytes = [u8; 4];
    fn to_le_bytes(self) -> Self::Bytes {
        // Resolves to the inherent `u32::to_le_bytes`, not this trait method
        // (inherent methods take precedence), so this does not recurse.
        self.to_le_bytes()
    }
    fn as_byte_slice(x: &[Self]) -> &[u8] {
        let ptr = x.as_ptr() as *const u8;
        let len = x.len() * core::mem::size_of::<Self>();
        // SAFETY: `x` is a valid, fully-initialised slice and `u32` has no
        // padding, so the same memory may be viewed as `len` bytes.
        unsafe { core::slice::from_raw_parts(ptr, len) }
    }
}
impl Observable for u64 {
    type Bytes = [u8; 8];
    fn to_le_bytes(self) -> Self::Bytes {
        self.to_le_bytes()
    }
    fn as_byte_slice(x: &[Self]) -> &[u8] {
        let ptr = x.as_ptr() as *const u8;
        let len = x.len() * core::mem::size_of::<Self>();
        // SAFETY: as above; `u64` has no padding and the slice is initialised.
        unsafe { core::slice::from_raw_parts(ptr, len) }
    }
}

fn fill_via_chunks<T: Observable>(src: &[T], dest: &mut [u8]) -> (usize, usize) {
    let size = core::mem::size_of::<T>();
    let byte_len = min(src.len() * size, dest.len());
    let num_chunks = (byte_len + size - 1) / size;

    if cfg!(target_endian = "little") {
        // On LE we can do a simple copy, which is 25-50% faster:
        dest[..byte_len].copy_from_slice(&T::as_byte_slice(&src[..num_chunks])[..byte_len]);
    } else {
        // This code is valid on all arches, but slower than the above:
        let mut i = 0;
        let mut iter = dest[..byte_len].chunks_exact_mut(size);
        for chunk in &mut iter {
            chunk.copy_from_slice(src[i].to_le_bytes().as_ref());
            i += 1;
        }
        let chunk = iter.into_remainder();
        if !chunk.is_empty() {
            chunk.copy_from_slice(&src[i].to_le_bytes().as_ref()[..chunk.len()]);
        }
    }

    (num_chunks, byte_len)
}

/// Implement `fill_bytes` by reading chunks from the output buffer of a block
/// based RNG.
///
/// The return values are `(consumed_u32, filled_u8)`.
///
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u32` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 4` rounded up.
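///
/// For instance, with `src.len() == 3` and `dest.len() == 5`, five bytes are
/// filled and two words are consumed; the second word is only partially used.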
///
/// # Example
/// (from `IsaacRng`)
///
/// ```ignore
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
///     let mut read_len = 0;
///     while read_len < dest.len() {
///         if self.index >= self.rsl.len() {
///             self.isaac();
///         }
///
///         let (consumed_u32, filled_u8) =
///             impls::fill_via_u32_chunks(&mut self.rsl[self.index..],
///                                        &mut dest[read_len..]);
///
///         self.index += consumed_u32;
///         read_len += filled_u8;
///     }
/// }
/// ```
pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
    fill_via_chunks(src, dest)
}

/// Implement `fill_bytes` by reading chunks from the output buffer of a block
/// based RNG.
///
/// The return values are `(consumed_u64, filled_u8)`.
///
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u64` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 8` rounded up.
///
/// See `fill_via_u32_chunks` for an example.
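///
/// For instance, with `src.len() == 2` and `dest.len() == 5`, five bytes are
/// filled and one word is consumed (only its first five bytes are used).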
pub fn fill_via_u64_chunks(src: &[u64], dest: &mut [u8]) -> (usize, usize) {
    fill_via_chunks(src, dest)
}

/// Implement `next_u32` via `fill_bytes`, little-endian order.
pub fn next_u32_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u32 {
    let mut buf = [0; 4];
    rng.fill_bytes(&mut buf);
    u32::from_le_bytes(buf)
}

/// Implement `next_u64` via `fill_bytes`, little-endian order.
pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    let mut buf = [0; 8];
    rng.fill_bytes(&mut buf);
    u64::from_le_bytes(buf)
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_fill_via_u32_chunks() {
        let src = [1, 2, 3];
        let mut dst = [0u8; 11];
        assert_eq!(fill_via_u32_chunks(&src, &mut dst), (3, 11));
        assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]);

        let mut dst = [0u8; 13];
        assert_eq!(fill_via_u32_chunks(&src, &mut dst), (3, 12));
        assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0]);

        let mut dst = [0u8; 5];
        assert_eq!(fill_via_u32_chunks(&src, &mut dst), (2, 5));
        assert_eq!(dst, [1, 0, 0, 0, 2]);
    }

    #[test]
    fn test_fill_via_u64_chunks() {
        let src = [1, 2];
        let mut dst = [0u8; 11];
        assert_eq!(fill_via_u64_chunks(&src, &mut dst), (2, 11));
        assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]);

        let mut dst = [0u8; 17];
        assert_eq!(fill_via_u64_chunks(&src, &mut dst), (2, 16));
        assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]);

        let mut dst = [0u8; 5];
        assert_eq!(fill_via_u64_chunks(&src, &mut dst), (1, 5));
        assert_eq!(dst, [1, 0, 0, 0, 0]);
    }
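
    // A minimal deterministic RNG used only by the tests below: a sketch for
    // illustration, not part of the crate's API. `next_u32` counts upward
    // from 1, and the remaining `RngCore` methods are built from the helpers
    // under test.
    struct CountingRng(u32);

    impl RngCore for CountingRng {
        fn next_u32(&mut self) -> u32 {
            self.0 += 1;
            self.0
        }

        fn next_u64(&mut self) -> u64 {
            next_u64_via_u32(self)
        }

        fn fill_bytes(&mut self, dest: &mut [u8]) {
            fill_bytes_via_next(self, dest)
        }

        fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), crate::Error> {
            self.fill_bytes(dest);
            Ok(())
        }
    }

    #[test]
    fn test_next_u64_via_u32() {
        let mut rng = CountingRng(0);
        // Low half first (x = 1), then high half (y = 2): (2 << 32) | 1.
        assert_eq!(next_u64_via_u32(&mut rng), 0x0000_0002_0000_0001);
    }

    #[test]
    fn test_fill_bytes_via_next() {
        let mut rng = CountingRng(0);
        let mut buf = [0u8; 13];
        fill_bytes_via_next(&mut rng, &mut buf);
        // One whole u64 (words 1, 2) fills bytes 0..8, then a second u64
        // (words 3, 4) fills the 5-byte tail; only its first 5 bytes are used.
        assert_eq!(buf, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4]);
    }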
}