// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Types for volatile access to memory.
//!
//! Two of the core rules of safe Rust are no data races and no aliased mutable references.
//! `VolatileRef` and `VolatileSlice`, along with types that produce those which implement
//! `VolatileMemory`, allow us to sidestep those rules by wrapping pointers that absolutely have to
//! be accessed volatile. Some systems really do need to operate on shared memory and can't have the
//! compiler reordering or eliding accesses because it has no visibility into what other systems are
//! doing with that hunk of memory.
//!
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//!
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
//! if done concurrently without synchronization. With volatile access we know that the compiler
//! has not reordered or elided the access.
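//!
//! A minimal sketch of the intended usage, mirroring the doc examples below:
//!
//! ```
//! # use data_model::VolatileMemory;
//! let mut backing = [0u8; 4];
//! let mem = &mut backing[..];
//! // Take a typed volatile view of the first four bytes instead of forming a `&mut u32`.
//! let v_ref = mem.get_ref::<u32>(0).unwrap();
//! v_ref.store(0x12345678u32);
//! assert_eq!(v_ref.load(), 0x12345678);
//! ```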

use std::cmp::min;
use std::fmt::{self, Display};
use std::marker::PhantomData;
use std::mem::size_of;
use std::ptr::{copy, null_mut, read_volatile, write_bytes, write_volatile};
use std::result;
use std::{isize, usize};

use crate::DataInit;

#[derive(Eq, PartialEq, Debug)]
pub enum VolatileMemoryError {
    /// `addr` is out of bounds of the volatile memory slice.
    OutOfBounds { addr: u64 },
    /// Taking a slice at `base` with `offset` would overflow `u64`.
    Overflow { base: u64, offset: u64 },
}

impl Display for VolatileMemoryError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::VolatileMemoryError::*;

        match self {
            OutOfBounds { addr } => write!(f, "address 0x{:x} is out of bounds", addr),
            Overflow { base, offset } => write!(
                f,
                "address 0x{:x} offset by 0x{:x} would overflow",
                base, offset
            ),
        }
    }
}

pub type VolatileMemoryResult<T> = result::Result<T, VolatileMemoryError>;

use crate::VolatileMemoryError as Error;
type Result<T> = VolatileMemoryResult<T>;

/// Convenience function for computing `base + offset` which returns
/// `Err(VolatileMemoryError::Overflow)` instead of panicking in the case `base + offset` exceeds
/// `u64::MAX`.
///
/// # Examples
///
/// ```
/// # use data_model::*;
/// # fn get_slice(offset: u64, count: u64) -> VolatileMemoryResult<()> {
///   let mem_end = calc_offset(offset, count)?;
///   if mem_end > 100 {
///       return Err(VolatileMemoryError::OutOfBounds{addr: mem_end});
///   }
/// # Ok(())
/// # }
/// ```
pub fn calc_offset(base: u64, offset: u64) -> Result<u64> {
    match base.checked_add(offset) {
        None => Err(Error::Overflow { base, offset }),
        Some(m) => Ok(m),
    }
}

/// Trait for types that support raw volatile access to their data.
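///
/// # Examples
///
/// A minimal sketch of an implementation backed by an owned `Vec<u8>`, modeled on the `VecMem`
/// helper in this module's tests (illustrative only; any type that can hand out a bounds-checked
/// pointer works the same way):
///
/// ```
/// # use data_model::*;
/// struct OwnedMem(Vec<u8>);
///
/// impl VolatileMemory for OwnedMem {
///     fn get_slice(&self, offset: u64, count: u64) -> VolatileMemoryResult<VolatileSlice> {
///         let mem_end = calc_offset(offset, count)?;
///         if mem_end > self.0.len() as u64 {
///             return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
///         }
///         // Safe because the backing memory outlives the returned slice (which borrows `self`)
///         // and the bounds were checked above.
///         Ok(unsafe { VolatileSlice::new((self.0.as_ptr() as u64 + offset) as *mut _, count) })
///     }
/// }
///
/// let mem = OwnedMem(vec![0u8; 32]);
/// assert_eq!(mem.get_slice(8, 16).unwrap().size(), 16);
/// ```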
pub trait VolatileMemory {
    /// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
    /// access.
    fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice>;

    /// Gets a `VolatileRef` at `offset`.
    fn get_ref<T: DataInit>(&self, offset: u64) -> Result<VolatileRef<T>> {
        let slice = self.get_slice(offset, size_of::<T>() as u64)?;
        Ok(VolatileRef {
            addr: slice.addr as *mut T,
            phantom: PhantomData,
        })
    }
}

impl<'a> VolatileMemory for &'a mut [u8] {
    fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > self.len() as u64 {
            return Err(Error::OutOfBounds { addr: mem_end });
        }
        Ok(unsafe { VolatileSlice::new((self.as_ptr() as u64 + offset) as *mut _, count) })
    }
}

/// A slice of raw memory that supports volatile access.
#[derive(Copy, Clone, Debug)]
pub struct VolatileSlice<'a> {
    addr: *mut u8,
    size: u64,
    phantom: PhantomData<&'a u8>,
}

impl<'a> Default for VolatileSlice<'a> {
    fn default() -> VolatileSlice<'a> {
        VolatileSlice {
            addr: null_mut(),
            size: 0,
            phantom: PhantomData,
        }
    }
}

impl<'a> VolatileSlice<'a> {
    /// Creates a slice of raw memory that must support volatile access.
    ///
    /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
    /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
    /// must also guarantee that all other users of the given chunk of memory are using volatile
    /// accesses.
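    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `VolatileSlice` is re-exported at the crate root like
    /// `VolatileRef`): the backing array lives for the whole scope and is only accessed through
    /// the slice while it is in use.
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut bytes = [0u8; 16];
    /// // Safe because `bytes` outlives the slice and nothing else accesses it concurrently.
    /// let vslice = unsafe { VolatileSlice::new(bytes.as_mut_ptr(), bytes.len() as u64) };
    /// assert_eq!(vslice.size(), 16);
    /// ```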
    pub unsafe fn new(addr: *mut u8, size: u64) -> VolatileSlice<'a> {
        VolatileSlice {
            addr,
            size,
            phantom: PhantomData,
        }
    }

    /// Gets the address of this slice's memory.
    pub fn as_ptr(&self) -> *mut u8 {
        self.addr
    }

    /// Gets the size of this slice.
    pub fn size(&self) -> u64 {
        self.size
    }

    /// Creates a copy of this slice with the address increased by `count` bytes, and the size
    /// reduced by `count` bytes.
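    ///
    /// # Examples
    ///
    /// A minimal sketch, built on `get_slice` over a byte buffer as in the other examples here:
    ///
    /// ```
    /// # use data_model::VolatileMemory;
    /// # fn test_offset() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let mem_ref = &mut mem[..];
    /// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// // Skip the first 8 bytes; the remaining window is 24 bytes long.
    /// let tail = vslice.offset(8).map_err(|_| ())?;
    /// assert_eq!(tail.size(), 24);
    /// # Ok(())
    /// # }
    /// ```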
    pub fn offset(self, count: u64) -> Result<VolatileSlice<'a>> {
        let new_addr =
            (self.addr as u64)
                .checked_add(count)
                .ok_or(VolatileMemoryError::Overflow {
                    base: self.addr as u64,
                    offset: count,
                })?;
        if new_addr > usize::MAX as u64 {
            return Err(VolatileMemoryError::Overflow {
                base: self.addr as u64,
                offset: count,
            });
        }
        let new_size = self
            .size
            .checked_sub(count)
            .ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;
        // Safe because the memory has the same lifetime and points to a subset of the memory of the
        // original slice.
        unsafe { Ok(VolatileSlice::new(new_addr as *mut u8, new_size)) }
    }

    /// Sets each byte of this slice with the given byte, similar to `memset`.
    ///
    /// The bytes of this slice are accessed in an arbitrary order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileMemory;
    /// # fn test_write_45() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let mem_ref = &mut mem[..];
    /// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// vslice.write_bytes(45);
    /// for &mut v in mem_ref {
    ///     assert_eq!(v, 45);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_bytes(&self, value: u8) {
        // Safe because the memory is valid and needs only byte alignment.
        unsafe {
            write_bytes(self.as_ptr(), value, self.size as usize);
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// `buf`.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile reads.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use data_model::VolatileMemory;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let mem_ref = &mut mem[..];
    /// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// let mut buf = [5u8; 16];
    /// vslice.copy_to(&mut buf[..]);
    /// for v in &buf[..] {
    ///     assert_eq!(*v, 0);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to<T>(&self, buf: &mut [T])
    where
        T: DataInit,
    {
        let mut addr = self.addr;
        for v in buf.iter_mut().take(self.size as usize / size_of::<T>()) {
            // Safe because the take() bound keeps every read within this slice's memory.
            unsafe {
                *v = read_volatile(addr as *const T);
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Copies `self.size()` or `slice.size()` bytes, whichever is smaller, to `slice`.
    ///
    /// The copies happen in an undefined order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileMemory;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let mem_ref = &mut mem[..];
    /// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
        // Safe because both slices point to valid memory of their recorded sizes and the copy
        // length is clamped to the smaller of the two.
        unsafe {
            copy(self.addr, slice.addr, min(self.size, slice.size) as usize);
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// this slice's memory.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use data_model::VolatileMemory;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let mem_ref = &mut mem[..];
    /// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
    /// let buf = [5u8; 64];
    /// vslice.copy_from(&buf[..]);
    /// for i in 0..4 {
    ///     assert_eq!(vslice.get_ref::<u32>(i * 4).map_err(|_| ())?.load(), 0x05050505);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_from<T>(&self, buf: &[T])
    where
        T: DataInit,
    {
        let mut addr = self.addr;
        for &v in buf.iter().take(self.size as usize / size_of::<T>()) {
            // Safe because the take() bound keeps every write within this slice's memory.
            unsafe {
                write_volatile(addr as *mut T, v);
                addr = addr.add(size_of::<T>());
            }
        }
    }
}

impl<'a> VolatileMemory for VolatileSlice<'a> {
    fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > self.size {
            return Err(Error::OutOfBounds { addr: mem_end });
        }
        Ok(VolatileSlice {
            addr: (self.addr as u64 + offset) as *mut _,
            size: count,
            phantom: PhantomData,
        })
    }
}

/// A memory location that supports volatile access of a `T`.
///
/// # Examples
///
/// ```
/// # use data_model::VolatileRef;
///   let mut v = 5u32;
///   assert_eq!(v, 5);
///   let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32) };
///   assert_eq!(v_ref.load(), 5);
///   v_ref.store(500);
///   assert_eq!(v, 500);
/// ```
#[derive(Debug)]
pub struct VolatileRef<'a, T: DataInit>
where
    T: 'a,
{
    addr: *mut T,
    phantom: PhantomData<&'a T>,
}

impl<'a, T: DataInit> VolatileRef<'a, T> {
    /// Creates a reference to raw memory that must support volatile access of `T` sized chunks.
    ///
    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
    /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
    /// must also guarantee that all other users of the given chunk of memory are using volatile
    /// accesses.
    pub unsafe fn new(addr: *mut T) -> VolatileRef<'a, T> {
        VolatileRef {
            addr,
            phantom: PhantomData,
        }
    }

    /// Gets the address of this reference's memory.
    pub fn as_ptr(&self) -> *mut T {
        self.addr
    }

    /// Gets the size of the referenced `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::mem::size_of;
    /// # use data_model::VolatileRef;
    ///   let v_ref = unsafe { VolatileRef::new(0 as *mut u32) };
    ///   assert_eq!(v_ref.size(), size_of::<u32>() as u64);
    /// ```
    pub fn size(&self) -> u64 {
        size_of::<T>() as u64
    }

    /// Does a volatile write of the value `v` to the address of this ref.
    #[inline(always)]
    pub fn store(&self, v: T) {
        unsafe { write_volatile(self.addr, v) };
    }

    /// Does a volatile read of the value at the address of this ref.
    #[inline(always)]
    pub fn load(&self) -> T {
        // For the purposes of demonstrating why read_volatile is necessary, try replacing the code
        // in this function with the commented code below and running `cargo test --release`.
        // unsafe { *(self.addr as *const T) }
        unsafe { read_volatile(self.addr) }
    }

    /// Converts this `T` reference to a raw slice with the same size and address.
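    ///
    /// # Examples
    ///
    /// A minimal sketch, mirroring the `ref_to_slice` test below:
    ///
    /// ```
    /// # use data_model::VolatileRef;
    /// let mut v = 0u32;
    /// let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32) };
    /// // The slice view covers exactly the four bytes of the `u32` at the same address.
    /// let vslice = v_ref.to_slice();
    /// assert_eq!(vslice.size(), 4);
    /// assert_eq!(vslice.as_ptr() as u64, v_ref.as_ptr() as u64);
    /// ```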
    pub fn to_slice(&self) -> VolatileSlice<'a> {
        unsafe { VolatileSlice::new(self.addr as *mut u8, size_of::<T>() as u64) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::sync::Arc;
    use std::thread::{sleep, spawn};
    use std::time::Duration;

    #[derive(Clone)]
    struct VecMem {
        mem: Arc<Vec<u8>>,
    }

    impl VecMem {
        fn new(size: usize) -> VecMem {
            let mut mem = Vec::new();
            mem.resize(size, 0);
            VecMem { mem: Arc::new(mem) }
        }
    }

    impl VolatileMemory for VecMem {
        fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
            let mem_end = calc_offset(offset, count)?;
            if mem_end > self.mem.len() as u64 {
                return Err(Error::OutOfBounds { addr: mem_end });
            }
            Ok(unsafe { VolatileSlice::new((self.mem.as_ptr() as u64 + offset) as *mut _, count) })
        }
    }

    #[test]
    fn ref_store() {
        let mut a = [0u8; 1];
        let a_ref = &mut a[..];
        let v_ref = a_ref.get_ref(0).unwrap();
        v_ref.store(2u8);
        assert_eq!(a[0], 2);
    }

    #[test]
    fn ref_load() {
        let mut a = [5u8; 1];
        {
            let a_ref = &mut a[..];
            let c = {
                let v_ref = a_ref.get_ref::<u8>(0).unwrap();
                assert_eq!(v_ref.load(), 5u8);
                v_ref
            };
            // To make sure we can take a v_ref out of the scope we made it in:
            c.load();
            // but not too far:
            // c
        } //.load()
        ;
    }

    #[test]
    fn ref_to_slice() {
        let mut a = [1u8; 5];
        let a_ref = &mut a[..];
        let v_ref = a_ref.get_ref(1).unwrap();
        v_ref.store(0x12345678u32);
        let ref_slice = v_ref.to_slice();
        assert_eq!(v_ref.as_ptr() as u64, ref_slice.as_ptr() as u64);
        assert_eq!(v_ref.size(), ref_slice.size());
    }

    #[test]
    fn observe_mutate() {
        let a = VecMem::new(1);
        let a_clone = a.clone();
        let v_ref = a.get_ref::<u8>(0).unwrap();
        v_ref.store(99);
        spawn(move || {
            sleep(Duration::from_millis(10));
            let clone_v_ref = a_clone.get_ref::<u8>(0).unwrap();
            clone_v_ref.store(0);
        });

        // Technically this is a race condition but we have to observe the v_ref's value changing
        // somehow and this helps to ensure the sleep actually happens before the store rather than
        // being reordered by the compiler.
        assert_eq!(v_ref.load(), 99);

        // Granted we could have a machine that manages to perform this many volatile loads in the
        // amount of time the spawned thread sleeps, but the most likely reason the retry limit will
        // get reached is because v_ref.load() is not actually performing the required volatile read
        // or v_ref.store() is not doing a volatile write. A timer based solution was avoided
        // because that might use a syscall which could hint the optimizer to reload v_ref's pointer
        // regardless of volatile status. Note that we use a longer retry duration for optimized
        // builds.
        #[cfg(debug_assertions)]
        const RETRY_MAX: u64 = 500_000_000;
        #[cfg(not(debug_assertions))]
        const RETRY_MAX: u64 = 10_000_000_000;

        let mut retry = 0;
        while v_ref.load() == 99 && retry < RETRY_MAX {
            retry += 1;
        }

        assert_ne!(retry, RETRY_MAX, "maximum retry exceeded");
        assert_eq!(v_ref.load(), 0);
    }

    #[test]
    fn slice_size() {
        let a = VecMem::new(100);
        let s = a.get_slice(0, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = a.get_slice(34, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = s.get_slice(20, 5).unwrap();
        assert_eq!(s.size(), 5);
    }

    #[test]
    fn slice_overflow_error() {
        use std::u64::MAX;
        let a = VecMem::new(1);
        let res = a.get_slice(MAX, 1).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let a = VecMem::new(100);
        a.get_slice(50, 50).unwrap();
        let res = a.get_slice(55, 50).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 105 });
    }

    #[test]
    fn ref_overflow_error() {
        use std::u64::MAX;
        let a = VecMem::new(1);
        let res = a.get_ref::<u8>(MAX).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn ref_oob_error() {
        let a = VecMem::new(100);
        a.get_ref::<u8>(99).unwrap();
        let res = a.get_ref::<u16>(99).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 101 });
    }

    #[test]
    fn ref_oob_too_large() {
        let a = VecMem::new(3);
        let res = a.get_ref::<u32>(0).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 4 });
    }
}