// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Types for volatile access to memory.
//!
//! Two of the core rules of safe Rust are no data races and no aliased mutable references.
//! `VolatileRef` and `VolatileSlice`, along with the types that produce them (implementors of
//! `VolatileMemory`), allow us to sidestep those rules by wrapping pointers that absolutely have
//! to be accessed volatile. Some systems really do need to operate on shared memory and can't have
//! the compiler reordering or eliding accesses because it has no visibility into what other
//! systems are doing with that hunk of memory.
//!
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//!
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
//! if done concurrently without synchronization. With volatile access we know that the compiler
//! has not reordered or elided the access.
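//!
//! A minimal sketch of the intended pattern, in the style of the doctests below (assuming the
//! `data_model` crate name those doctests use):
//!
//! ```
//! # use data_model::{VolatileMemory, VolatileSlice};
//! let mut mem = [0u8; 4];
//! let vslice = VolatileSlice::new(&mut mem[..]);
//! // All access goes through volatile loads and stores; no `&`/`&mut` to the memory is taken.
//! let byte = vslice.get_ref::<u8>(0).unwrap();
//! byte.store(0xab);
//! assert_eq!(byte.load(), 0xab);
//! ```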

use std::cmp::min;
use std::marker::PhantomData;
use std::mem::size_of;
use std::ptr::{copy, read_volatile, write_bytes, write_volatile};
use std::result;
use std::slice;
use std::usize;

use remain::sorted;
use thiserror::Error;

use crate::{sys::IoBufMut, DataInit};

#[sorted]
#[derive(Error, Eq, PartialEq, Debug)]
pub enum VolatileMemoryError {
    /// `addr` is out of bounds of the volatile memory slice.
    #[error("address 0x{addr:x} is out of bounds")]
    OutOfBounds { addr: usize },
    /// Taking a slice at `base` with `offset` would overflow `usize`.
    #[error("address 0x{base:x} offset by 0x{offset:x} would overflow")]
    Overflow { base: usize, offset: usize },
}

pub type VolatileMemoryResult<T> = result::Result<T, VolatileMemoryError>;

use crate::VolatileMemoryError as Error;
type Result<T> = VolatileMemoryResult<T>;

/// Convenience function for computing `base + offset` which returns
/// `Err(VolatileMemoryError::Overflow)` instead of panicking in the case `base + offset` exceeds
/// `usize::MAX`.
///
/// # Examples
///
/// ```
/// # use data_model::*;
/// # fn get_slice(offset: usize, count: usize) -> VolatileMemoryResult<()> {
///   let mem_end = calc_offset(offset, count)?;
///   if mem_end > 100 {
///       return Err(VolatileMemoryError::OutOfBounds{addr: mem_end});
///   }
/// # Ok(())
/// # }
/// ```
pub fn calc_offset(base: usize, offset: usize) -> Result<usize> {
    match base.checked_add(offset) {
        None => Err(Error::Overflow { base, offset }),
        Some(m) => Ok(m),
    }
}

/// Trait for types that support raw volatile access to their data.
pub trait VolatileMemory {
    /// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
    /// access.
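    ///
    /// # Examples
    ///
    /// (A sketch using `VolatileSlice` as the implementor, following this file's doctest
    /// conventions.)
    ///
    /// ```
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let s = vslice.get_slice(4, 16).unwrap();
    /// assert_eq!(s.size(), 16);
    /// ```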
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice>;

    /// Gets a `VolatileRef` at `offset`.
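    ///
    /// # Examples
    ///
    /// (A sketch in the style of the other doctests here; mirrors the `copy_from` example below.)
    ///
    /// ```
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let r = vslice.get_ref::<u32>(4).unwrap();
    /// r.store(0x12345678u32);
    /// assert_eq!(r.load(), 0x12345678);
    /// ```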
    fn get_ref<T: DataInit>(&self, offset: usize) -> Result<VolatileRef<T>> {
        let slice = self.get_slice(offset, size_of::<T>())?;
        Ok(VolatileRef {
            addr: slice.as_mut_ptr() as *mut T,
            phantom: PhantomData,
        })
    }
}

/// A slice of raw memory that supports volatile access. Like `IoBufMut`, this type is
/// guaranteed to be ABI-compatible with `libc::iovec` but unlike `IoBufMut`, it doesn't
/// automatically deref to `&mut [u8]`.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct VolatileSlice<'a>(IoBufMut<'a>);

impl<'a> VolatileSlice<'a> {
    /// Creates a slice of raw memory that must support volatile access.
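    ///
    /// # Examples
    ///
    /// (A straightforward usage sketch in this file's doctest style.)
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// assert_eq!(vslice.size(), 32);
    /// ```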
    pub fn new(buf: &mut [u8]) -> VolatileSlice {
        VolatileSlice(IoBufMut::new(buf))
    }

    /// Creates a `VolatileSlice` from a pointer and a length.
    ///
    /// # Safety
    ///
    /// In order to use this method safely, `addr` must be valid for reads and writes of `len` bytes
    /// and must remain valid for the entire duration of lifetime `'a`.
    pub unsafe fn from_raw_parts(addr: *mut u8, len: usize) -> VolatileSlice<'a> {
        VolatileSlice(IoBufMut::from_raw_parts(addr, len))
    }

    /// Gets a const pointer to this slice's memory.
    pub fn as_ptr(&self) -> *const u8 {
        self.0.as_ptr()
    }

    /// Gets a mutable pointer to this slice's memory.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.0.as_mut_ptr()
    }

    /// Gets the size of this slice.
    pub fn size(&self) -> usize {
        self.0.len()
    }

    /// Advances the starting position of this slice by `count` bytes.
    ///
    /// Panics if `count > self.size()`.
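    ///
    /// # Examples
    ///
    /// (A sketch; assumes advancing shrinks the slice by `count` bytes.)
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let mut vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.advance(10);
    /// assert_eq!(vslice.size(), 22);
    /// ```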
    pub fn advance(&mut self, count: usize) {
        self.0.advance(count)
    }

    /// Shortens this slice to `len` bytes.
    ///
    /// Has no effect if `len > self.size()`.
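    ///
    /// # Examples
    ///
    /// (A sketch; assumes truncating caps the slice at `len` bytes.)
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let mut vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.truncate(10);
    /// assert_eq!(vslice.size(), 10);
    /// ```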
    pub fn truncate(&mut self, len: usize) {
        self.0.truncate(len)
    }

    /// Returns this `VolatileSlice` as an `IoBufMut`.
    pub fn as_iobuf(&self) -> &IoBufMut {
        &self.0
    }

    /// Converts a slice of `VolatileSlice`s into a slice of `IoBufMut`s.
    #[allow(clippy::wrong_self_convention)]
    pub fn as_iobufs<'mem, 'slice>(
        iovs: &'slice [VolatileSlice<'mem>],
    ) -> &'slice [IoBufMut<'mem>] {
        // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
        unsafe { slice::from_raw_parts(iovs.as_ptr() as *const IoBufMut, iovs.len()) }
    }

    /// Creates a copy of this slice with the address increased by `count` bytes, and the size
    /// reduced by `count` bytes.
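    ///
    /// # Examples
    ///
    /// (A sketch in this file's doctest style.)
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let offset_slice = vslice.offset(16).unwrap();
    /// assert_eq!(offset_slice.size(), 16);
    /// ```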
    pub fn offset(self, count: usize) -> Result<VolatileSlice<'a>> {
        let new_addr = (self.as_mut_ptr() as usize).checked_add(count).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset: count,
            },
        )?;
        let new_size = self
            .size()
            .checked_sub(count)
            .ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;

        // Safe because the memory has the same lifetime and points to a subset of the memory of the
        // original slice.
        unsafe { Ok(VolatileSlice::from_raw_parts(new_addr as *mut u8, new_size)) }
    }

    /// Similar to `get_slice` but the returned slice outlives this slice.
    ///
    /// The returned slice's lifetime is still limited by the underlying data's lifetime.
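    ///
    /// # Examples
    ///
    /// (A sketch in this file's doctest style.)
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let sub = vslice.sub_slice(8, 16).unwrap();
    /// assert_eq!(sub.size(), 16);
    /// assert!(vslice.sub_slice(24, 16).is_err());
    /// ```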
    pub fn sub_slice(self, offset: usize, count: usize) -> Result<VolatileSlice<'a>> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > self.size() {
            return Err(Error::OutOfBounds { addr: mem_end });
        }
        let new_addr = (self.as_mut_ptr() as usize).checked_add(offset).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset,
            },
        )?;

        // Safe because we have verified that the new memory is a subset of the original slice.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }

    /// Sets each byte of this slice with the given byte, similar to `memset`.
    ///
    /// The bytes of this slice are accessed in an arbitrary order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// # fn test_write_45() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.write_bytes(45);
    /// for &v in &mem[..] {
    ///     assert_eq!(v, 45);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_bytes(&self, value: u8) {
        // Safe because the memory is valid and needs only byte alignment.
        unsafe {
            write_bytes(self.as_mut_ptr(), value, self.size());
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// `buf`.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile reads.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let mut buf = [5u8; 16];
    /// vslice.copy_to(&mut buf[..]);
    /// for &v in &buf[..] {
    ///     assert_eq!(v, 0);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to<T>(&self, buf: &mut [T])
    where
        T: DataInit,
    {
        let mut addr = self.as_mut_ptr() as *const u8;
        for v in buf.iter_mut().take(self.size() / size_of::<T>()) {
            // Safe because `take` bounds the reads to this slice's memory, which is valid for
            // reads of `T` for the lifetime of the slice.
            unsafe {
                *v = read_volatile(addr as *const T);
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Copies `self.size()` or `slice.size()` bytes, whichever is smaller, to `slice`.
    ///
    /// The copies happen in an undefined order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
        // Safe because `self` and `slice` both point to valid memory of at least
        // `min(self.size(), slice.size())` bytes, and `copy` permits overlapping ranges.
        unsafe {
            copy(
                self.as_mut_ptr() as *const u8,
                slice.as_mut_ptr(),
                min(self.size(), slice.size()),
            );
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// this slice's memory.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let buf = [5u8; 64];
    /// vslice.copy_from(&buf[..]);
    /// for i in 0..4 {
    ///     assert_eq!(vslice.get_ref::<u32>(i * 4).map_err(|_| ())?.load(), 0x05050505);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_from<T>(&self, buf: &[T])
    where
        T: DataInit,
    {
        let mut addr = self.as_mut_ptr();
        for &v in buf.iter().take(self.size() / size_of::<T>()) {
            // Safe because `take` bounds the writes to this slice's memory, which is valid for
            // writes of `T` for the lifetime of the slice.
            unsafe {
                write_volatile(addr as *mut T, v);
                addr = addr.add(size_of::<T>());
            }
        }
    }
}

impl<'a> VolatileMemory for VolatileSlice<'a> {
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
        self.sub_slice(offset, count)
    }
}

/// A memory location that supports volatile access of a `T`.
///
/// # Examples
///
/// ```
/// # use data_model::VolatileRef;
///   let mut v = 5u32;
///   assert_eq!(v, 5);
///   let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32) };
///   assert_eq!(v_ref.load(), 5);
///   v_ref.store(500);
///   assert_eq!(v, 500);
/// ```
#[derive(Debug)]
pub struct VolatileRef<'a, T: DataInit>
where
    T: 'a,
{
    addr: *mut T,
    phantom: PhantomData<&'a T>,
}

impl<'a, T: DataInit> VolatileRef<'a, T> {
    /// Creates a reference to raw memory that must support volatile access of `T` sized chunks.
    ///
    /// # Safety
    ///
    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
    /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
    /// must also guarantee that all other users of the given chunk of memory are using volatile
    /// accesses.
    pub unsafe fn new(addr: *mut T) -> VolatileRef<'a, T> {
        VolatileRef {
            addr,
            phantom: PhantomData,
        }
    }

    /// Gets the address of this ref's memory.
    pub fn as_mut_ptr(&self) -> *mut T {
        self.addr
    }

    /// Gets the size of this ref's memory, which is the size of `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::mem::size_of;
    /// # use data_model::VolatileRef;
    ///   let v_ref = unsafe { VolatileRef::new(0 as *mut u32) };
    ///   assert_eq!(v_ref.size(), size_of::<u32>());
    /// ```
    pub fn size(&self) -> usize {
        size_of::<T>()
    }

    /// Does a volatile write of the value `v` to the address of this ref.
    #[inline(always)]
    pub fn store(&self, v: T) {
        unsafe { write_volatile(self.addr, v) };
    }

    /// Does a volatile read of the value at the address of this ref.
    #[inline(always)]
    pub fn load(&self) -> T {
        // For the purposes of demonstrating why read_volatile is necessary, try replacing the code
        // in this function with the commented code below and running `cargo test --release`.
        // unsafe { *(self.addr as *const T) }
        unsafe { read_volatile(self.addr) }
    }

    /// Converts this `T` reference to a raw slice with the same size and address.
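    ///
    /// # Examples
    ///
    /// (A sketch mirroring the `ref_to_slice` test at the bottom of this file.)
    ///
    /// ```
    /// # use std::mem::size_of;
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// let mut a = [1u8; 8];
    /// let a_ref = VolatileSlice::new(&mut a[..]);
    /// let v_ref = a_ref.get_ref::<u32>(0).unwrap();
    /// let ref_slice = v_ref.to_slice();
    /// assert_eq!(ref_slice.size(), size_of::<u32>());
    /// ```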
    pub fn to_slice(&self) -> VolatileSlice<'a> {
        // Safe because the returned slice covers exactly the memory of this ref, with the same
        // lifetime.
        unsafe { VolatileSlice::from_raw_parts(self.as_mut_ptr() as *mut u8, self.size()) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::sync::{Arc, Barrier};
    use std::thread::spawn;

    #[derive(Clone)]
    struct VecMem {
        mem: Arc<Vec<u8>>,
    }

    impl VecMem {
        fn new(size: usize) -> VecMem {
            let mut mem = Vec::new();
            mem.resize(size, 0);
            VecMem { mem: Arc::new(mem) }
        }
    }

    impl VolatileMemory for VecMem {
        fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
            let mem_end = calc_offset(offset, count)?;
            if mem_end > self.mem.len() {
                return Err(Error::OutOfBounds { addr: mem_end });
            }

            let new_addr = (self.mem.as_ptr() as usize).checked_add(offset).ok_or(
                VolatileMemoryError::Overflow {
                    base: self.mem.as_ptr() as usize,
                    offset,
                },
            )?;

            // Safe because we checked above that `offset + count` lies within the backing `Vec`.
            Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
        }
    }

    #[test]
    fn ref_store() {
        let mut a = [0u8; 1];
        let a_ref = VolatileSlice::new(&mut a[..]);
        let v_ref = a_ref.get_ref(0).unwrap();
        v_ref.store(2u8);
        assert_eq!(a[0], 2);
    }

    #[test]
    fn ref_load() {
        let mut a = [5u8; 1];
        {
            let a_ref = VolatileSlice::new(&mut a[..]);
            let c = {
                let v_ref = a_ref.get_ref::<u8>(0).unwrap();
                assert_eq!(v_ref.load(), 5u8);
                v_ref
            };
            // To make sure we can take a v_ref out of the scope we made it in:
            c.load();
            // but not too far:
            // c
        } //.load()
        ;
    }

    #[test]
    fn ref_to_slice() {
        let mut a = [1u8; 5];
        let a_ref = VolatileSlice::new(&mut a[..]);
        let v_ref = a_ref.get_ref(1).unwrap();
        v_ref.store(0x12345678u32);
        let ref_slice = v_ref.to_slice();
        assert_eq!(v_ref.as_mut_ptr() as usize, ref_slice.as_mut_ptr() as usize);
        assert_eq!(v_ref.size(), ref_slice.size());
    }

    #[test]
    fn observe_mutate() {
        let a = VecMem::new(1);
        let a_clone = a.clone();
        let v_ref = a.get_ref::<u8>(0).unwrap();
        v_ref.store(99);

        let start_barrier = Arc::new(Barrier::new(2));
        let thread_start_barrier = start_barrier.clone();
        let end_barrier = Arc::new(Barrier::new(2));
        let thread_end_barrier = end_barrier.clone();
        spawn(move || {
            thread_start_barrier.wait();
            let clone_v_ref = a_clone.get_ref::<u8>(0).unwrap();
            clone_v_ref.store(0);
            thread_end_barrier.wait();
        });

        assert_eq!(v_ref.load(), 99);

        start_barrier.wait();
        end_barrier.wait();

        assert_eq!(v_ref.load(), 0);
    }

    #[test]
    fn slice_size() {
        let a = VecMem::new(100);
        let s = a.get_slice(0, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = a.get_slice(34, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = s.get_slice(20, 5).unwrap();
        assert_eq!(s.size(), 5);
    }

    #[test]
    fn slice_overflow_error() {
        use std::usize::MAX;
        let a = VecMem::new(1);
        let res = a.get_slice(MAX, 1).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let a = VecMem::new(100);
        a.get_slice(50, 50).unwrap();
        let res = a.get_slice(55, 50).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 105 });
    }

    #[test]
    fn ref_overflow_error() {
        use std::usize::MAX;
        let a = VecMem::new(1);
        let res = a.get_ref::<u8>(MAX).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn ref_oob_error() {
        let a = VecMem::new(100);
        a.get_ref::<u8>(99).unwrap();
        let res = a.get_ref::<u16>(99).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 101 });
    }

    #[test]
    fn ref_oob_too_large() {
        let a = VecMem::new(3);
        let res = a.get_ref::<u32>(0).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 4 });
    }
}